diff --git a/.gitignore b/.gitignore
index 4d8e0a6..a59e8b8 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
rssmon3
exec-rssmon3
**.sw*
+**/testdata
**/._*
**/exec-*
exec-*
diff --git a/server/routes.go b/server/routes.go
index 75f963e..ae232b3 100755
--- a/server/routes.go
+++ b/server/routes.go
@@ -88,6 +88,10 @@ func (s *Server) feed(w http.ResponseWriter, r *http.Request) {
ContentFilter string `json:"content"`
Tags []string `json:"tags"`
Headers map[string]string `json:"headers"`
+ URL string `json:"url"`
+ Interval string `json:"refresh"`
+ TitleFilter string `json:"items"`
+ Copyright string `json:"copyright"`
}
if err := json.NewDecoder(r.Body).Decode(&putFeed); err != nil {
s.userError(w, r, err)
diff --git a/testdata/2do b/testdata/2do
deleted file mode 100755
index 9c14062..0000000
--- a/testdata/2do
+++ /dev/null
@@ -1,5 +0,0 @@
-* load feed jobs on startup and queue
-* implement endpoints for server
-* implement torrent+podcast handlers
-* fetch and embed?
-* render tags -> feeds for server
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100755
index 0000000..d645695
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
new file mode 100755
index 0000000..125b703
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -0,0 +1,513 @@
+// Copyright 2014 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // metadataIP is the documented metadata server IP address.
+ metadataIP = "169.254.169.254"
+
+ // metadataHostEnv is the environment variable specifying the
+ // GCE metadata hostname. If empty, the default value of
+ // metadataIP ("169.254.169.254") is used instead.
+ // This environment variable name is not defined by any spec, as far
+ // as I know; it was made up for the Go package.
+ metadataHostEnv = "GCE_METADATA_HOST"
+
+ userAgent = "gcloud-golang/0.1"
+)
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+ defaultClient = &Client{hc: &http.Client{
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 2 * time.Second,
+ },
+ }}
+ subscribeClient = &Client{hc: &http.Client{
+ Transport: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ },
+ }}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+ return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+func (c *cachedValue) get(cl *Client) (v string, err error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = cl.getTrimmed(c.k)
+ } else {
+ v, err = cl.Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var (
+ onGCEOnce sync.Once
+ onGCE bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ onGCEOnce.Do(initOnGCE)
+ return onGCE
+}
+
+func initOnGCE() {
+ onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+ // The user explicitly said they're on GCE, so trust them.
+ if os.Getenv(metadataHostEnv) != "" {
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/googleapis/google-cloud-go/issues/194
+ go func() {
+ req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+ req.Header.Set("User-Agent", userAgent)
+ res, err := defaultClient.hc.Do(req.WithContext(ctx))
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+ }()
+
+ go func() {
+ addrs, err := net.LookupHost("metadata.google.internal")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+ }()
+
+ tryHarder := systemInfoSuggestsGCE()
+ if tryHarder {
+ res := <-resc
+ if res {
+ // The first strategy succeeded, so let's use it.
+ return true
+ }
+ // Wait for either the DNS or metadata server probe to
+ // contradict the other one and say we are running on
+ // GCE. Give it a lot of time to do so, since the system
+ // info already suggests we're running on a GCE BIOS.
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ select {
+ case res = <-resc:
+ return res
+ case <-timer.C:
+ // Too slow. Who knows what this system is.
+ return false
+ }
+ }
+
+ // There's no hint from the system info that we're running on
+ // GCE, so use the first probe's result as truth, whether it's
+ // true or false. The goal here is to optimize for speed for
+ // users who are NOT running on GCE. We can't assume that
+ // either a DNS lookup or an HTTP request to a blackholed IP
+ // address is fast. Worst case this should return when the
+ // defaultClient's Transport.ResponseHeaderTimeout or
+ // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+ if runtime.GOOS != "linux" {
+ // We don't have any non-Linux clues available, at least yet.
+ return false
+ }
+ slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+ name := strings.TrimSpace(string(slurp))
+ return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
+// ResponseHeaderTimeout).
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ return subscribeClient.Subscribe(suffix, fn)
+}
+
+// Get calls Client.Get on the default client.
+func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return defaultClient.ProjectID() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) { return defaultClient.InternalIP() }
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
+
+// Hostname returns the instance's hostname. This will be of the form
+// ".c..internal".
+func Hostname() (string, error) { return defaultClient.Hostname() }
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) { return defaultClient.InstanceID() }
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) { return defaultClient.InstanceName() }
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) { return defaultClient.Zone() }
+
+// InstanceAttributes calls Client.InstanceAttributes on the default client.
+func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+
+// ProjectAttributes calls Client.ProjectAttributes on the default client.
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
+
+// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
+func InstanceAttributeValue(attr string) (string, error) {
+ return defaultClient.InstanceAttributeValue(attr)
+}
+
+// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
+func ProjectAttributeValue(attr string) (string, error) {
+ return defaultClient.ProjectAttributeValue(attr)
+}
+
+// Scopes calls Client.Scopes on the default client.
+func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
+
+func strsContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+// A Client provides metadata.
+type Client struct {
+ hc *http.Client
+}
+
+// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
+// will use the given http.Client instead of the default client.
+func NewClient(c *http.Client) *Client {
+ return &Client{hc: c}
+}
+
+// getETag returns a value from the metadata service as well as the associated ETag.
+// This func is otherwise equivalent to Get.
+func (c *Client) getETag(suffix string) (value, etag string, err error) {
+ // Using a fixed IP makes it very difficult to spoof the metadata service in
+ // a container, which is an important use-case for local testing of cloud
+ // deployments. To enable spoofing of the metadata service, the environment
+ // variable GCE_METADATA_HOST is first inspected to decide where metadata
+ // requests shall go.
+ host := os.Getenv(metadataHostEnv)
+ if host == "" {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ host = metadataIP
+ }
+ u := "http://" + host + "/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", u, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ req.Header.Set("User-Agent", userAgent)
+ res, err := c.hc.Do(req)
+ if err != nil {
+ return "", "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return "", "", NotDefinedError(suffix)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ if res.StatusCode != 200 {
+ return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ }
+ return string(all), res.Header.Get("Etag"), nil
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func (c *Client) Get(suffix string) (string, error) {
+ val, _, err := c.getETag(suffix)
+ return val, err
+}
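+
+// A hedged usage sketch (not part of the upstream documentation); the suffix
+// below is a documented metadata path, and the error handling is illustrative:
+//
+//	if metadata.OnGCE() {
+//		host, err := metadata.Get("instance/hostname")
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		log.Println("running on GCE as", host)
+//	}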
+
+func (c *Client) getTrimmed(suffix string) (s string, err error) {
+ s, err = c.Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *Client) lines(suffix string) ([]string, error) {
+ j, err := c.Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// ProjectID returns the current instance's project ID string.
+func (c *Client) ProjectID() (string, error) { return projID.get(c) }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
+
+// InstanceID returns the current VM's numeric instance ID.
+func (c *Client) InstanceID() (string, error) { return instID.get(c) }
+
+// InternalIP returns the instance's primary internal IP address.
+func (c *Client) InternalIP() (string, error) {
+ return c.getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func (c *Client) ExternalIP() (string, error) {
+ return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// ".c..internal".
+func (c *Client) Hostname() (string, error) {
+ return c.getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTags() ([]string, error) {
+ var s []string
+ j, err := c.Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceName returns the current VM's instance name.
+func (c *Client) InstanceName() (string, error) {
+ host, err := c.Hostname()
+ if err != nil {
+ return "", err
+ }
+ return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func (c *Client) Zone() (string, error) {
+ zone, err := c.getTrimmed("instance/zone")
+ // zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil {
+ return "", err
+ }
+ return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) InstanceAttributeValue(attr string) (string, error) {
+ return c.Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) ProjectAttributeValue(attr string) (string, error) {
+ return c.Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func (c *Client) Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ const failedSubscribeSleep = time.Second * 5
+
+ // First check to see if the metadata value exists at all.
+ val, lastETag, err := c.getETag(suffix)
+ if err != nil {
+ return err
+ }
+
+ if err := fn(val, true); err != nil {
+ return err
+ }
+
+ ok := true
+ if strings.ContainsRune(suffix, '?') {
+ suffix += "&wait_for_change=true&last_etag="
+ } else {
+ suffix += "?wait_for_change=true&last_etag="
+ }
+ for {
+ val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
+ if err != nil {
+ if _, deleted := err.(NotDefinedError); !deleted {
+ time.Sleep(failedSubscribeSleep)
+ continue // Retry on other errors.
+ }
+ ok = false
+ }
+ lastETag = etag
+
+ if err := fn(val, ok); err != nil || !ok {
+ return err
+ }
+ }
+}
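+
+// A hedged usage sketch for Subscribe (illustrative only; the suffix is a
+// documented metadata path, and the handler body is made up):
+//
+//	err := metadata.Subscribe("instance/maintenance-event", func(v string, ok bool) error {
+//		if !ok {
+//			return nil // the value was deleted
+//		}
+//		log.Printf("maintenance event: %s", v)
+//		return nil
+//	})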
+
+// Error contains an error response from the server.
+type Error struct {
+ // Code is the HTTP response status code.
+ Code int
+ // Message is the server response message.
+ Message string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
+}
diff --git a/vendor/github.com/PuerkitoBio/goquery/LICENSE b/vendor/github.com/PuerkitoBio/goquery/LICENSE
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/README.md b/vendor/github.com/PuerkitoBio/goquery/README.md
old mode 100644
new mode 100755
index 41f6512..a098f44
--- a/vendor/github.com/PuerkitoBio/goquery/README.md
+++ b/vendor/github.com/PuerkitoBio/goquery/README.md
@@ -37,6 +37,7 @@ Please note that because of the net/html dependency, goquery requires Go1.1+.
**Note that goquery's API is now stable, and will not break.**
+* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
@@ -137,9 +138,12 @@ func main() {
- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
-- [asciimoo/colly](https://github.com/asciimoo/colly), a lightning fast and elegant Scraping Framework
+- [gocolly/colly](https://github.com/gocolly/colly), a lightning fast and elegant Scraping Framework
- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
+- [tacusci/berrycms](https://github.com/tacusci/berrycms), a modern, simple-to-use CMS with easy-to-write plugins
+- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
+- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
## Support
diff --git a/vendor/github.com/PuerkitoBio/goquery/array.go b/vendor/github.com/PuerkitoBio/goquery/array.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/doc.go b/vendor/github.com/PuerkitoBio/goquery/doc.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/expand.go b/vendor/github.com/PuerkitoBio/goquery/expand.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/filter.go b/vendor/github.com/PuerkitoBio/goquery/filter.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.mod b/vendor/github.com/PuerkitoBio/goquery/go.mod
new file mode 100755
index 0000000..2fa1332
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.mod
@@ -0,0 +1,6 @@
+module github.com/PuerkitoBio/goquery
+
+require (
+ github.com/andybalholm/cascadia v1.0.0
+ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a
+)
diff --git a/vendor/github.com/PuerkitoBio/goquery/go.sum b/vendor/github.com/PuerkitoBio/goquery/go.sum
new file mode 100755
index 0000000..11c5757
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/goquery/go.sum
@@ -0,0 +1,5 @@
+github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
+github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
diff --git a/vendor/github.com/PuerkitoBio/goquery/iteration.go b/vendor/github.com/PuerkitoBio/goquery/iteration.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/manipulation.go b/vendor/github.com/PuerkitoBio/goquery/manipulation.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/property.go b/vendor/github.com/PuerkitoBio/goquery/property.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/query.go b/vendor/github.com/PuerkitoBio/goquery/query.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/traversal.go b/vendor/github.com/PuerkitoBio/goquery/traversal.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/type.go b/vendor/github.com/PuerkitoBio/goquery/type.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/PuerkitoBio/goquery/utilities.go b/vendor/github.com/PuerkitoBio/goquery/utilities.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/Unknwon/goconfig/LICENSE b/vendor/github.com/Unknwon/goconfig/LICENSE
new file mode 100755
index 0000000..8405e89
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/Unknwon/goconfig/README.md b/vendor/github.com/Unknwon/goconfig/README.md
new file mode 100755
index 0000000..da6f144
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/README.md
@@ -0,0 +1,72 @@
+goconfig [Build Status](https://drone.io/github.com/Unknwon/goconfig/latest) [Go Walker](http://gowalker.org/github.com/Unknwon/goconfig)
+========
+
+[中文文档](README_ZH.md)
+
+**IMPORTANT**
+
+- This library is under bug fix only mode, which means no more features will be added.
+- I'm continuing working on better Go code with a different library: [ini](https://github.com/go-ini/ini).
+
+## About
+
+Package goconfig is an easy-to-use, comment-supporting configuration file parser for the Go programming language, providing a structure similar to what you would find in Microsoft Windows INI files.
+
+The configuration file consists of sections, led by a `[section]` header and followed by `name:value` or `name=value` entries. Note that leading whitespace is removed from values. The optional values can contain format strings which refer to other values in the same section, or values in a special DEFAULT section. Comments are indicated by ";" or "#"; comments may begin anywhere on a single line.
+
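+For illustration, here is a hedged sketch of the format described above (the section and key names are made up, not taken from the package's test data):
+
+```ini
+; keys before any section header belong to the DEFAULT section
+app_name = rssmon
+
+[server]
+# comments may start with ';' or '#'
+host = localhost
+; %(key)s refers to another value in this section or in DEFAULT
+greeting = hello from %(app_name)s
+```
+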
+## Features
+
+- It simplifies operation processes and is easy to use and understand; therefore, there are fewer chances for errors.
+- It accesses a configuration file in exactly the same way as the Windows APIs, so you don't need to change your code style.
+- It supports reading recursive sections.
+- It supports auto-increment of keys.
+- It supports **READ** and **WRITE** of configuration files with comments on each section or key, which most other parsers don't support.
+- It supports getting values as bool, float64, int, int64 and string; methods that start with "Must" ignore errors and return a zero-value if an error occurs, or you can specify a default value.
+- It's able to load multiple files, with later files overwriting key values.
+
+## Installation
+
+ go get github.com/Unknwon/goconfig
+
+Or
+
+ gopm get github.com/Unknwon/goconfig
+
+## API Documentation
+
+[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
+
+## Example
+
+Please see [conf.ini](testdata/conf.ini) as an example.
+
+### Usage
+
+- Function `LoadConfigFile` loads one or more files and returns a variable of type `ConfigFile`.
+- `GetValue` provides the basic functionality of getting the value of a given section and key.
+- Methods like `Bool`, `Int`, `Int64` return values of the corresponding type.
+- Methods starting with `Must` return a value of the corresponding type, or the zero-value of that type if something goes wrong.
+- `SetValue` sets the value for a given section and key, inserting it if it does not exist.
+- `DeleteKey` deletes the key in a given section.
+- Finally, `SaveConfigFile` saves your configuration to the local file system.
+- Use the method `Reload` in case someone else modified your file(s).
+- Methods containing `Comment` help you manipulate comments.
+- `LoadFromReader` allows loading data without an intermediate file.
+- `SaveConfigData` writes the configuration to an arbitrary writer.
+- `ReloadData` allows reloading data from memory.
+
+Note that you cannot mix in-memory configuration with on-disk configuration. A minimal usage sketch follows.
+
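+A minimal, hedged usage sketch of the workflow above (the file name, section, and key names are assumptions for illustration):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Unknwon/goconfig"
+)
+
+func main() {
+	// Load one or more files; later files overwrite earlier key values.
+	cfg, err := goconfig.LoadConfigFile("conf.ini")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Basic read; an error is returned if the section or key is missing.
+	host, err := cfg.GetValue("server", "host")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// "Must" variants swallow errors and fall back to the optional default.
+	port := cfg.MustInt("server", "port", 8080)
+	log.Printf("%s:%d", host, port)
+
+	// Write a value (the section is created if needed) and persist to disk.
+	cfg.SetValue("server", "host", "example.com")
+	if err := goconfig.SaveConfigFile(cfg, "conf.ini"); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+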
+## More Information
+
+- All characters are CASE SENSITIVE, BE CAREFUL!
+
+## Credits
+
+- [goconf](http://code.google.com/p/goconf/)
+- [robfig/config](https://github.com/robfig/config)
+- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/Unknwon/goconfig/README_ZH.md b/vendor/github.com/Unknwon/goconfig/README_ZH.md
new file mode 100755
index 0000000..d5fcbb9
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/README_ZH.md
@@ -0,0 +1,64 @@
+goconfig [Build Status](https://drone.io/github.com/Unknwon/goconfig/latest) [Go Walker](http://gowalker.org/github.com/Unknwon/goconfig)
+========
+
+本库已被 [《Go名库讲解》](https://github.com/Unknwon/go-rock-libraries-showcases/tree/master/lectures/01-goconfig) 收录讲解,欢迎前往学习如何使用!
+
+编码规范:基于 [Go 编码规范](https://github.com/Unknwon/go-code-convention)
+
+## 关于
+
+包 goconfig 是一个易于使用,支持注释的 Go 语言配置文件解析器,该文件的书写格式和 Windows 下的 INI 文件一样。
+
+配置文件由形为 `[section]` 的节构成,内部使用 `name:value` 或 `name=value` 这样的键值对;每行开头和尾部的空白符号都将被忽略;如果未指定任何节,则会默认放入名为 `DEFAULT` 的节当中;可以使用 “;” 或 “#” 来作为注释的开头,并可以放置于任意的单独一行中。
+
+## 特性
+
+- 简化流程,易于理解,更少出错。
+- 提供与 Windows API 一模一样的操作方式。
+- 支持读取递归节。
+- 支持自增键名。
+- 支持对注释的 **读** 和 **写** 操作,其它所有解析器都不支持!!!!
+- 可以直接返回 bool, float64, int, int64 和 string 类型的值,如果使用 “Must” 开头的方法,则一定会返回这个类型的一个值而不返回错误,如果错误发生则会返回零值。
+- 支持加载多个文件来重写值。
+
+## 安装
+
+ go get github.com/Unknwon/goconfig
+
+或
+
+ gopm get github.com/Unknwon/goconfig
+
+
+## API 文档
+
+[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig).
+
+## 示例
+
+请查看 [conf.ini](testdata/conf.ini) 文件作为使用示例。
+
+### 用例
+
+- 函数 `LoadConfigFile` 加载一个或多个文件,然后返回一个类型为 `ConfigFile` 的变量。
+- `GetValue` 可以简单的获取某个值。
+- 像 `Bool`、`Int`、`Int64` 这样的方法会直接返回指定类型的值。
+- 以 `Must` 开头的方法不会返回错误,但当错误发生时会返回零值。
+- `SetValue` 可以设置某个值。
+- `DeleteKey` 可以删除某个键。
+- 最后,`SaveConfigFile` 可以保持您的配置到本地文件系统。
+- 使用方法 `Reload` 可以重载您的配置文件。
+
+## 更多信息
+
+- 所有字符都是大小写敏感的!
+
+## 参考信息
+
+- [goconf](http://code.google.com/p/goconf/)
+- [robfig/config](https://github.com/robfig/config)
+- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
+
+## 授权许可
+
+本项目采用 Apache v2 开源授权许可证,完整的授权说明已放置在 [LICENSE](LICENSE) 文件中。
diff --git a/vendor/github.com/Unknwon/goconfig/conf.go b/vendor/github.com/Unknwon/goconfig/conf.go
new file mode 100755
index 0000000..4e7c8ad
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/conf.go
@@ -0,0 +1,555 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package goconfig is a fully functional, comment-supporting configuration file (.ini) parser.
+package goconfig
+
+import (
+ "fmt"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const (
+ // Default section name.
+ DEFAULT_SECTION = "DEFAULT"
+ // Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 200
+)
+
+type ParseError int
+
+const (
+ ERR_SECTION_NOT_FOUND ParseError = iota + 1
+ ERR_KEY_NOT_FOUND
+ ERR_BLANK_SECTION_NAME
+ ERR_COULD_NOT_PARSE
+)
+
+var LineBreak = "\n"
+
+// Variable regexp pattern: %(variable)s
+var varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+// A ConfigFile represents an INI format configuration file.
+type ConfigFile struct {
+ lock sync.RWMutex // Go map is not safe.
+ fileNames []string // Supports multiple files.
+ data map[string]map[string]string // Section -> key : value
+
+ // Lists can keep sections and keys in order.
+ sectionList []string // Section name list.
+ keyList map[string][]string // Section -> Key name list
+
+ sectionComments map[string]string // Section comments.
+ keyComments map[string]map[string]string // Key comments.
+ BlockMode bool // Indicates whether to use the lock or not.
+}
+
+// newConfigFile creates an empty configuration representation.
+func newConfigFile(fileNames []string) *ConfigFile {
+ c := new(ConfigFile)
+ c.fileNames = fileNames
+ c.data = make(map[string]map[string]string)
+ c.keyList = make(map[string][]string)
+ c.sectionComments = make(map[string]string)
+ c.keyComments = make(map[string]map[string]string)
+ c.BlockMode = true
+ return c
+}
+
+// SetValue adds a new section-key-value to the configuration.
+// It returns true if the key and value were inserted,
+// or returns false if the value was overwritten.
+// If the section does not exist in advance, it will be created.
+func (c *ConfigFile) SetValue(section, key, value string) bool {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+ if len(key) == 0 {
+ return false
+ }
+
+ if c.BlockMode {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+
+ // Check if section exists.
+ if _, ok := c.data[section]; !ok {
+ // Execute add operation.
+ c.data[section] = make(map[string]string)
+ // Append section to list.
+ c.sectionList = append(c.sectionList, section)
+ }
+
+ // Check if key exists.
+ _, ok := c.data[section][key]
+ c.data[section][key] = value
+ if !ok {
+ // If not exists, append to key list.
+ c.keyList[section] = append(c.keyList[section], key)
+ }
+ return !ok
+}
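+
+// A hedged example of the return semantics above (section, key and values
+// are made up):
+//
+//	c.SetValue("server", "host", "a") // returns true: key inserted
+//	c.SetValue("server", "host", "b") // returns false: value overwritten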
+
+// DeleteKey deletes the key in given section.
+// It returns true if the key was deleted,
+// or returns false if the section or key didn't exist.
+func (c *ConfigFile) DeleteKey(section, key string) bool {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if c.BlockMode {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+
+ // Check if section exists.
+ if _, ok := c.data[section]; !ok {
+ return false
+ }
+
+ // Check if key exists.
+ if _, ok := c.data[section][key]; ok {
+ delete(c.data[section], key)
+ // Remove comments of key.
+ c.SetKeyComments(section, key, "")
+ // Get index of key.
+ i := 0
+ for _, keyName := range c.keyList[section] {
+ if keyName == key {
+ break
+ }
+ i++
+ }
+ // Remove from key list.
+ c.keyList[section] = append(c.keyList[section][:i], c.keyList[section][i+1:]...)
+ return true
+ }
+ return false
+}
+
+// GetValue returns the value of the key available in the given section.
+// If the value needs to be unfolded
+// (see e.g. the %(google)s example in GoConfig_test.go),
+// then GetValue does this unfolding automatically, up to
+// _DEPTH_VALUES iterations.
+// It returns an error and an empty string value if the section does not exist,
+// or the key does not exist in the DEFAULT and current sections.
+func (c *ConfigFile) GetValue(section, key string) (string, error) {
+ if c.BlockMode {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ }
+
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ // Check if section exists
+ if _, ok := c.data[section]; !ok {
+ // Section does not exist.
+ return "", getError{ERR_SECTION_NOT_FOUND, section}
+ }
+
+ // Section exists.
+ // Check if key exists or empty value.
+ value, ok := c.data[section][key]
+ if !ok {
+ // Check if it is a sub-section.
+ if i := strings.LastIndex(section, "."); i > -1 {
+ return c.GetValue(section[:i], key)
+ }
+
+ // Return empty value.
+ return "", getError{ERR_KEY_NOT_FOUND, key}
+ }
+
+ // Key exists.
+ var i int
+ for i = 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(value)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search variable in default section.
+ nvalue, err := c.GetValue(DEFAULT_SECTION, noption)
+ if err != nil && section != DEFAULT_SECTION {
+ // Search in the same section.
+ if _, ok := c.data[section][noption]; ok {
+ nvalue = c.data[section][noption]
+ }
+ }
+
+ // Substitute the variable reference with the resolved value.
+ value = strings.Replace(value, vr, nvalue, -1)
+ }
+ return value, nil
+}
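+
+// A hedged illustration of the unfolding above (the file content is made up):
+//
+//	[DEFAULT]
+//	base = http://example.com
+//
+//	[api]
+//	endpoint = %(base)s/v1
+//
+// GetValue("api", "endpoint") would return "http://example.com/v1".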
+
+// Bool returns bool type value.
+func (c *ConfigFile) Bool(section, key string) (bool, error) {
+ value, err := c.GetValue(section, key)
+ if err != nil {
+ return false, err
+ }
+ return strconv.ParseBool(value)
+}
+
+// Float64 returns float64 type value.
+func (c *ConfigFile) Float64(section, key string) (float64, error) {
+ value, err := c.GetValue(section, key)
+ if err != nil {
+ return 0.0, err
+ }
+ return strconv.ParseFloat(value, 64)
+}
+
+// Int returns int type value.
+func (c *ConfigFile) Int(section, key string) (int, error) {
+ value, err := c.GetValue(section, key)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(value)
+}
+
+// Int64 returns int64 type value.
+func (c *ConfigFile) Int64(section, key string) (int64, error) {
+ value, err := c.GetValue(section, key)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(value, 10, 64)
+}
+
+// MustValue always returns value without error.
+// It returns empty string if error occurs, or the default value if given.
+func (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string {
+ val, err := c.GetValue(section, key)
+ if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustValueSet always returns a value without error.
+// It returns an empty string if an error occurs, or the default value if given,
+// plus a bool indicating whether the default value was returned (and stored via SetValue).
+func (c *ConfigFile) MustValueSet(section, key string, defaultVal ...string) (string, bool) {
+ val, err := c.GetValue(section, key)
+ if len(defaultVal) > 0 && (err != nil || len(val) == 0) {
+ c.SetValue(section, key, defaultVal[0])
+ return defaultVal[0], true
+ }
+ return val, false
+}
+
+// MustValueRange always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (c *ConfigFile) MustValueRange(section, key, defaultVal string, candidates []string) string {
+ val, err := c.GetValue(section, key)
+ if err != nil || len(val) == 0 {
+ return defaultVal
+ }
+
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// MustValueArray always returns a value array without error;
+// it returns an empty array if an error occurs, otherwise the value split by the delimiter.
+func (c *ConfigFile) MustValueArray(section, key, delim string) []string {
+ val, err := c.GetValue(section, key)
+ if err != nil || len(val) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(val, delim)
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// MustBool always returns a value without error;
+// it returns false if an error occurs (or the default value, if given).
+func (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool {
+ val, err := c.Bool(section, key)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns a value without error;
+// it returns 0.0 if an error occurs (or the default value, if given).
+func (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 {
+ value, err := c.Float64(section, key)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return value
+}
+
+// MustInt always returns a value without error;
+// it returns 0 if an error occurs (or the default value, if given).
+func (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int {
+ value, err := c.Int(section, key)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return value
+}
+
+// MustInt64 always returns a value without error;
+// it returns 0 if an error occurs (or the default value, if given).
+func (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 {
+ value, err := c.Int64(section, key)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return value
+}
+
+// GetSectionList returns the list of all sections
+// in the same order as in the file.
+func (c *ConfigFile) GetSectionList() []string {
+ list := make([]string, len(c.sectionList))
+ copy(list, c.sectionList)
+ return list
+}
+
+// GetKeyList returns the list of all keys in the given section
+// in the same order as in the file.
+// It returns nil if the given section does not exist.
+func (c *ConfigFile) GetKeyList(section string) []string {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if c.BlockMode {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ }
+
+ // Check if section exists.
+ if _, ok := c.data[section]; !ok {
+ return nil
+ }
+
+ // A non-default section keeps a blank key (" ") as a placeholder; skip it.
+ list := make([]string, 0, len(c.keyList[section]))
+ for _, key := range c.keyList[section] {
+ if key != " " {
+ list = append(list, key)
+ }
+ }
+ return list
+}
+
+// DeleteSection deletes the entire section by given name.
+// It returns true if the section was deleted, and false if the section didn't exist.
+func (c *ConfigFile) DeleteSection(section string) bool {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if c.BlockMode {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+
+ // Check if section exists.
+ if _, ok := c.data[section]; !ok {
+ return false
+ }
+
+ delete(c.data, section)
+ // Remove comments of section.
+ c.SetSectionComments(section, "")
+ // Get index of section.
+ i := 0
+ for _, secName := range c.sectionList {
+ if secName == section {
+ break
+ }
+ i++
+ }
+ // Remove from section and key list.
+ c.sectionList = append(c.sectionList[:i], c.sectionList[i+1:]...)
+ delete(c.keyList, section)
+ return true
+}
+
+// GetSection returns the key-value pairs in the given section.
+// It returns nil and an error if the section does not exist.
+func (c *ConfigFile) GetSection(section string) (map[string]string, error) {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if c.BlockMode {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ }
+
+ // Check if section exists.
+ if _, ok := c.data[section]; !ok {
+ // Section does not exist.
+ return nil, getError{ERR_SECTION_NOT_FOUND, section}
+ }
+
+ // Remove pre-defined key.
+ secMap := c.data[section]
+ delete(c.data[section], " ")
+
+ // Section exists.
+ return secMap, nil
+}
+
+// SetSectionComments adds new section comments to the configuration.
+// If comments are empty (zero length), the section comments are removed.
+// It returns true if the comments were inserted or removed,
+// or returns false if the comments were overwritten.
+func (c *ConfigFile) SetSectionComments(section, comments string) bool {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if len(comments) == 0 {
+ if _, ok := c.sectionComments[section]; ok {
+ delete(c.sectionComments, section)
+ }
+
+ // Comments that do not exist can be treated as removed.
+ return true
+ }
+
+ // Check if comments exist.
+ _, ok := c.sectionComments[section]
+ if comments[0] != '#' && comments[0] != ';' {
+ comments = "; " + comments
+ }
+ c.sectionComments[section] = comments
+ return !ok
+}
+
+// SetKeyComments adds new section-key comments to the configuration.
+// If comments are empty (zero length), the section-key comments are removed.
+// It returns true if the comments were inserted or removed,
+// or returns false if the comments were overwritten.
+// If the section does not exist in advance, it is created.
+func (c *ConfigFile) SetKeyComments(section, key, comments string) bool {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ // Check if section exists.
+ if _, ok := c.keyComments[section]; ok {
+ if len(comments) == 0 {
+ if _, ok := c.keyComments[section][key]; ok {
+ delete(c.keyComments[section], key)
+ }
+
+ // Comments that do not exist can be treated as removed.
+ return true
+ }
+ } else {
+ if len(comments) == 0 {
+ // Comments that do not exist can be treated as removed.
+ return true
+ } else {
+ // Execute add operation.
+ c.keyComments[section] = make(map[string]string)
+ }
+ }
+
+ // Check if key exists.
+ _, ok := c.keyComments[section][key]
+ if comments[0] != '#' && comments[0] != ';' {
+ comments = "; " + comments
+ }
+ c.keyComments[section][key] = comments
+ return !ok
+}
+
+// GetSectionComments returns the comments in the given section.
+// It returns an empty string (zero length) if the comments do not exist.
+func (c *ConfigFile) GetSectionComments(section string) (comments string) {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+ return c.sectionComments[section]
+}
+
+// GetKeyComments returns the comments of key in the given section.
+// It returns an empty string (zero length) if the comments do not exist.
+func (c *ConfigFile) GetKeyComments(section, key string) (comments string) {
+ // Blank section name represents DEFAULT section.
+ if len(section) == 0 {
+ section = DEFAULT_SECTION
+ }
+
+ if _, ok := c.keyComments[section]; ok {
+ return c.keyComments[section][key]
+ }
+ return ""
+}
+
+// getError is returned when a value lookup fails due to a missing section or key.
+type getError struct {
+ Reason ParseError
+ Name string
+}
+
+// Error implements the error interface.
+func (err getError) Error() string {
+ switch err.Reason {
+ case ERR_SECTION_NOT_FOUND:
+ return fmt.Sprintf("section '%s' not found", err.Name)
+ case ERR_KEY_NOT_FOUND:
+ return fmt.Sprintf("key '%s' not found", err.Name)
+ }
+ return "invalid get error"
+}
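
A short sketch of the comments API defined above (cfg is a *ConfigFile as in the earlier sketches; names invented):

    // Comments without a leading '#' or ';' get "; " prepended automatically.
    cfg.SetSectionComments("server", "core HTTP settings")
    cfg.SetKeyComments("server", "port", "# listening port")
    fmt.Println(cfg.GetSectionComments("server"))     // "; core HTTP settings"
    fmt.Println(cfg.GetKeyComments("server", "port")) // "# listening port"
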
diff --git a/vendor/github.com/Unknwon/goconfig/read.go b/vendor/github.com/Unknwon/goconfig/read.go
new file mode 100755
index 0000000..6afb071
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/read.go
@@ -0,0 +1,294 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package goconfig
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
+// read parses an io.Reader into the configuration representation.
+// This representation can be queried with GetValue.
+func (c *ConfigFile) read(reader io.Reader) (err error) {
+ buf := bufio.NewReader(reader)
+
+ // Handle BOM-UTF8.
+ // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+ mask, err := buf.Peek(3)
+ if err == nil && len(mask) >= 3 &&
+ mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+ buf.Read(mask)
+ }
+
+ count := 1 // Counter for auto increment.
+ // Current section name.
+ section := DEFAULT_SECTION
+ var comments string
+ // Parse line-by-line
+ for {
+ line, err := buf.ReadString('\n')
+ line = strings.TrimSpace(line)
+ lineLength := len(line) //[SWH|+]
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+
+ // Reached end of file, if nothing to read then break,
+ // otherwise handle the last line.
+ if lineLength == 0 {
+ break
+ }
+ }
+
+ // switch written for readability (not performance)
+ switch {
+ case lineLength == 0: // Empty line
+ continue
+ case line[0] == '#' || line[0] == ';': // Comment
+ // Append comments
+ if len(comments) == 0 {
+ comments = line
+ } else {
+ comments += LineBreak + line
+ }
+ continue
+ case line[0] == '[' && line[lineLength-1] == ']': // New section.
+ // Get section name.
+ section = strings.TrimSpace(line[1 : lineLength-1])
+ // Attach pending comments to the section and clear the buffer.
+ if len(comments) > 0 {
+ c.SetSectionComments(section, comments)
+ comments = ""
+ }
+ // Make section exist even though it does not have any key.
+ c.SetValue(section, " ", " ")
+ // Reset counter.
+ count = 1
+ continue
+ case section == "": // No section defined so far
+ return readError{ERR_BLANK_SECTION_NAME, line}
+ default: // Other alternatives
+ var (
+ i int
+ keyQuote string
+ key string
+ valQuote string
+ value string
+ )
+ //[SWH|+]: support keys wrapped in quotes
+ if line[0] == '"' {
+ if lineLength >= 6 && line[0:3] == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+ if keyQuote != "" {
+ qLen := len(keyQuote)
+ pos := strings.Index(line[qLen:], keyQuote)
+ if pos == -1 {
+ return readError{ERR_COULD_NOT_PARSE, line}
+ }
+ pos = pos + qLen
+ i = strings.IndexAny(line[pos:], "=:")
+ if i <= 0 {
+ return readError{ERR_COULD_NOT_PARSE, line}
+ }
+ i = i + pos
+ key = line[qLen:pos] // preserve spaces at both ends inside the quotes
+ } else {
+ i = strings.IndexAny(line, "=:")
+ if i <= 0 {
+ return readError{ERR_COULD_NOT_PARSE, line}
+ }
+ key = strings.TrimSpace(line[0:i])
+ }
+ //[SWH|+];
+
+ // Check if it needs auto increment.
+ if key == "-" {
+ key = "#" + fmt.Sprint(count)
+ count++
+ }
+
+ //[SWH|+]: support values wrapped in quotes
+ lineRight := strings.TrimSpace(line[i+1:])
+ lineRightLength := len(lineRight)
+ firstChar := ""
+ if lineRightLength >= 2 {
+ firstChar = lineRight[0:1]
+ }
+ if firstChar == "`" {
+ valQuote = "`"
+ } else if lineRightLength >= 6 && lineRight[0:3] == `"""` {
+ valQuote = `"""`
+ }
+ if valQuote != "" {
+ qLen := len(valQuote)
+ pos := strings.LastIndex(lineRight[qLen:], valQuote)
+ if pos == -1 {
+ return readError{ERR_COULD_NOT_PARSE, line}
+ }
+ pos = pos + qLen
+ value = lineRight[qLen:pos]
+ } else {
+ value = strings.TrimSpace(lineRight[0:])
+ }
+ //[SWH|+];
+
+ c.SetValue(section, key, value)
+ // Attach pending comments to the key and clear the buffer.
+ if len(comments) > 0 {
+ c.SetKeyComments(section, key, comments)
+ comments = ""
+ }
+ }
+
+ // Reached end of file.
+ if err == io.EOF {
+ break
+ }
+ }
+ return nil
+}
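
The syntax the parser accepts can be summarized with a small input; a sketch fed through LoadFromData below (names invented):

    raw := []byte("; attached to the [paths] section\n" +
        "[paths]\n" +
        "`key=with:colons` = /tmp\n" + // quoted keys may contain '=' or ':'
        "- = first\n" +                // '-' keys auto-increment: #1, #2, ...
        "- = second\n")
    cfg, _ := goconfig.LoadFromData(raw)
    v, _ := cfg.GetValue("paths", "#1") // "first"
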
+
+// LoadFromData accepts raw data directly from memory
+// and returns a new configuration representation.
+// Note that the configuration is written to the system
+// temporary folder, so your file should not contain
+// sensitive information.
+func LoadFromData(data []byte) (c *ConfigFile, err error) {
+ // Save memory data to temporary file to support further operations.
+ tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().Nanosecond()))
+ if err = os.MkdirAll(path.Dir(tmpName), os.ModePerm); err != nil {
+ return nil, err
+ }
+ if err = ioutil.WriteFile(tmpName, data, 0655); err != nil {
+ return nil, err
+ }
+
+ c = newConfigFile([]string{tmpName})
+ err = c.read(bytes.NewBuffer(data))
+ return c, err
+}
+
+// LoadFromReader accepts raw data directly from a reader
+// and returns a new configuration representation.
+// You must use ReloadData to reload.
+// You cannot append files to a ConfigFile read this way.
+func LoadFromReader(in io.Reader) (c *ConfigFile, err error) {
+ c = newConfigFile([]string{""})
+ err = c.read(in)
+ return c, err
+}
+
+func (c *ConfigFile) loadFile(fileName string) (err error) {
+ f, err := os.Open(fileName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return c.read(f)
+}
+
+// LoadConfigFile reads a file and returns a new configuration representation.
+// This representation can be queried with GetValue.
+func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err error) {
+ // Append files' name together.
+ fileNames := make([]string, 1, len(moreFiles)+1)
+ fileNames[0] = fileName
+ if len(moreFiles) > 0 {
+ fileNames = append(fileNames, moreFiles...)
+ }
+
+ c = newConfigFile(fileNames)
+
+ for _, name := range fileNames {
+ if err = c.loadFile(name); err != nil {
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
+
+// Reload reloads the configuration file in case it has changed.
+func (c *ConfigFile) Reload() (err error) {
+ var cfg *ConfigFile
+ if len(c.fileNames) == 1 {
+ if c.fileNames[0] == "" {
+ return fmt.Errorf("file opened from in-memory data, use ReloadData to reload")
+ }
+ cfg, err = LoadConfigFile(c.fileNames[0])
+ } else {
+ cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...)
+ }
+
+ if err == nil {
+ *c = *cfg
+ }
+ return err
+}
+
+// ReloadData reloads the configuration from in-memory data.
+func (c *ConfigFile) ReloadData(in io.Reader) (err error) {
+ var cfg *ConfigFile
+ if len(c.fileNames) != 1 {
+ return fmt.Errorf("Multiple files loaded, unable to mix in-memory and file data")
+ }
+
+ cfg, err = LoadFromReader(in)
+ if err == nil {
+ *c = *cfg
+ }
+ return err
+}
+
+// AppendFiles appends more files to the ConfigFile and reloads automatically.
+func (c *ConfigFile) AppendFiles(files ...string) error {
+ if len(c.fileNames) == 1 && c.fileNames[0] == "" {
+ return fmt.Errorf("Cannot append file data to in-memory data")
+ }
+ c.fileNames = append(c.fileNames, files...)
+ return c.Reload()
+}
+
+// readError is returned when the configuration file cannot be parsed.
+type readError struct {
+ Reason ParseError
+ Content string // Line content
+}
+
+// Error implements the error interface.
+func (err readError) Error() string {
+ switch err.Reason {
+ case ERR_BLANK_SECTION_NAME:
+ return "empty section name not allowed"
+ case ERR_COULD_NOT_PARSE:
+ return fmt.Sprintf("could not parse line: %s", string(err.Content))
+ }
+ return "invalid read error"
+}
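
Loading and reloading compose as in this sketch (the file paths are hypothetical):

    cfg, err := goconfig.LoadConfigFile("base.ini", "override.ini") // later files win on conflicting keys
    if err != nil {
        panic(err)
    }
    err = cfg.Reload()                 // re-reads all backing files
    err = cfg.AppendFiles("extra.ini") // adds a file and reloads
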
diff --git a/vendor/github.com/Unknwon/goconfig/write.go b/vendor/github.com/Unknwon/goconfig/write.go
new file mode 100755
index 0000000..af36d1a
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/write.go
@@ -0,0 +1,117 @@
+// Copyright 2013 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package goconfig
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "strings"
+)
+
+// PrettyFormat, when true, writes spaces around "=" for readability.
+var PrettyFormat = true
+
+// SaveConfigData writes the configuration to a writer.
+func SaveConfigData(c *ConfigFile, out io.Writer) (err error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+ buf := bytes.NewBuffer(nil)
+ for _, section := range c.sectionList {
+ // Write section comments.
+ if len(c.GetSectionComments(section)) > 0 {
+ if _, err = buf.WriteString(c.GetSectionComments(section) + LineBreak); err != nil {
+ return err
+ }
+ }
+
+ if section != DEFAULT_SECTION {
+ // Write section name.
+ if _, err = buf.WriteString("[" + section + "]" + LineBreak); err != nil {
+ return err
+ }
+ }
+
+ for _, key := range c.keyList[section] {
+ if key != " " {
+ // Write key comments.
+ if len(c.GetKeyComments(section, key)) > 0 {
+ if _, err = buf.WriteString(c.GetKeyComments(section, key) + LineBreak); err != nil {
+ return err
+ }
+ }
+
+ keyName := key
+ // Check if it's auto increment.
+ if keyName[0] == '#' {
+ keyName = "-"
+ }
+ //[SWH|+]: support key names containing '=' or ':'
+ if strings.Contains(keyName, `=`) || strings.Contains(keyName, `:`) {
+ if strings.Contains(keyName, "`") {
+ if strings.Contains(keyName, `"`) {
+ keyName = `"""` + keyName + `"""`
+ } else {
+ keyName = `"` + keyName + `"`
+ }
+ } else {
+ keyName = "`" + keyName + "`"
+ }
+ }
+ value := c.data[section][key]
+ // In case key value contains "`" or "\"".
+ if strings.Contains(value, "`") {
+ if strings.Contains(value, `"`) {
+ value = `"""` + value + `"""`
+ } else {
+ value = `"` + value + `"`
+ }
+ }
+
+ // Write key and value.
+ if _, err = buf.WriteString(keyName + equalSign + value + LineBreak); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Put a line between sections.
+ if _, err = buf.WriteString(LineBreak); err != nil {
+ return err
+ }
+ }
+
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// SaveConfigFile writes the configuration to the local file system.
+func SaveConfigFile(c *ConfigFile, filename string) (err error) {
+ // Write configuration file by filename.
+ var f *os.File
+ if f, err = os.Create(filename); err != nil {
+ return err
+ }
+
+ if err := SaveConfigData(c, f); err != nil {
+ return err
+ }
+ return f.Close()
+}
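
A sketch of writing a configuration back out (cfg and the path are as in the earlier sketches):

    goconfig.PrettyFormat = true // write "key = value" rather than "key=value"
    if err := goconfig.SaveConfigFile(cfg, "app.ini"); err != nil {
        panic(err)
    }
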
diff --git a/vendor/github.com/abbot/go-http-auth/LICENSE b/vendor/github.com/abbot/go-http-auth/LICENSE
new file mode 100755
index 0000000..e454a52
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/LICENSE
@@ -0,0 +1,178 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
diff --git a/vendor/github.com/abbot/go-http-auth/Makefile b/vendor/github.com/abbot/go-http-auth/Makefile
new file mode 100755
index 0000000..25f208d
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/Makefile
@@ -0,0 +1,12 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=auth_digest
+GOFILES=\
+ auth.go\
+ digest.go\
+ basic.go\
+ misc.go\
+ md5crypt.go\
+ users.go\
+
+include $(GOROOT)/src/Make.pkg
diff --git a/vendor/github.com/abbot/go-http-auth/README.md b/vendor/github.com/abbot/go-http-auth/README.md
new file mode 100755
index 0000000..73ae985
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/README.md
@@ -0,0 +1,71 @@
+HTTP Authentication implementation in Go
+========================================
+
+This is an implementation of HTTP Basic and HTTP Digest authentication
+in the Go language. It is designed as a simple wrapper for
+http.HandlerFunc-style request handlers.
+
+Features
+--------
+
+ * Supports HTTP Basic and HTTP Digest authentication.
+ * Supports htpasswd and htdigest formatted files.
+ * Automatic reloading of password files.
+ * Pluggable interface for user/password storage.
+ * Supports MD5, SHA1 and BCrypt for Basic authentication password storage.
+ * Configurable Digest nonce cache size with expiration.
+ * Wrapper for legacy http handlers (http.HandlerFunc interface)
+
+Example usage
+-------------
+
+This is a complete working example for Basic auth:
+
+ package main
+
+ import (
+ "fmt"
+ "net/http"
+
+ auth "github.com/abbot/go-http-auth"
+ )
+
+ func Secret(user, realm string) string {
+ if user == "john" {
+ // password is "hello"
+ return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1"
+ }
+ return ""
+ }
+
+ func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
+ fmt.Fprintf(w, "<html><body><h1>Hello, %s!</h1></body></html>", r.Username)
+ }
+
+ func main() {
+ authenticator := auth.NewBasicAuthenticator("example.com", Secret)
+ http.HandleFunc("/", authenticator.Wrap(handle))
+ http.ListenAndServe(":8080", nil)
+ }
+
+See more examples in the "examples" directory.
+
+Legal
+-----
+
+This module is developed under the Apache 2.0 license, and can be used
+for open and proprietary projects.
+
+Copyright 2012-2013 Lev Shamardin
+
+Licensed under the Apache License, Version 2.0 (the "License"); you
+may not use this file or any other part of this project except in
+compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied. See the License for the specific language governing
+permissions and limitations under the License.
diff --git a/vendor/github.com/abbot/go-http-auth/auth.go b/vendor/github.com/abbot/go-http-auth/auth.go
new file mode 100755
index 0000000..05ded16
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/auth.go
@@ -0,0 +1,109 @@
+// Package auth is an implementation of HTTP Basic and HTTP Digest authentication.
+package auth
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
+/*
+ AuthenticatedRequest is what request handlers must take instead of http.Request.
+*/
+type AuthenticatedRequest struct {
+ http.Request
+ /*
+ Authenticated user name. Current API implies that Username is
+ never empty, which means that authentication is always done
+ before calling the request handler.
+ */
+ Username string
+}
+
+/*
+ AuthenticatedHandlerFunc is like http.HandlerFunc, but takes
+ AuthenticatedRequest instead of http.Request
+*/
+type AuthenticatedHandlerFunc func(http.ResponseWriter, *AuthenticatedRequest)
+
+/*
+ Authenticator wraps an AuthenticatedHandlerFunc with
+ authentication-checking code.
+
+ Typical Authenticator usage is something like:
+
+ authenticator := SomeAuthenticator(...)
+ http.HandleFunc("/", authenticator(my_handler))
+
+ Authenticator wrapper checks the user authentication and calls the
+ wrapped function only after authentication has succeeded. Otherwise,
+ it returns a handler which initiates the authentication procedure.
+*/
+type Authenticator func(AuthenticatedHandlerFunc) http.HandlerFunc
+
+// Info contains authentication information for the request.
+type Info struct {
+ // Authenticated is set to true when request was authenticated
+ // successfully, i.e. username and password passed in request did
+ // pass the check.
+ Authenticated bool
+
+ // Username contains a user name passed in the request when
+ // Authenticated is true. Its value is undefined if Authenticated
+ // is false.
+ Username string
+
+ // ResponseHeaders contains extra headers that must be set by server
+ // when sending back HTTP response.
+ ResponseHeaders http.Header
+}
+
+// UpdateHeaders updates headers with this Info's ResponseHeaders. It is
+// safe to call this function on nil Info.
+func (i *Info) UpdateHeaders(headers http.Header) {
+ if i == nil {
+ return
+ }
+ for k, values := range i.ResponseHeaders {
+ for _, v := range values {
+ headers.Add(k, v)
+ }
+ }
+}
+
+type key int // used for context keys
+
+var infoKey key = 0
+
+type AuthenticatorInterface interface {
+ // NewContext returns a new context carrying authentication
+ // information extracted from the request.
+ NewContext(ctx context.Context, r *http.Request) context.Context
+
+ // Wrap returns an http.HandlerFunc which wraps
+ // AuthenticatedHandlerFunc with this authenticator's
+ // authentication checks.
+ Wrap(AuthenticatedHandlerFunc) http.HandlerFunc
+}
+
+// FromContext returns authentication information from the context or
+// nil if no such information present.
+func FromContext(ctx context.Context) *Info {
+ info, ok := ctx.Value(infoKey).(*Info)
+ if !ok {
+ return nil
+ }
+ return info
+}
+
+// AuthUsernameHeader is the header set by JustCheck functions. It
+// contains an authenticated username (if authentication was
+// successful).
+const AuthUsernameHeader = "X-Authenticated-Username"
+
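+// JustCheck wraps a plain http.HandlerFunc with the given authenticator,
+// passing the authenticated username via the AuthUsernameHeader header.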
+func JustCheck(auth AuthenticatorInterface, wrapped http.HandlerFunc) http.HandlerFunc {
+ return auth.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
+ ar.Header.Set(AuthUsernameHeader, ar.Username)
+ wrapped(w, &ar.Request)
+ })
+}
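
As a sketch of the context-based flow these pieces enable (the handler below is hypothetical; authenticator may be any AuthenticatorInterface, such as a *BasicAuth, and context is the golang.org/x/net/context package used by this code):

    func statusHandler(authenticator auth.AuthenticatorInterface) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            ctx := authenticator.NewContext(context.Background(), r)
            info := auth.FromContext(ctx)
            info.UpdateHeaders(w.Header()) // safe even if info is nil
            if info == nil || !info.Authenticated {
                http.Error(w, "Unauthorized", http.StatusUnauthorized)
                return
            }
            fmt.Fprintf(w, "hello, %s", info.Username)
        }
    }
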
diff --git a/vendor/github.com/abbot/go-http-auth/basic.go b/vendor/github.com/abbot/go-http-auth/basic.go
new file mode 100755
index 0000000..b03dd58
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/basic.go
@@ -0,0 +1,163 @@
+package auth
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "crypto/subtle"
+ "encoding/base64"
+ "errors"
+ "net/http"
+ "strings"
+
+ "golang.org/x/crypto/bcrypt"
+ "golang.org/x/net/context"
+)
+
+type compareFunc func(hashedPassword, password []byte) error
+
+var (
+ errMismatchedHashAndPassword = errors.New("mismatched hash and password")
+
+ compareFuncs = []struct {
+ prefix string
+ compare compareFunc
+ }{
+ {"", compareMD5HashAndPassword}, // default compareFunc
+ {"{SHA}", compareShaHashAndPassword},
+ // Bcrypt is complicated. According to crypt(3) from
+ // crypt_blowfish version 1.3 (fetched from
+ // http://www.openwall.com/crypt/crypt_blowfish-1.3.tar.gz), there
+ // are three different hash prefixes: "$2a$", used by versions up
+ // to 1.0.4, and "$2x$" and "$2y$", used in all later
+ // versions. "$2a$" has a known bug, "$2x$" was added as a
+ // migration path for systems with "$2a$" prefix and still has a
+ // bug, and only "$2y$" should be used by modern systems. The bug
+ // has something to do with handling of 8-bit characters. Since
+ // both "$2a$" and "$2x$" are deprecated, we are handling them the
+ // same way as "$2y$", which will yield correct results for 7-bit
+ // character passwords, but is wrong for 8-bit character
+ // passwords. You have to upgrade to "$2y$" if you want sane 8-bit
+ // character password support with bcrypt. To add to the mess,
+ // OpenBSD 5.5 introduced the "$2b$" prefix, which behaves exactly
+ // like "$2y$" according to the same source.
+ {"$2a$", bcrypt.CompareHashAndPassword},
+ {"$2b$", bcrypt.CompareHashAndPassword},
+ {"$2x$", bcrypt.CompareHashAndPassword},
+ {"$2y$", bcrypt.CompareHashAndPassword},
+ }
+)
+
+type BasicAuth struct {
+ Realm string
+ Secrets SecretProvider
+ // Headers used by authenticator. Set to ProxyHeaders to use with
+ // proxy server. When nil, NormalHeaders are used.
+ Headers *Headers
+}
+
+// check that BasicAuth implements AuthenticatorInterface
+var _ = (AuthenticatorInterface)((*BasicAuth)(nil))
+
+/*
+ CheckAuth checks the username/password combination from the request.
+ It returns either an empty string (authentication failed) or the name
+ of the authenticated user.
+
+ Supports MD5, SHA1 and bcrypt password entries.
+*/
+func (a *BasicAuth) CheckAuth(r *http.Request) string {
+ s := strings.SplitN(r.Header.Get(a.Headers.V().Authorization), " ", 2)
+ if len(s) != 2 || s[0] != "Basic" {
+ return ""
+ }
+
+ b, err := base64.StdEncoding.DecodeString(s[1])
+ if err != nil {
+ return ""
+ }
+ pair := strings.SplitN(string(b), ":", 2)
+ if len(pair) != 2 {
+ return ""
+ }
+ user, password := pair[0], pair[1]
+ secret := a.Secrets(user, a.Realm)
+ if secret == "" {
+ return ""
+ }
+ compare := compareFuncs[0].compare
+ for _, cmp := range compareFuncs[1:] {
+ if strings.HasPrefix(secret, cmp.prefix) {
+ compare = cmp.compare
+ break
+ }
+ }
+ if compare([]byte(secret), []byte(password)) != nil {
+ return ""
+ }
+ return pair[0]
+}
+
+func compareShaHashAndPassword(hashedPassword, password []byte) error {
+ d := sha1.New()
+ d.Write(password)
+ if subtle.ConstantTimeCompare(hashedPassword[5:], []byte(base64.StdEncoding.EncodeToString(d.Sum(nil)))) != 1 {
+ return errMismatchedHashAndPassword
+ }
+ return nil
+}
+
+func compareMD5HashAndPassword(hashedPassword, password []byte) error {
+ parts := bytes.SplitN(hashedPassword, []byte("$"), 4)
+ if len(parts) != 4 {
+ return errMismatchedHashAndPassword
+ }
+ magic := []byte("$" + string(parts[1]) + "$")
+ salt := parts[2]
+ if subtle.ConstantTimeCompare(hashedPassword, MD5Crypt(password, salt, magic)) != 1 {
+ return errMismatchedHashAndPassword
+ }
+ return nil
+}
+
+/*
+ RequireAuth is the http.Handler for BasicAuth which initiates the authentication process
+ (or requires reauthentication).
+*/
+func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set(contentType, a.Headers.V().UnauthContentType)
+ w.Header().Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`)
+ w.WriteHeader(a.Headers.V().UnauthCode)
+ w.Write([]byte(a.Headers.V().UnauthResponse))
+}
+
+/*
+ Wrap returns an http.HandlerFunc which wraps an
+ AuthenticatedHandlerFunc. The wrapper checks the authentication
+ and either sends back the required authentication headers, or calls
+ the wrapped function with the authenticated username in the
+ AuthenticatedRequest.
+*/
+func (a *BasicAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if username := a.CheckAuth(r); username == "" {
+ a.RequireAuth(w, r)
+ } else {
+ ar := &AuthenticatedRequest{Request: *r, Username: username}
+ wrapped(w, ar)
+ }
+ }
+}
+
+// NewContext returns a context carrying authentication information for the request.
+func (a *BasicAuth) NewContext(ctx context.Context, r *http.Request) context.Context {
+ info := &Info{Username: a.CheckAuth(r), ResponseHeaders: make(http.Header)}
+ info.Authenticated = (info.Username != "")
+ if !info.Authenticated {
+ info.ResponseHeaders.Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`)
+ }
+ return context.WithValue(ctx, infoKey, info)
+}
+
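+// NewBasicAuthenticator returns a BasicAuth authenticator for the given
+// realm and secrets provider.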
+func NewBasicAuthenticator(realm string, secrets SecretProvider) *BasicAuth {
+ return &BasicAuth{Realm: realm, Secrets: secrets}
+}
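
A minimal usage sketch combining BasicAuth with the htpasswd provider from users.go (the file path and port are hypothetical):

    secrets := auth.HtpasswdFileProvider("/etc/httpd/.htpasswd")
    authenticator := auth.NewBasicAuthenticator("example.com", secrets)
    http.HandleFunc("/", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
        fmt.Fprintf(w, "hello, %s", r.Username)
    }))
    http.ListenAndServe(":8080", nil)
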
diff --git a/vendor/github.com/abbot/go-http-auth/digest.go b/vendor/github.com/abbot/go-http-auth/digest.go
new file mode 100755
index 0000000..21b0933
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/digest.go
@@ -0,0 +1,274 @@
+package auth
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+type digest_client struct {
+ nc uint64
+ last_seen int64
+}
+
+type DigestAuth struct {
+ Realm string
+ Opaque string
+ Secrets SecretProvider
+ PlainTextSecrets bool
+ IgnoreNonceCount bool
+ // Headers used by authenticator. Set to ProxyHeaders to use with
+ // proxy server. When nil, NormalHeaders are used.
+ Headers *Headers
+
+ /*
+ Approximate size of Client's Cache. When actual number of
+ tracked client nonces exceeds
+ ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2
+ older entries are purged.
+ */
+ ClientCacheSize int
+ ClientCacheTolerance int
+
+ clients map[string]*digest_client
+ mutex sync.Mutex
+}
+
+// check that DigestAuth implements AuthenticatorInterface
+var _ = (AuthenticatorInterface)((*DigestAuth)(nil))
+
+type digest_cache_entry struct {
+ nonce string
+ last_seen int64
+}
+
+type digest_cache []digest_cache_entry
+
+func (c digest_cache) Less(i, j int) bool {
+ return c[i].last_seen < c[j].last_seen
+}
+
+func (c digest_cache) Len() int {
+ return len(c)
+}
+
+func (c digest_cache) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+/*
+ Purge removes the count oldest entries from DigestAuth.clients.
+*/
+func (a *DigestAuth) Purge(count int) {
+ entries := make([]digest_cache_entry, 0, len(a.clients))
+ for nonce, client := range a.clients {
+ entries = append(entries, digest_cache_entry{nonce, client.last_seen})
+ }
+ cache := digest_cache(entries)
+ sort.Sort(cache)
+ for _, client := range cache[:count] {
+ delete(a.clients, client.nonce)
+ }
+}
+
+/*
+ RequireAuth is the http.Handler for DigestAuth which initiates the authentication process
+ (or requires reauthentication).
+*/
+func (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
+ if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
+ a.Purge(a.ClientCacheTolerance * 2)
+ }
+ nonce := RandomKey()
+ a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
+ w.Header().Set(contentType, a.Headers.V().UnauthContentType)
+ w.Header().Set(a.Headers.V().Authenticate,
+ fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
+ a.Realm, nonce, a.Opaque))
+ w.WriteHeader(a.Headers.V().UnauthCode)
+ w.Write([]byte(a.Headers.V().UnauthResponse))
+}
+
+/*
+ DigestAuthParams parses the Authorization header from the http.Request.
+ It returns a map of auth parameters, or nil if the header is not a
+ valid, parsable Digest auth header.
+*/
+func DigestAuthParams(authorization string) map[string]string {
+ s := strings.SplitN(authorization, " ", 2)
+ if len(s) != 2 || s[0] != "Digest" {
+ return nil
+ }
+
+ return ParsePairs(s[1])
+}
+
+/*
+ CheckAuth checks whether the request contains valid authentication
+ data. It returns a pair of username, authinfo, where username is the
+ name of the authenticated user or an empty string, and authinfo is
+ the contents for the optional Authentication-Info response header.
+*/
+func (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {
+ da.mutex.Lock()
+ defer da.mutex.Unlock()
+ username = ""
+ authinfo = nil
+ auth := DigestAuthParams(r.Header.Get(da.Headers.V().Authorization))
+ if auth == nil {
+ return "", nil
+ }
+ // RFC2617 Section 3.2.1 specifies that unset value of algorithm in
+ // WWW-Authenticate Response header should be treated as
+ // "MD5". According to section 3.2.2 the "algorithm" value in
+ // subsequent Request Authorization header must be set to whatever
+ // was supplied in the WWW-Authenticate Response header. This
+ // implementation always returns an algorithm in WWW-Authenticate
+ // header, however there seems to be broken clients in the wild
+ // which do not set the algorithm. Assume the unset algorithm in
+ // Authorization header to be equal to MD5.
+ if _, ok := auth["algorithm"]; !ok {
+ auth["algorithm"] = "MD5"
+ }
+ if da.Opaque != auth["opaque"] || auth["algorithm"] != "MD5" || auth["qop"] != "auth" {
+ return "", nil
+ }
+
+ // Check if the requested URI matches auth header
+ if r.RequestURI != auth["uri"] {
+ // We allow auth["uri"] to be a full path prefix of request-uri
+ // for some reason lost in history, which is probably wrong, but
+ // used to be like that for quite some time
+ // (https://tools.ietf.org/html/rfc2617#section-3.2.2 explicitly
+ // says that auth["uri"] is the request-uri).
+ //
+ // TODO: make an option to allow only strict checking.
+ switch u, err := url.Parse(auth["uri"]); {
+ case err != nil:
+ return "", nil
+ case r.URL == nil:
+ return "", nil
+ case len(u.Path) > len(r.URL.Path):
+ return "", nil
+ case !strings.HasPrefix(r.URL.Path, u.Path):
+ return "", nil
+ }
+ }
+
+ HA1 := da.Secrets(auth["username"], da.Realm)
+ if da.PlainTextSecrets {
+ HA1 = H(auth["username"] + ":" + da.Realm + ":" + HA1)
+ }
+ HA2 := H(r.Method + ":" + auth["uri"])
+ KD := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], HA2}, ":"))
+
+ if subtle.ConstantTimeCompare([]byte(KD), []byte(auth["response"])) != 1 {
+ return "", nil
+ }
+
+ // At this point crypto checks are completed and validated.
+ // Now check if the session is valid.
+
+ nc, err := strconv.ParseUint(auth["nc"], 16, 64)
+ if err != nil {
+ return "", nil
+ }
+
+ if client, ok := da.clients[auth["nonce"]]; !ok {
+ return "", nil
+ } else {
+ if client.nc != 0 && client.nc >= nc && !da.IgnoreNonceCount {
+ return "", nil
+ }
+ client.nc = nc
+ client.last_seen = time.Now().UnixNano()
+ }
+
+ resp_HA2 := H(":" + auth["uri"])
+ rspauth := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], resp_HA2}, ":"))
+
+ info := fmt.Sprintf(`qop="auth", rspauth="%s", cnonce="%s", nc="%s"`, rspauth, auth["cnonce"], auth["nc"])
+ return auth["username"], &info
+}
+
+/*
+ Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth
+*/
+const DefaultClientCacheSize = 1000
+const DefaultClientCacheTolerance = 100
+
+/*
+ Wrap returns an http.HandlerFunc which wraps an AuthenticatedHandlerFunc
+ with HTTP Digest authentication checks. It either sends back the
+ required authentication headers, or calls the wrapped function with
+ the authenticated username in the AuthenticatedRequest.
+*/
+func (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if username, authinfo := a.CheckAuth(r); username == "" {
+ a.RequireAuth(w, r)
+ } else {
+ ar := &AuthenticatedRequest{Request: *r, Username: username}
+ if authinfo != nil {
+ w.Header().Set(a.Headers.V().AuthInfo, *authinfo)
+ }
+ wrapped(w, ar)
+ }
+ }
+}
+
+/*
+ JustCheck returns an http.HandlerFunc which requires authentication
+ before calling the wrapped http.HandlerFunc. The username is passed
+ in the extra X-Authenticated-Username header.
+*/
+func (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {
+ return a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
+ ar.Header.Set(AuthUsernameHeader, ar.Username)
+ wrapped(w, &ar.Request)
+ })
+}
+
+// NewContext returns a context carrying authentication information for the request.
+func (a *DigestAuth) NewContext(ctx context.Context, r *http.Request) context.Context {
+ username, authinfo := a.CheckAuth(r)
+ info := &Info{Username: username, ResponseHeaders: make(http.Header)}
+ if username != "" {
+ info.Authenticated = true
+ info.ResponseHeaders.Set(a.Headers.V().AuthInfo, *authinfo)
+ } else {
+ // return back digest WWW-Authenticate header
+ if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
+ a.Purge(a.ClientCacheTolerance * 2)
+ }
+ nonce := RandomKey()
+ a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
+ info.ResponseHeaders.Set(a.Headers.V().Authenticate,
+ fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
+ a.Realm, nonce, a.Opaque))
+ }
+ return context.WithValue(ctx, infoKey, info)
+}
+
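+// NewDigestAuthenticator returns a DigestAuth authenticator for the given
+// realm, with default cache settings; secrets must return HA1 digests for
+// that same realm.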
+func NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {
+ da := &DigestAuth{
+ Opaque: RandomKey(),
+ Realm: realm,
+ Secrets: secrets,
+ PlainTextSecrets: false,
+ ClientCacheSize: DefaultClientCacheSize,
+ ClientCacheTolerance: DefaultClientCacheTolerance,
+ clients: map[string]*digest_client{}}
+ return da
+}
diff --git a/vendor/github.com/abbot/go-http-auth/md5crypt.go b/vendor/github.com/abbot/go-http-auth/md5crypt.go
new file mode 100755
index 0000000..a7a031c
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/md5crypt.go
@@ -0,0 +1,92 @@
+package auth
+
+import "crypto/md5"
+import "strings"
+
+const itoa64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+var md5_crypt_swaps = [16]int{12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11}
+
+type MD5Entry struct {
+ Magic, Salt, Hash []byte
+}
+
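+// NewMD5Entry parses an MD5-crypt entry of the form $magic$salt$hash,
+// returning nil if the entry is malformed.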
+func NewMD5Entry(e string) *MD5Entry {
+ parts := strings.SplitN(e, "$", 4)
+ if len(parts) != 4 {
+ return nil
+ }
+ return &MD5Entry{
+ Magic: []byte("$" + parts[1] + "$"),
+ Salt: []byte(parts[2]),
+ Hash: []byte(parts[3]),
+ }
+}
+
+/*
+ MD5Crypt implements the MD5 password crypt algorithm (as used by $1$ and $apr1$ hashes).
+*/
+func MD5Crypt(password, salt, magic []byte) []byte {
+ d := md5.New()
+
+ d.Write(password)
+ d.Write(magic)
+ d.Write(salt)
+
+ d2 := md5.New()
+ d2.Write(password)
+ d2.Write(salt)
+ d2.Write(password)
+
+ for i, mixin := 0, d2.Sum(nil); i < len(password); i++ {
+ d.Write([]byte{mixin[i%16]})
+ }
+
+ for i := len(password); i != 0; i >>= 1 {
+ if i&1 == 0 {
+ d.Write([]byte{password[0]})
+ } else {
+ d.Write([]byte{0})
+ }
+ }
+
+ final := d.Sum(nil)
+
+ for i := 0; i < 1000; i++ {
+ d2 := md5.New()
+ if i&1 == 0 {
+ d2.Write(final)
+ } else {
+ d2.Write(password)
+ }
+
+ if i%3 != 0 {
+ d2.Write(salt)
+ }
+
+ if i%7 != 0 {
+ d2.Write(password)
+ }
+
+ if i&1 == 0 {
+ d2.Write(password)
+ } else {
+ d2.Write(final)
+ }
+ final = d2.Sum(nil)
+ }
+
+ result := make([]byte, 0, 22)
+ v := uint(0)
+ bits := uint(0)
+ for _, i := range md5_crypt_swaps {
+ v |= (uint(final[i]) << bits)
+ for bits = bits + 8; bits > 6; bits -= 6 {
+ result = append(result, itoa64[v&0x3f])
+ v >>= 6
+ }
+ }
+ result = append(result, itoa64[v&0x3f])
+
+ return append(append(append(magic, salt...), '$'), result...)
+}
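
A sketch of verifying a stored hash the same way compareMD5HashAndPassword in basic.go does (storedHash and password are placeholders):

    e := auth.NewMD5Entry(storedHash) // storedHash looks like "$apr1$<salt>$<hash>"
    if e != nil && subtle.ConstantTimeCompare([]byte(storedHash), auth.MD5Crypt(password, e.Salt, e.Magic)) == 1 {
        // password matches storedHash
    }
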
diff --git a/vendor/github.com/abbot/go-http-auth/misc.go b/vendor/github.com/abbot/go-http-auth/misc.go
new file mode 100755
index 0000000..4536ce6
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/misc.go
@@ -0,0 +1,141 @@
+package auth
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// RandomKey returns a random 16-character base64-encoded key (12 random bytes).
+func RandomKey() string {
+ k := make([]byte, 12)
+ for bytes := 0; bytes < len(k); {
+ n, err := rand.Read(k[bytes:])
+ if err != nil {
+ panic("rand.Read() failed")
+ }
+ bytes += n
+ }
+ return base64.StdEncoding.EncodeToString(k)
+}
+
+// H implements the H function for the MD5 algorithm (returns a lower-case hex MD5 digest).
+func H(data string) string {
+ digest := md5.New()
+ digest.Write([]byte(data))
+ return fmt.Sprintf("%x", digest.Sum(nil))
+}
+
+// ParseList parses a comma-separated list of values as described by
+// RFC 2068 and returns list elements.
+//
+// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
+// which was ported from urllib2.parse_http_list, from the Python
+// standard library.
+func ParseList(value string) []string {
+ var list []string
+ var escape, quote bool
+ b := new(bytes.Buffer)
+ for _, r := range value {
+ switch {
+ case escape:
+ b.WriteRune(r)
+ escape = false
+ case quote:
+ if r == '\\' {
+ escape = true
+ } else {
+ if r == '"' {
+ quote = false
+ }
+ b.WriteRune(r)
+ }
+ case r == ',':
+ list = append(list, strings.TrimSpace(b.String()))
+ b.Reset()
+ case r == '"':
+ quote = true
+ b.WriteRune(r)
+ default:
+ b.WriteRune(r)
+ }
+ }
+ // Append last part.
+ if s := b.String(); s != "" {
+ list = append(list, strings.TrimSpace(s))
+ }
+ return list
+}
+
+// ParsePairs extracts key/value pairs from a comma-separated list of
+// values as described by RFC 2068 and returns a map[key]value. The
+// resulting values are unquoted. If a list element doesn't contain a
+// "=", the key is the element itself and the value is an empty
+// string.
+//
+// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
+func ParsePairs(value string) map[string]string {
+ m := make(map[string]string)
+ for _, pair := range ParseList(strings.TrimSpace(value)) {
+ if i := strings.Index(pair, "="); i < 0 {
+ m[pair] = ""
+ } else {
+ v := pair[i+1:]
+ if v[0] == '"' && v[len(v)-1] == '"' {
+ // Unquote it.
+ v = v[1 : len(v)-1]
+ }
+ m[pair[:i]] = v
+ }
+ }
+ return m
+}
+
+// Headers contains header and error codes used by authenticator.
+type Headers struct {
+ Authenticate string // WWW-Authenticate
+ Authorization string // Authorization
+ AuthInfo string // Authentication-Info
+ UnauthCode int // 401
+ UnauthContentType string // text/plain
+ UnauthResponse string // Unauthorized.
+}
+
+// V returns NormalHeaders when h is nil, or h otherwise. This allows
+// uninitialized *Headers values to be used in structs.
+func (h *Headers) V() *Headers {
+ if h == nil {
+ return NormalHeaders
+ }
+ return h
+}
+
+var (
+ // NormalHeaders are the regular Headers used by an HTTP Server for
+ // request authentication.
+ NormalHeaders = &Headers{
+ Authenticate: "WWW-Authenticate",
+ Authorization: "Authorization",
+ AuthInfo: "Authentication-Info",
+ UnauthCode: http.StatusUnauthorized,
+ UnauthContentType: "text/plain",
+ UnauthResponse: fmt.Sprintf("%d %s\n", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)),
+ }
+
+ // ProxyHeaders are Headers used by an HTTP Proxy server for proxy
+ // access authentication.
+ ProxyHeaders = &Headers{
+ Authenticate: "Proxy-Authenticate",
+ Authorization: "Proxy-Authorization",
+ AuthInfo: "Proxy-Authentication-Info",
+ UnauthCode: http.StatusProxyAuthRequired,
+ UnauthContentType: "text/plain",
+ UnauthResponse: fmt.Sprintf("%d %s\n", http.StatusProxyAuthRequired, http.StatusText(http.StatusProxyAuthRequired)),
+ }
+)
+
+const contentType = "Content-Type"
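
For illustration, ParsePairs applied to a typical Digest Authorization parameter list (the values are made up):

    params := auth.ParsePairs(`username="john", realm="example.com", qop=auth, nc=00000001`)
    // params["username"] == "john" (quotes stripped), params["qop"] == "auth"
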
diff --git a/vendor/github.com/abbot/go-http-auth/test.htdigest b/vendor/github.com/abbot/go-http-auth/test.htdigest
new file mode 100755
index 0000000..6c8c75b
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/test.htdigest
@@ -0,0 +1 @@
+test:example.com:aa78524fceb0e50fd8ca96dd818b8cf9
diff --git a/vendor/github.com/abbot/go-http-auth/test.htpasswd b/vendor/github.com/abbot/go-http-auth/test.htpasswd
new file mode 100755
index 0000000..4844a3c
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/test.htpasswd
@@ -0,0 +1,4 @@
+test:{SHA}qvTGHdzF6KLavt4PO0gs2a6pQ00=
+test2:$apr1$a0j62R97$mYqFkloXH0/UOaUnAiV2b0
+test16:$apr1$JI4wh3am$AmhephVqLTUyAVpFQeHZC0
+test3:$2y$05$ih3C91zUBSTFcAh2mQnZYuob0UOZVEf16wl/ukgjDhjvj.xgM1WwS
diff --git a/vendor/github.com/abbot/go-http-auth/users.go b/vendor/github.com/abbot/go-http-auth/users.go
new file mode 100755
index 0000000..3771812
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/users.go
@@ -0,0 +1,154 @@
+package auth
+
+import (
+ "encoding/csv"
+ "os"
+ "sync"
+)
+
+/*
+ SecretProvider is used by authenticators. It takes a user name and realm
+ as arguments and returns the secret required for authentication (HA1 for
+ digest authentication, a properly encrypted password for basic).
+
+ Returning an empty string means failing the authentication.
+*/
+type SecretProvider func(user, realm string) string
+
+/*
+ File bundles common state and functions for password-file auto-reloading.
+*/
+type File struct {
+ Path string
+ Info os.FileInfo
+ /* must be set in inherited types during initialization */
+ Reload func()
+ mu sync.Mutex
+}
+
+func (f *File) ReloadIfNeeded() {
+ info, err := os.Stat(f.Path)
+ if err != nil {
+ panic(err)
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.Info == nil || f.Info.ModTime() != info.ModTime() {
+ f.Info = info
+ f.Reload()
+ }
+}
+
+/*
+ Structure used for htdigest file authentication. Users maps realms
+ to maps of user names to their HA1 digests.
+*/
+type HtdigestFile struct {
+ File
+ Users map[string]map[string]string
+ mu sync.RWMutex
+}
+
+func reload_htdigest(hf *HtdigestFile) {
+	r, err := os.Open(hf.Path)
+	if err != nil {
+		panic(err)
+	}
+	defer r.Close()
+ csv_reader := csv.NewReader(r)
+ csv_reader.Comma = ':'
+ csv_reader.Comment = '#'
+ csv_reader.TrimLeadingSpace = true
+
+ records, err := csv_reader.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+
+ hf.mu.Lock()
+ defer hf.mu.Unlock()
+ hf.Users = make(map[string]map[string]string)
+ for _, record := range records {
+ _, exists := hf.Users[record[1]]
+ if !exists {
+ hf.Users[record[1]] = make(map[string]string)
+ }
+ hf.Users[record[1]][record[0]] = record[2]
+ }
+}
+
+/*
+ SecretProvider implementation based on htdigest-formatted files. It
+ reloads the htdigest file on changes and panics on syntax errors in
+ htdigest files.
+*/
+func HtdigestFileProvider(filename string) SecretProvider {
+ hf := &HtdigestFile{File: File{Path: filename}}
+ hf.Reload = func() { reload_htdigest(hf) }
+ return func(user, realm string) string {
+ hf.ReloadIfNeeded()
+ hf.mu.RLock()
+ defer hf.mu.RUnlock()
+ _, exists := hf.Users[realm]
+ if !exists {
+ return ""
+ }
+ digest, exists := hf.Users[realm][user]
+ if !exists {
+ return ""
+ }
+ return digest
+ }
+}
+
+/*
+ Structure used for htpasswd file authentication. Users maps user
+ names to their salted, encrypted passwords.
+*/
+type HtpasswdFile struct {
+ File
+ Users map[string]string
+ mu sync.RWMutex
+}
+
+func reload_htpasswd(h *HtpasswdFile) {
+	r, err := os.Open(h.Path)
+	if err != nil {
+		panic(err)
+	}
+	defer r.Close()
+ csv_reader := csv.NewReader(r)
+ csv_reader.Comma = ':'
+ csv_reader.Comment = '#'
+ csv_reader.TrimLeadingSpace = true
+
+ records, err := csv_reader.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+ h.Users = make(map[string]string)
+ for _, record := range records {
+ h.Users[record[0]] = record[1]
+ }
+}
+
+/*
+ SecretProvider implementation based on htpasswd-formatted files. It
+ reloads the htpasswd file on changes and panics on syntax errors in
+ htpasswd files. The realm argument of the SecretProvider is ignored.
+*/
+func HtpasswdFileProvider(filename string) SecretProvider {
+ h := &HtpasswdFile{File: File{Path: filename}}
+ h.Reload = func() { reload_htpasswd(h) }
+ return func(user, realm string) string {
+ h.ReloadIfNeeded()
+ h.mu.RLock()
+ password, exists := h.Users[user]
+ h.mu.RUnlock()
+ if !exists {
+ return ""
+ }
+ return password
+ }
+}
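users.go turns the file formats above into SecretProvider closures. A minimal lookup sketch against the test.htpasswd fixture added in this diff (assumes the file sits in the working directory):

    package main

    import (
    	"fmt"

    	auth "github.com/abbot/go-http-auth"
    )

    func main() {
    	// The provider re-reads test.htpasswd whenever its mtime changes.
    	secrets := auth.HtpasswdFileProvider("test.htpasswd")

    	// The realm is ignored for htpasswd files; unknown users yield "",
    	// which the authenticators treat as a failed lookup.
    	if hash := secrets("test", ""); hash != "" {
    		fmt.Println("hash for user test:", hash)
    	}
    }
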
diff --git a/vendor/github.com/andybalholm/cascadia/README.md b/vendor/github.com/andybalholm/cascadia/README.md
old mode 100644
new mode 100755
index 9021cb9..26f4c37
--- a/vendor/github.com/andybalholm/cascadia/README.md
+++ b/vendor/github.com/andybalholm/cascadia/README.md
@@ -5,3 +5,5 @@
The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
+
+[Refer to godoc here](https://godoc.org/github.com/andybalholm/cascadia).
diff --git a/vendor/github.com/andybalholm/cascadia/go.mod b/vendor/github.com/andybalholm/cascadia/go.mod
old mode 100644
new mode 100755
index e6febbb..51a330b
--- a/vendor/github.com/andybalholm/cascadia/go.mod
+++ b/vendor/github.com/andybalholm/cascadia/go.mod
@@ -1,3 +1,5 @@
-module "github.com/andybalholm/cascadia"
+module github.com/andybalholm/cascadia
-require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01
+require golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01
+
+go 1.13
diff --git a/vendor/github.com/andybalholm/cascadia/parser.go b/vendor/github.com/andybalholm/cascadia/parser.go
old mode 100644
new mode 100755
index 495db9c..c40a39f
--- a/vendor/github.com/andybalholm/cascadia/parser.go
+++ b/vendor/github.com/andybalholm/cascadia/parser.go
@@ -7,14 +7,16 @@ import (
"regexp"
"strconv"
"strings"
-
- "golang.org/x/net/html"
)
// a parser for CSS selectors
type parser struct {
s string // the source text
i int // the current position
+
+ // if `false`, parsing a pseudo-element
+ // returns an error.
+ acceptPseudoElements bool
}
// parseEscape parses a backslash escape.
@@ -31,7 +33,7 @@ func (p *parser) parseEscape() (result string, err error) {
case hexDigit(c):
// unicode escape (hex)
var i int
- for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
+ for i = start; i < start+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
// empty
}
v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
@@ -56,6 +58,26 @@ func (p *parser) parseEscape() (result string, err error) {
return result, nil
}
+// toLowerASCII returns s with all ASCII capital letters lowercased.
+func toLowerASCII(s string) string {
+ var b []byte
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; 'A' <= c && c <= 'Z' {
+ if b == nil {
+ b = make([]byte, len(s))
+ copy(b, s)
+ }
+ b[i] = s[i] + ('a' - 'A')
+ }
+ }
+
+ if b == nil {
+ return s
+ }
+
+ return string(b)
+}
+
func hexDigit(c byte) bool {
return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
}
@@ -280,92 +302,92 @@ func (p *parser) consumeClosingParenthesis() bool {
}
// parseTypeSelector parses a type selector (one that matches by tag name).
-func (p *parser) parseTypeSelector() (result Selector, err error) {
+func (p *parser) parseTypeSelector() (result tagSelector, err error) {
tag, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return
}
-
- return typeSelector(tag), nil
+ return tagSelector{tag: toLowerASCII(tag)}, nil
}
// parseIDSelector parses a selector that matches by id attribute.
-func (p *parser) parseIDSelector() (Selector, error) {
+func (p *parser) parseIDSelector() (idSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected id selector (#id), found EOF instead")
+ return idSelector{}, fmt.Errorf("expected id selector (#id), found EOF instead")
}
if p.s[p.i] != '#' {
- return nil, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
+ return idSelector{}, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
}
p.i++
id, err := p.parseName()
if err != nil {
- return nil, err
+ return idSelector{}, err
}
- return attributeEqualsSelector("id", id), nil
+ return idSelector{id: id}, nil
}
// parseClassSelector parses a selector that matches by class attribute.
-func (p *parser) parseClassSelector() (Selector, error) {
+func (p *parser) parseClassSelector() (classSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected class selector (.class), found EOF instead")
+ return classSelector{}, fmt.Errorf("expected class selector (.class), found EOF instead")
}
if p.s[p.i] != '.' {
- return nil, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
+ return classSelector{}, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
}
p.i++
class, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return classSelector{}, err
}
- return attributeIncludesSelector("class", class), nil
+ return classSelector{class: class}, nil
}
// parseAttributeSelector parses a selector that matches by attribute value.
-func (p *parser) parseAttributeSelector() (Selector, error) {
+func (p *parser) parseAttributeSelector() (attrSelector, error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
+ return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
}
if p.s[p.i] != '[' {
- return nil, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
+ return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
}
p.i++
p.skipWhitespace()
key, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return attrSelector{}, err
}
+ key = toLowerASCII(key)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] == ']' {
p.i++
- return attributeExistsSelector(key), nil
+ return attrSelector{key: key, operation: ""}, nil
}
if p.i+2 >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
op := p.s[p.i : p.i+2]
if op[0] == '=' {
op = "="
} else if op[1] != '=' {
- return nil, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
+ return attrSelector{}, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
}
p.i += len(op)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
var val string
var rx *regexp.Regexp
@@ -380,88 +402,84 @@ func (p *parser) parseAttributeSelector() (Selector, error) {
}
}
if err != nil {
- return nil, err
+ return attrSelector{}, err
}
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in attribute selector")
+ return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] != ']' {
- return nil, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
+ return attrSelector{}, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
}
p.i++
switch op {
- case "=":
- return attributeEqualsSelector(key, val), nil
- case "!=":
- return attributeNotEqualSelector(key, val), nil
- case "~=":
- return attributeIncludesSelector(key, val), nil
- case "|=":
- return attributeDashmatchSelector(key, val), nil
- case "^=":
- return attributePrefixSelector(key, val), nil
- case "$=":
- return attributeSuffixSelector(key, val), nil
- case "*=":
- return attributeSubstringSelector(key, val), nil
- case "#=":
- return attributeRegexSelector(key, rx), nil
+ case "=", "!=", "~=", "|=", "^=", "$=", "*=", "#=":
+ return attrSelector{key: key, val: val, operation: op, regexp: rx}, nil
+ default:
+ return attrSelector{}, fmt.Errorf("attribute operator %q is not supported", op)
}
-
- return nil, fmt.Errorf("attribute operator %q is not supported", op)
}
var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
var errUnmatchedParenthesis = errors.New("unmatched '('")
-// parsePseudoclassSelector parses a pseudoclass selector like :not(p).
-func (p *parser) parsePseudoclassSelector() (Selector, error) {
+// parsePseudoclassSelector parses a pseudoclass selector like :not(p) or a pseudo-element
+// For backwards compatibility, both the ':' and '::' prefixes are allowed for pseudo-elements.
+// https://drafts.csswg.org/selectors-3/#pseudo-elements
+// Returning a nil `Sel` (and a nil `error`) means we found a pseudo-element.
+func (p *parser) parsePseudoclassSelector() (out Sel, pseudoElement string, err error) {
if p.i >= len(p.s) {
- return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
+ return nil, "", fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
}
if p.s[p.i] != ':' {
- return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
+ return nil, "", fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
}
p.i++
+ var mustBePseudoElement bool
+ if p.i >= len(p.s) {
+ return nil, "", fmt.Errorf("got empty pseudoclass (or pseudoelement)")
+ }
+ if p.s[p.i] == ':' { // we found a pseudo-element
+ mustBePseudoElement = true
+ p.i++
+ }
+
name, err := p.parseIdentifier()
if err != nil {
- return nil, err
+ return
}
name = toLowerASCII(name)
+ if mustBePseudoElement && (name != "after" && name != "backdrop" && name != "before" &&
+ name != "cue" && name != "first-letter" && name != "first-line" && name != "grammar-error" &&
+ name != "marker" && name != "placeholder" && name != "selection" && name != "spelling-error") {
+ return out, "", fmt.Errorf("unknown pseudoelement :%s", name)
+ }
switch name {
case "not", "has", "haschild":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, "", errExpectedParenthesis
}
sel, parseErr := p.parseSelectorGroup()
if parseErr != nil {
- return nil, parseErr
+ return out, "", parseErr
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, "", errExpectedClosingParenthesis
}
- switch name {
- case "not":
- return negatedSelector(sel), nil
- case "has":
- return hasDescendantSelector(sel), nil
- case "haschild":
- return hasChildSelector(sel), nil
- }
+ out = relativePseudoClassSelector{name: name, match: sel}
case "contains", "containsown":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, "", errExpectedParenthesis
}
if p.i == len(p.s) {
- return nil, errUnmatchedParenthesis
+ return out, "", errUnmatchedParenthesis
}
var val string
switch p.s[p.i] {
@@ -471,95 +489,75 @@ func (p *parser) parsePseudoclassSelector() (Selector, error) {
val, err = p.parseIdentifier()
}
if err != nil {
- return nil, err
+ return out, "", err
}
val = strings.ToLower(val)
p.skipWhitespace()
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in pseudo selector")
+ return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, "", errExpectedClosingParenthesis
}
- switch name {
- case "contains":
- return textSubstrSelector(val), nil
- case "containsown":
- return ownTextSubstrSelector(val), nil
- }
+ out = containsPseudoClassSelector{own: name == "containsown", value: val}
case "matches", "matchesown":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, "", errExpectedParenthesis
}
rx, err := p.parseRegex()
if err != nil {
- return nil, err
+ return out, "", err
}
if p.i >= len(p.s) {
- return nil, errors.New("unexpected EOF in pseudo selector")
+ return out, "", errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, "", errExpectedClosingParenthesis
}
- switch name {
- case "matches":
- return textRegexSelector(rx), nil
- case "matchesown":
- return ownTextRegexSelector(rx), nil
- }
+ out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
if !p.consumeParenthesis() {
- return nil, errExpectedParenthesis
+ return out, "", errExpectedParenthesis
}
a, b, err := p.parseNth()
if err != nil {
- return nil, err
+ return out, "", err
}
if !p.consumeClosingParenthesis() {
- return nil, errExpectedClosingParenthesis
+ return out, "", errExpectedClosingParenthesis
}
- if a == 0 {
- switch name {
- case "nth-child":
- return simpleNthChildSelector(b, false), nil
- case "nth-of-type":
- return simpleNthChildSelector(b, true), nil
- case "nth-last-child":
- return simpleNthLastChildSelector(b, false), nil
- case "nth-last-of-type":
- return simpleNthLastChildSelector(b, true), nil
- }
- }
- return nthChildSelector(a, b,
- name == "nth-last-child" || name == "nth-last-of-type",
- name == "nth-of-type" || name == "nth-last-of-type"),
- nil
+ last := name == "nth-last-child" || name == "nth-last-of-type"
+ ofType := name == "nth-of-type" || name == "nth-last-of-type"
+ out = nthPseudoClassSelector{a: a, b: b, last: last, ofType: ofType}
case "first-child":
- return simpleNthChildSelector(1, false), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: false}
case "last-child":
- return simpleNthLastChildSelector(1, false), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: true}
case "first-of-type":
- return simpleNthChildSelector(1, true), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: false}
case "last-of-type":
- return simpleNthLastChildSelector(1, true), nil
+ out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: true}
case "only-child":
- return onlyChildSelector(false), nil
+ out = onlyChildPseudoClassSelector{ofType: false}
case "only-of-type":
- return onlyChildSelector(true), nil
+ out = onlyChildPseudoClassSelector{ofType: true}
case "input":
- return inputSelector, nil
+ out = inputPseudoClassSelector{}
case "empty":
- return emptyElementSelector, nil
+ out = emptyElementPseudoClassSelector{}
case "root":
- return rootSelector, nil
+ out = rootPseudoClassSelector{}
+ case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
+ return nil, name, nil
+ default:
+ return out, "", fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
}
-
- return nil, fmt.Errorf("unknown pseudoclass :%s", name)
+ return
}
// parseInteger parses a decimal integer.
@@ -705,8 +703,8 @@ invalid:
// parseSimpleSelectorSequence parses a selector sequence that applies to
// a single element.
-func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
- var result Selector
+func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
+ var selectors []Sel
if p.i >= len(p.s) {
return nil, errors.New("expected selector, found EOF instead")
@@ -723,13 +721,17 @@ func (p *parser) parseSimpleSelectorSequence() (Selector, error) {
if err != nil {
return nil, err
}
- result = r
+ selectors = append(selectors, r)
}
+ var pseudoElement string
loop:
for p.i < len(p.s) {
- var ns Selector
- var err error
+ var (
+ ns Sel
+ newPseudoElement string
+ err error
+ )
switch p.s[p.i] {
case '#':
ns, err = p.parseIDSelector()
@@ -738,44 +740,57 @@ loop:
case '[':
ns, err = p.parseAttributeSelector()
case ':':
- ns, err = p.parsePseudoclassSelector()
+ ns, newPseudoElement, err = p.parsePseudoclassSelector()
default:
break loop
}
if err != nil {
return nil, err
}
- if result == nil {
- result = ns
+ // From https://drafts.csswg.org/selectors-3/#pseudo-elements :
+ // "Only one pseudo-element may appear per selector, and if present
+ // it must appear after the sequence of simple selectors that
+	// represents the subjects of the selector."
+ if ns == nil { // we found a pseudo-element
+ if pseudoElement != "" {
+ return nil, fmt.Errorf("only one pseudo-element is accepted per selector, got %s and %s", pseudoElement, newPseudoElement)
+ }
+ if !p.acceptPseudoElements {
+ return nil, fmt.Errorf("pseudo-element %s found, but pseudo-elements support is disabled", newPseudoElement)
+ }
+ pseudoElement = newPseudoElement
} else {
- result = intersectionSelector(result, ns)
+ if pseudoElement != "" {
+ return nil, fmt.Errorf("pseudo-element %s must be at the end of selector", pseudoElement)
+ }
+ selectors = append(selectors, ns)
}
- }
- if result == nil {
- result = func(n *html.Node) bool {
- return n.Type == html.ElementNode
- }
}
-
- return result, nil
+	if len(selectors) == 1 && pseudoElement == "" { // no need to wrap the selectors in a compoundSelector
+ return selectors[0], nil
+ }
+ return compoundSelector{selectors: selectors, pseudoElement: pseudoElement}, nil
}
// parseSelector parses a selector that may include combinators.
-func (p *parser) parseSelector() (result Selector, err error) {
+func (p *parser) parseSelector() (Sel, error) {
p.skipWhitespace()
- result, err = p.parseSimpleSelectorSequence()
+ result, err := p.parseSimpleSelectorSequence()
if err != nil {
- return
+ return nil, err
}
for {
- var combinator byte
+ var (
+ combinator byte
+ c Sel
+ )
if p.skipWhitespace() {
combinator = ' '
}
if p.i >= len(p.s) {
- return
+ return result, nil
}
switch p.s[p.i] {
@@ -785,51 +800,39 @@ func (p *parser) parseSelector() (result Selector, err error) {
p.skipWhitespace()
case ',', ')':
// These characters can't begin a selector, but they can legally occur after one.
- return
+ return result, nil
}
if combinator == 0 {
- return
+ return result, nil
}
- c, err := p.parseSimpleSelectorSequence()
+ c, err = p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
-
- switch combinator {
- case ' ':
- result = descendantSelector(result, c)
- case '>':
- result = childSelector(result, c)
- case '+':
- result = siblingSelector(result, c, true)
- case '~':
- result = siblingSelector(result, c, false)
- }
+ result = combinedSelector{first: result, combinator: combinator, second: c}
}
-
- panic("unreachable")
}
// parseSelectorGroup parses a group of selectors, separated by commas.
-func (p *parser) parseSelectorGroup() (result Selector, err error) {
- result, err = p.parseSelector()
+func (p *parser) parseSelectorGroup() (SelectorGroup, error) {
+ current, err := p.parseSelector()
if err != nil {
- return
+ return nil, err
}
+ result := SelectorGroup{current}
for p.i < len(p.s) {
if p.s[p.i] != ',' {
- return result, nil
+ break
}
p.i++
c, err := p.parseSelector()
if err != nil {
return nil, err
}
- result = unionSelector(result, c)
+ result = append(result, c)
}
-
- return
+ return result, nil
}
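The acceptPseudoElements flag threaded through the parser above is what distinguishes the two public entry points added in selector.go below. A small sketch of the difference:

    package main

    import (
    	"fmt"

    	"github.com/andybalholm/cascadia"
    )

    func main() {
    	// Parse leaves acceptPseudoElements unset, so pseudo-elements
    	// are rejected...
    	if _, err := cascadia.Parse("p::first-line"); err != nil {
    		fmt.Println("Parse:", err)
    	}

    	// ...while ParseWithPseudoElement accepts one trailing pseudo-element.
    	sel, err := cascadia.ParseWithPseudoElement("p::first-line")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(sel.PseudoElement()) // first-line
    }
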
diff --git a/vendor/github.com/andybalholm/cascadia/selector.go b/vendor/github.com/andybalholm/cascadia/selector.go
old mode 100644
new mode 100755
index 9fb05cc..e2a6dc4
--- a/vendor/github.com/andybalholm/cascadia/selector.go
+++ b/vendor/github.com/andybalholm/cascadia/selector.go
@@ -9,36 +9,60 @@ import (
"golang.org/x/net/html"
)
-// the Selector type, and functions for creating them
-
-// A Selector is a function which tells whether a node matches or not.
-type Selector func(*html.Node) bool
-
-// hasChildMatch returns whether n has any child that matches a.
-func hasChildMatch(n *html.Node, a Selector) bool {
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- if a(c) {
- return true
- }
- }
- return false
+// Matcher is the interface for basic selector functionality.
+// Match returns whether a selector matches n.
+type Matcher interface {
+ Match(n *html.Node) bool
}
-// hasDescendantMatch performs a depth-first search of n's descendants,
-// testing whether any of them match a. It returns true as soon as a match is
-// found, or false if no match is found.
-func hasDescendantMatch(n *html.Node, a Selector) bool {
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- if a(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
- return true
- }
- }
- return false
+// Sel is the interface for all the functionality provided by selectors.
+type Sel interface {
+ Matcher
+ Specificity() Specificity
+
+	// Returns the CSS input that compiles to this selector.
+ String() string
+
+	// Returns the pseudo-element, or an empty string if there is none.
+ PseudoElement() string
}
-// Compile parses a selector and returns, if successful, a Selector object
-// that can be used to match against html.Node objects.
-func Compile(sel string) (Selector, error) {
+// Parse parses a selector. Use `ParseWithPseudoElement`
+// if you need support for pseudo-elements.
+func Parse(sel string) (Sel, error) {
+ p := &parser{s: sel}
+ compiled, err := p.parseSelector()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
+ }
+
+ return compiled, nil
+}
+
+// ParseWithPseudoElement parses a single selector,
+// with support for a single pseudo-element.
+func ParseWithPseudoElement(sel string) (Sel, error) {
+ p := &parser{s: sel, acceptPseudoElements: true}
+ compiled, err := p.parseSelector()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
+ }
+
+ return compiled, nil
+}
+
+// ParseGroup parses a selector, or a group of selectors separated by commas.
+// Use `ParseGroupWithPseudoElements`
+// if you need support for pseudo-elements.
+func ParseGroup(sel string) (SelectorGroup, error) {
p := &parser{s: sel}
compiled, err := p.parseSelectorGroup()
if err != nil {
@@ -52,6 +76,39 @@ func Compile(sel string) (Selector, error) {
return compiled, nil
}
+// ParseGroupWithPseudoElements parses a selector, or a group of selectors separated by commas.
+// It supports pseudo-elements.
+func ParseGroupWithPseudoElements(sel string) (SelectorGroup, error) {
+ p := &parser{s: sel, acceptPseudoElements: true}
+ compiled, err := p.parseSelectorGroup()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.i < len(sel) {
+ return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
+ }
+
+ return compiled, nil
+}
+
+// A Selector is a function which tells whether a node matches or not.
+//
+// This type is maintained for compatibility; I recommend using the newer and
+// more idiomatic interfaces Sel and Matcher.
+type Selector func(*html.Node) bool
+
+// Compile parses a selector and returns, if successful, a Selector object
+// that can be used to match against html.Node objects.
+func Compile(sel string) (Selector, error) {
+ compiled, err := ParseGroup(sel)
+ if err != nil {
+ return nil, err
+ }
+
+ return Selector(compiled.Match), nil
+}
+
// MustCompile is like Compile, but panics instead of returning an error.
func MustCompile(sel string) Selector {
compiled, err := Compile(sel)
@@ -79,6 +136,23 @@ func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node
return storage
}
+func queryInto(n *html.Node, m Matcher, storage []*html.Node) []*html.Node {
+ for child := n.FirstChild; child != nil; child = child.NextSibling {
+ if m.Match(child) {
+ storage = append(storage, child)
+ }
+ storage = queryInto(child, m, storage)
+ }
+
+ return storage
+}
+
+// QueryAll returns a slice of all the nodes that match m, from the descendants
+// of n.
+func QueryAll(n *html.Node, m Matcher) []*html.Node {
+ return queryInto(n, m, nil)
+}
+
// Match returns true if the node matches the selector.
func (s Selector) Match(n *html.Node) bool {
return s(n)
@@ -99,6 +173,21 @@ func (s Selector) MatchFirst(n *html.Node) *html.Node {
return nil
}
+// Query returns the first node that matches m, from the descendants of n.
+// If none matches, it returns nil.
+func Query(n *html.Node, m Matcher) *html.Node {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if m.Match(c) {
+ return c
+ }
+ if matched := Query(c, m); matched != nil {
+ return matched
+ }
+ }
+
+ return nil
+}
+
// Filter returns the nodes in nodes that match the selector.
func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
for _, n := range nodes {
@@ -109,106 +198,148 @@ func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
return result
}
-// typeSelector returns a Selector that matches elements with a given tag name.
-func typeSelector(tag string) Selector {
- tag = toLowerASCII(tag)
- return func(n *html.Node) bool {
- return n.Type == html.ElementNode && n.Data == tag
+// Filter returns the nodes that match m.
+func Filter(nodes []*html.Node, m Matcher) (result []*html.Node) {
+ for _, n := range nodes {
+ if m.Match(n) {
+ result = append(result, n)
+ }
+ }
+ return result
+}
+
+type tagSelector struct {
+ tag string
+}
+
+// Matches elements with a given tag name.
+func (t tagSelector) Match(n *html.Node) bool {
+ return n.Type == html.ElementNode && n.Data == t.tag
+}
+
+func (c tagSelector) Specificity() Specificity {
+ return Specificity{0, 0, 1}
+}
+
+func (c tagSelector) PseudoElement() string {
+ return ""
+}
+
+type classSelector struct {
+ class string
+}
+
+// Matches elements by class attribute.
+func (t classSelector) Match(n *html.Node) bool {
+ return matchAttribute(n, "class", func(s string) bool {
+ return matchInclude(t.class, s)
+ })
+}
+
+func (c classSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c classSelector) PseudoElement() string {
+ return ""
+}
+
+type idSelector struct {
+ id string
+}
+
+// Matches elements by id attribute.
+func (t idSelector) Match(n *html.Node) bool {
+ return matchAttribute(n, "id", func(s string) bool {
+ return s == t.id
+ })
+}
+
+func (c idSelector) Specificity() Specificity {
+ return Specificity{1, 0, 0}
+}
+
+func (c idSelector) PseudoElement() string {
+ return ""
+}
+
+type attrSelector struct {
+ key, val, operation string
+ regexp *regexp.Regexp
+}
+
+// Matches elements by attribute value.
+func (t attrSelector) Match(n *html.Node) bool {
+ switch t.operation {
+ case "":
+ return matchAttribute(n, t.key, func(string) bool { return true })
+ case "=":
+ return matchAttribute(n, t.key, func(s string) bool { return s == t.val })
+ case "!=":
+ return attributeNotEqualMatch(t.key, t.val, n)
+ case "~=":
+ // matches elements where the attribute named key is a whitespace-separated list that includes val.
+ return matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })
+ case "|=":
+ return attributeDashMatch(t.key, t.val, n)
+ case "^=":
+ return attributePrefixMatch(t.key, t.val, n)
+ case "$=":
+ return attributeSuffixMatch(t.key, t.val, n)
+ case "*=":
+ return attributeSubstringMatch(t.key, t.val, n)
+ case "#=":
+ return attributeRegexMatch(t.key, t.regexp, n)
+ default:
+ panic(fmt.Sprintf("unsuported operation : %s", t.operation))
}
}
-// toLowerASCII returns s with all ASCII capital letters lowercased.
-func toLowerASCII(s string) string {
- var b []byte
- for i := 0; i < len(s); i++ {
- if c := s[i]; 'A' <= c && c <= 'Z' {
- if b == nil {
- b = make([]byte, len(s))
- copy(b, s)
- }
- b[i] = s[i] + ('a' - 'A')
- }
- }
-
- if b == nil {
- return s
- }
-
- return string(b)
-}
-
-// attributeSelector returns a Selector that matches elements
-// where the attribute named key satisifes the function f.
-func attributeSelector(key string, f func(string) bool) Selector {
- key = toLowerASCII(key)
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
- for _, a := range n.Attr {
- if a.Key == key && f(a.Val) {
- return true
- }
- }
+// matches elements where the attribute named key satisfies the function f.
+func matchAttribute(n *html.Node, key string, f func(string) bool) bool {
+ if n.Type != html.ElementNode {
return false
}
-}
-
-// attributeExistsSelector returns a Selector that matches elements that have
-// an attribute named key.
-func attributeExistsSelector(key string) Selector {
- return attributeSelector(key, func(string) bool { return true })
-}
-
-// attributeEqualsSelector returns a Selector that matches elements where
-// the attribute named key has the value val.
-func attributeEqualsSelector(key, val string) Selector {
- return attributeSelector(key,
- func(s string) bool {
- return s == val
- })
-}
-
-// attributeNotEqualSelector returns a Selector that matches elements where
-// the attribute named key does not have the value val.
-func attributeNotEqualSelector(key, val string) Selector {
- key = toLowerASCII(key)
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+ for _, a := range n.Attr {
+ if a.Key == key && f(a.Val) {
+ return true
}
- for _, a := range n.Attr {
- if a.Key == key && a.Val == val {
- return false
- }
- }
- return true
}
+ return false
}
-// attributeIncludesSelector returns a Selector that matches elements where
-// the attribute named key is a whitespace-separated list that includes val.
-func attributeIncludesSelector(key, val string) Selector {
- return attributeSelector(key,
- func(s string) bool {
- for s != "" {
- i := strings.IndexAny(s, " \t\r\n\f")
- if i == -1 {
- return s == val
- }
- if s[:i] == val {
- return true
- }
- s = s[i+1:]
- }
+// attributeNotEqualMatch matches elements where
+// the attribute named key does not have the value val.
+func attributeNotEqualMatch(key, val string, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ for _, a := range n.Attr {
+ if a.Key == key && a.Val == val {
return false
- })
+ }
+ }
+ return true
}
-// attributeDashmatchSelector returns a Selector that matches elements where
-// the attribute named key equals val or starts with val plus a hyphen.
-func attributeDashmatchSelector(key, val string) Selector {
- return attributeSelector(key,
+// matchInclude returns true if s is a whitespace-separated list that includes val.
+func matchInclude(val, s string) bool {
+ for s != "" {
+ i := strings.IndexAny(s, " \t\r\n\f")
+ if i == -1 {
+ return s == val
+ }
+ if s[:i] == val {
+ return true
+ }
+ s = s[i+1:]
+ }
+ return false
+}
+
+// matches elements where the attribute named key equals val or starts with val plus a hyphen.
+func attributeDashMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if s == val {
return true
@@ -223,10 +354,10 @@ func attributeDashmatchSelector(key, val string) Selector {
})
}
-// attributePrefixSelector returns a Selector that matches elements where
+// attributePrefixMatch matches elements where
// the attribute named key starts with val.
-func attributePrefixSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributePrefixMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -235,10 +366,10 @@ func attributePrefixSelector(key, val string) Selector {
})
}
-// attributeSuffixSelector returns a Selector that matches elements where
+// attributeSuffixMatch matches elements where
// the attribute named key ends with val.
-func attributeSuffixSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributeSuffixMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -247,10 +378,10 @@ func attributeSuffixSelector(key, val string) Selector {
})
}
-// attributeSubstringSelector returns a Selector that matches nodes where
+// attributeSubstringMatch matches nodes where
// the attribute named key contains val.
-func attributeSubstringSelector(key, val string) Selector {
- return attributeSelector(key,
+func attributeSubstringMatch(key, val string, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
@@ -259,39 +390,130 @@ func attributeSubstringSelector(key, val string) Selector {
})
}
-// attributeRegexSelector returns a Selector that matches nodes where
+// attributeRegexMatch matches nodes where
// the attribute named key matches the regular expression rx
-func attributeRegexSelector(key string, rx *regexp.Regexp) Selector {
- return attributeSelector(key,
+func attributeRegexMatch(key string, rx *regexp.Regexp, n *html.Node) bool {
+ return matchAttribute(n, key,
func(s string) bool {
return rx.MatchString(s)
})
}
-// intersectionSelector returns a selector that matches nodes that match
-// both a and b.
-func intersectionSelector(a, b Selector) Selector {
- return func(n *html.Node) bool {
- return a(n) && b(n)
+func (c attrSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c attrSelector) PseudoElement() string {
+ return ""
+}
+
+// ---------------- Pseudo class selectors ----------------
+// we use several concrete types of pseudo-class selectors
+
+type relativePseudoClassSelector struct {
+ name string // one of "not", "has", "haschild"
+ match SelectorGroup
+}
+
+func (s relativePseudoClassSelector) Match(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+ switch s.name {
+ case "not":
+ // matches elements that do not match a.
+ return !s.match.Match(n)
+ case "has":
+ // matches elements with any descendant that matches a.
+ return hasDescendantMatch(n, s.match)
+ case "haschild":
+ // matches elements with a child that matches a.
+ return hasChildMatch(n, s.match)
+ default:
+ panic(fmt.Sprintf("unsupported relative pseudo class selector : %s", s.name))
}
}
-// unionSelector returns a selector that matches elements that match
-// either a or b.
-func unionSelector(a, b Selector) Selector {
- return func(n *html.Node) bool {
- return a(n) || b(n)
- }
-}
-
-// negatedSelector returns a selector that matches elements that do not match a.
-func negatedSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+// hasChildMatch returns whether n has any child that matches a.
+func hasChildMatch(n *html.Node, a Matcher) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a.Match(c) {
+ return true
}
- return !a(n)
}
+ return false
+}
+
+// hasDescendantMatch performs a depth-first search of n's descendants,
+// testing whether any of them match a. It returns true as soon as a match is
+// found, or false if no match is found.
+func hasDescendantMatch(n *html.Node, a Matcher) bool {
+ for c := n.FirstChild; c != nil; c = c.NextSibling {
+ if a.Match(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
+ return true
+ }
+ }
+ return false
+}
+
+// Specificity returns the specificity of the most specific selectors
+// in the pseudo-class arguments.
+// See https://www.w3.org/TR/selectors/#specificity-rules
+func (s relativePseudoClassSelector) Specificity() Specificity {
+ var max Specificity
+ for _, sel := range s.match {
+ newSpe := sel.Specificity()
+ if max.Less(newSpe) {
+ max = newSpe
+ }
+ }
+ return max
+}
+
+func (c relativePseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type containsPseudoClassSelector struct {
+ own bool
+ value string
+}
+
+func (s containsPseudoClassSelector) Match(n *html.Node) bool {
+ var text string
+ if s.own {
+ // matches nodes that directly contain the given text
+ text = strings.ToLower(nodeOwnText(n))
+ } else {
+ // matches nodes that contain the given text.
+ text = strings.ToLower(nodeText(n))
+ }
+ return strings.Contains(text, s.value)
+}
+
+func (s containsPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c containsPseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type regexpPseudoClassSelector struct {
+ own bool
+ regexp *regexp.Regexp
+}
+
+func (s regexpPseudoClassSelector) Match(n *html.Node) bool {
+ var text string
+ if s.own {
+ // matches nodes whose text directly matches the specified regular expression
+ text = nodeOwnText(n)
+ } else {
+ // matches nodes whose text matches the specified regular expression
+ text = nodeText(n)
+ }
+ return s.regexp.MatchString(text)
}
// writeNodeText writes the text contained in n and its descendants to b.
@@ -325,221 +547,214 @@ func nodeOwnText(n *html.Node) string {
return b.String()
}
-// textSubstrSelector returns a selector that matches nodes that
-// contain the given text.
-func textSubstrSelector(val string) Selector {
- return func(n *html.Node) bool {
- text := strings.ToLower(nodeText(n))
- return strings.Contains(text, val)
- }
+func (s regexpPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
}
-// ownTextSubstrSelector returns a selector that matches nodes that
-// directly contain the given text
-func ownTextSubstrSelector(val string) Selector {
- return func(n *html.Node) bool {
- text := strings.ToLower(nodeOwnText(n))
- return strings.Contains(text, val)
- }
+func (c regexpPseudoClassSelector) PseudoElement() string {
+ return ""
}
-// textRegexSelector returns a selector that matches nodes whose text matches
-// the specified regular expression
-func textRegexSelector(rx *regexp.Regexp) Selector {
- return func(n *html.Node) bool {
- return rx.MatchString(nodeText(n))
- }
+type nthPseudoClassSelector struct {
+ a, b int
+ last, ofType bool
}
-// ownTextRegexSelector returns a selector that matches nodes whose text
-// directly matches the specified regular expression
-func ownTextRegexSelector(rx *regexp.Regexp) Selector {
- return func(n *html.Node) bool {
- return rx.MatchString(nodeOwnText(n))
- }
-}
-
-// hasChildSelector returns a selector that matches elements
-// with a child that matches a.
-func hasChildSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
+func (s nthPseudoClassSelector) Match(n *html.Node) bool {
+ if s.a == 0 {
+ if s.last {
+ return simpleNthLastChildMatch(s.b, s.ofType, n)
+ } else {
+ return simpleNthChildMatch(s.b, s.ofType, n)
}
- return hasChildMatch(n, a)
}
+ return nthChildMatch(s.a, s.b, s.last, s.ofType, n)
}
-// hasDescendantSelector returns a selector that matches elements
-// with any descendant that matches a.
-func hasDescendantSelector(a Selector) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
- return hasDescendantMatch(n, a)
- }
-}
-
-// nthChildSelector returns a selector that implements :nth-child(an+b).
+// nthChildMatch implements :nth-child(an+b).
// If last is true, implements :nth-last-child instead.
// If ofType is true, implements :nth-of-type instead.
-func nthChildSelector(a, b int, last, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
-
- parent := n.Parent
- if parent == nil {
- return false
- }
-
- if parent.Type == html.DocumentNode {
- return false
- }
-
- i := -1
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- i = count
- if !last {
- break
- }
- }
- }
-
- if i == -1 {
- // This shouldn't happen, since n should always be one of its parent's children.
- return false
- }
-
- if last {
- i = count - i + 1
- }
-
- i -= b
- if a == 0 {
- return i == 0
- }
-
- return i%a == 0 && i/a >= 0
+func nthChildMatch(a, b int, last, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
}
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ i := -1
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ i = count
+ if !last {
+ break
+ }
+ }
+ }
+
+ if i == -1 {
+ // This shouldn't happen, since n should always be one of its parent's children.
+ return false
+ }
+
+ if last {
+ i = count - i + 1
+ }
+
+ i -= b
+ if a == 0 {
+ return i == 0
+ }
+
+ return i%a == 0 && i/a >= 0
}
-// simpleNthChildSelector returns a selector that implements :nth-child(b).
+// simpleNthChildMatch implements :nth-child(b).
// If ofType is true, implements :nth-of-type instead.
-func simpleNthChildSelector(b int, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
-
- parent := n.Parent
- if parent == nil {
- return false
- }
-
- if parent.Type == html.DocumentNode {
- return false
- }
-
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- return count == b
- }
- if count >= b {
- return false
- }
- }
+func simpleNthChildMatch(b int, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
return false
}
-}
-// simpleNthLastChildSelector returns a selector that implements
-// :nth-last-child(b). If ofType is true, implements :nth-last-of-type
-// instead.
-func simpleNthLastChildSelector(b int, ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
-
- parent := n.Parent
- if parent == nil {
- return false
- }
-
- if parent.Type == html.DocumentNode {
- return false
- }
-
- count := 0
- for c := parent.LastChild; c != nil; c = c.PrevSibling {
- if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if c == n {
- return count == b
- }
- if count >= b {
- return false
- }
- }
+ parent := n.Parent
+ if parent == nil {
return false
}
-}
-// onlyChildSelector returns a selector that implements :only-child.
-// If ofType is true, it implements :only-of-type instead.
-func onlyChildSelector(ofType bool) Selector {
- return func(n *html.Node) bool {
- if n.Type != html.ElementNode {
- return false
- }
-
- parent := n.Parent
- if parent == nil {
- return false
- }
-
- if parent.Type == html.DocumentNode {
- return false
- }
-
- count := 0
- for c := parent.FirstChild; c != nil; c = c.NextSibling {
- if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
- continue
- }
- count++
- if count > 1 {
- return false
- }
- }
-
- return count == 1
+ if parent.Type == html.DocumentNode {
+ return false
}
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
}
-// inputSelector is a Selector that matches input, select, textarea and button elements.
-func inputSelector(n *html.Node) bool {
+// simpleNthLastChildMatch implements :nth-last-child(b).
+// If ofType is true, implements :nth-last-of-type instead.
+func simpleNthLastChildMatch(b int, ofType bool, n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.LastChild; c != nil; c = c.PrevSibling {
+ if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if c == n {
+ return count == b
+ }
+ if count >= b {
+ return false
+ }
+ }
+ return false
+}
+
+// Specificity for nth-child pseudo-class.
+// It does not support a list of selectors.
+func (s nthPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c nthPseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type onlyChildPseudoClassSelector struct {
+ ofType bool
+}
+
+// Match implements :only-child.
+// If `ofType` is true, it implements :only-of-type instead.
+func (s onlyChildPseudoClassSelector) Match(n *html.Node) bool {
+ if n.Type != html.ElementNode {
+ return false
+ }
+
+ parent := n.Parent
+ if parent == nil {
+ return false
+ }
+
+ if parent.Type == html.DocumentNode {
+ return false
+ }
+
+ count := 0
+ for c := parent.FirstChild; c != nil; c = c.NextSibling {
+ if (c.Type != html.ElementNode) || (s.ofType && c.Data != n.Data) {
+ continue
+ }
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+
+ return count == 1
+}
+
+func (s onlyChildPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c onlyChildPseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type inputPseudoClassSelector struct{}
+
+// Matches input, select, textarea and button elements.
+func (s inputPseudoClassSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
}
-// emptyElementSelector is a Selector that matches empty elements.
-func emptyElementSelector(n *html.Node) bool {
+func (s inputPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c inputPseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type emptyElementPseudoClassSelector struct{}
+
+// Matches empty elements.
+func (s emptyElementPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
@@ -554,64 +769,18 @@ func emptyElementSelector(n *html.Node) bool {
return true
}
-// descendantSelector returns a Selector that matches an element if
-// it matches d and has an ancestor that matches a.
-func descendantSelector(a, d Selector) Selector {
- return func(n *html.Node) bool {
- if !d(n) {
- return false
- }
-
- for p := n.Parent; p != nil; p = p.Parent {
- if a(p) {
- return true
- }
- }
-
- return false
- }
+func (s emptyElementPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
}
-// childSelector returns a Selector that matches an element if
-// it matches d and its parent matches a.
-func childSelector(a, d Selector) Selector {
- return func(n *html.Node) bool {
- return d(n) && n.Parent != nil && a(n.Parent)
- }
+func (c emptyElementPseudoClassSelector) PseudoElement() string {
+ return ""
}
-// siblingSelector returns a Selector that matches an element
-// if it matches s2 and in is preceded by an element that matches s1.
-// If adjacent is true, the sibling must be immediately before the element.
-func siblingSelector(s1, s2 Selector, adjacent bool) Selector {
- return func(n *html.Node) bool {
- if !s2(n) {
- return false
- }
+type rootPseudoClassSelector struct{}
- if adjacent {
- for n = n.PrevSibling; n != nil; n = n.PrevSibling {
- if n.Type == html.TextNode || n.Type == html.CommentNode {
- continue
- }
- return s1(n)
- }
- return false
- }
-
- // Walk backwards looking for element that matches s1
- for c := n.PrevSibling; c != nil; c = c.PrevSibling {
- if s1(c) {
- return true
- }
- }
-
- return false
- }
-}
-
-// rootSelector implements :root
-func rootSelector(n *html.Node) bool {
+// Match implements :root
+func (s rootPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
@@ -620,3 +789,150 @@ func rootSelector(n *html.Node) bool {
}
return n.Parent.Type == html.DocumentNode
}
+
+func (s rootPseudoClassSelector) Specificity() Specificity {
+ return Specificity{0, 1, 0}
+}
+
+func (c rootPseudoClassSelector) PseudoElement() string {
+ return ""
+}
+
+type compoundSelector struct {
+ selectors []Sel
+ pseudoElement string
+}
+
+// Matches elements if each sub-selector matches.
+func (t compoundSelector) Match(n *html.Node) bool {
+ if len(t.selectors) == 0 {
+ return n.Type == html.ElementNode
+ }
+
+ for _, sel := range t.selectors {
+ if !sel.Match(n) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s compoundSelector) Specificity() Specificity {
+ var out Specificity
+ for _, sel := range s.selectors {
+ out = out.Add(sel.Specificity())
+ }
+ if s.pseudoElement != "" {
+ // https://drafts.csswg.org/selectors-3/#specificity
+ out = out.Add(Specificity{0, 0, 1})
+ }
+ return out
+}
+
+func (c compoundSelector) PseudoElement() string {
+ return c.pseudoElement
+}
+
+type combinedSelector struct {
+ first Sel
+ combinator byte
+ second Sel
+}
+
+func (t combinedSelector) Match(n *html.Node) bool {
+ if t.first == nil {
+ return false // maybe we should panic
+ }
+ switch t.combinator {
+ case 0:
+ return t.first.Match(n)
+ case ' ':
+ return descendantMatch(t.first, t.second, n)
+ case '>':
+ return childMatch(t.first, t.second, n)
+ case '+':
+ return siblingMatch(t.first, t.second, true, n)
+ case '~':
+ return siblingMatch(t.first, t.second, false, n)
+ default:
+ panic("unknown combinator")
+ }
+}
+
+// matches an element if it matches d and has an ancestor that matches a.
+func descendantMatch(a, d Matcher, n *html.Node) bool {
+ if !d.Match(n) {
+ return false
+ }
+
+ for p := n.Parent; p != nil; p = p.Parent {
+ if a.Match(p) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// matches an element if it matches d and its parent matches a.
+func childMatch(a, d Matcher, n *html.Node) bool {
+ return d.Match(n) && n.Parent != nil && a.Match(n.Parent)
+}
+
+// matches an element if it matches s2 and is preceded by an element that matches s1.
+// If adjacent is true, the sibling must be immediately before the element.
+func siblingMatch(s1, s2 Matcher, adjacent bool, n *html.Node) bool {
+ if !s2.Match(n) {
+ return false
+ }
+
+ if adjacent {
+ for n = n.PrevSibling; n != nil; n = n.PrevSibling {
+ if n.Type == html.TextNode || n.Type == html.CommentNode {
+ continue
+ }
+ return s1.Match(n)
+ }
+ return false
+ }
+
+ // Walk backwards looking for element that matches s1
+ for c := n.PrevSibling; c != nil; c = c.PrevSibling {
+ if s1.Match(c) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s combinedSelector) Specificity() Specificity {
+ spec := s.first.Specificity()
+ if s.second != nil {
+ spec = spec.Add(s.second.Specificity())
+ }
+ return spec
+}
+
+// On a combinedSelector, a pseudo-element only makes sense on the last
+// selector, although the others still contribute to specificity.
+func (c combinedSelector) PseudoElement() string {
+ if c.second == nil {
+ return ""
+ }
+ return c.second.PseudoElement()
+}
+
+// A SelectorGroup is a list of selectors, which matches if any of the
+// individual selectors matches.
+type SelectorGroup []Sel
+
+// Match returns true if the node matches one of the single selectors.
+func (s SelectorGroup) Match(n *html.Node) bool {
+ for _, sel := range s {
+ if sel.Match(n) {
+ return true
+ }
+ }
+ return false
+}
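With the Matcher and Sel interfaces in place, Query and QueryAll are the interface-based counterparts of MatchFirst and MatchAll. A minimal usage sketch:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/andybalholm/cascadia"
    	"golang.org/x/net/html"
    )

    func main() {
    	doc, err := html.Parse(strings.NewReader(
    		`<ul><li class="a">one</li><li>two</li><li class="a">three</li></ul>`))
    	if err != nil {
    		panic(err)
    	}

    	sel, err := cascadia.Parse("li.a") // every Sel is also a Matcher
    	if err != nil {
    		panic(err)
    	}

    	// Query returns the first match; QueryAll returns every match.
    	if first := cascadia.Query(doc, sel); first != nil {
    		fmt.Println(first.FirstChild.Data) // "one"
    	}
    	fmt.Println(len(cascadia.QueryAll(doc, sel))) // 2
    }
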
diff --git a/vendor/github.com/andybalholm/cascadia/serialize.go b/vendor/github.com/andybalholm/cascadia/serialize.go
new file mode 100755
index 0000000..f15b079
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/serialize.go
@@ -0,0 +1,120 @@
+package cascadia
+
+import (
+ "fmt"
+ "strings"
+)
+
+// implements the reverse operation Sel -> string
+
+func (c tagSelector) String() string {
+ return c.tag
+}
+
+func (c idSelector) String() string {
+ return "#" + c.id
+}
+
+func (c classSelector) String() string {
+ return "." + c.class
+}
+
+func (c attrSelector) String() string {
+ val := c.val
+ if c.operation == "#=" {
+ val = c.regexp.String()
+ } else if c.operation != "" {
+ val = fmt.Sprintf(`"%s"`, val)
+ }
+ return fmt.Sprintf(`[%s%s%s]`, c.key, c.operation, val)
+}
+
+func (c relativePseudoClassSelector) String() string {
+ return fmt.Sprintf(":%s(%s)", c.name, c.match.String())
+}
+func (c containsPseudoClassSelector) String() string {
+ s := "contains"
+ if c.own {
+ s += "Own"
+ }
+ return fmt.Sprintf(`:%s("%s")`, s, c.value)
+}
+func (c regexpPseudoClassSelector) String() string {
+ s := "matches"
+ if c.own {
+ s += "Own"
+ }
+ return fmt.Sprintf(":%s(%s)", s, c.regexp.String())
+}
+func (c nthPseudoClassSelector) String() string {
+ if c.a == 0 && c.b == 1 { // special cases
+ s := ":first-"
+ if c.last {
+ s = ":last-"
+ }
+ if c.ofType {
+ s += "of-type"
+ } else {
+ s += "child"
+ }
+ return s
+ }
+ var name string
+ switch [2]bool{c.last, c.ofType} {
+ case [2]bool{true, true}:
+ name = "nth-last-of-type"
+ case [2]bool{true, false}:
+ name = "nth-last-child"
+ case [2]bool{false, true}:
+ name = "nth-of-type"
+ case [2]bool{false, false}:
+ name = "nth-child"
+ }
+ return fmt.Sprintf(":%s(%dn+%d)", name, c.a, c.b)
+}
+func (c onlyChildPseudoClassSelector) String() string {
+ if c.ofType {
+ return ":only-of-type"
+ }
+ return ":only-child"
+}
+func (c inputPseudoClassSelector) String() string {
+ return ":input"
+}
+func (c emptyElementPseudoClassSelector) String() string {
+ return ":empty"
+}
+func (c rootPseudoClassSelector) String() string {
+ return ":root"
+}
+
+func (c compoundSelector) String() string {
+ if len(c.selectors) == 0 && c.pseudoElement == "" {
+ return "*"
+ }
+ chunks := make([]string, len(c.selectors))
+ for i, sel := range c.selectors {
+ chunks[i] = sel.String()
+ }
+ s := strings.Join(chunks, "")
+ if c.pseudoElement != "" {
+ s += "::" + c.pseudoElement
+ }
+ return s
+}
+
+func (c combinedSelector) String() string {
+ start := c.first.String()
+ if c.second != nil {
+ start += fmt.Sprintf(" %s %s", string(c.combinator), c.second.String())
+ }
+ return start
+}
+
+func (c SelectorGroup) String() string {
+ ck := make([]string, len(c))
+ for i, s := range c {
+ ck[i] = s.String()
+ }
+ return strings.Join(ck, ", ")
+}
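serialize.go gives every selector type a String method, so a parsed selector can be rendered back to CSS. A quick round-trip sketch:

    package main

    import (
    	"fmt"

    	"github.com/andybalholm/cascadia"
    )

    func main() {
    	g, err := cascadia.ParseGroup(`div.note > p[lang="en"], #main`)
    	if err != nil {
    		panic(err)
    	}
    	// String re-serializes the group, joining its members with ", ".
    	fmt.Println(g.String()) // div.note > p[lang="en"], #main
    }
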
diff --git a/vendor/github.com/andybalholm/cascadia/specificity.go b/vendor/github.com/andybalholm/cascadia/specificity.go
new file mode 100755
index 0000000..8db864f
--- /dev/null
+++ b/vendor/github.com/andybalholm/cascadia/specificity.go
@@ -0,0 +1,26 @@
+package cascadia
+
+// Specificity is the CSS specificity as defined in
+// https://www.w3.org/TR/selectors/#specificity-rules
+// with the convention Specificity = [A,B,C].
+type Specificity [3]int
+
+// Less returns true if s < other (strictly), false otherwise.
+func (s Specificity) Less(other Specificity) bool {
+ for i := range s {
+ if s[i] < other[i] {
+ return true
+ }
+ if s[i] > other[i] {
+ return false
+ }
+ }
+ return false
+}
+
+func (s Specificity) Add(other Specificity) Specificity {
+ for i, sp := range other {
+ s[i] += sp
+ }
+ return s
+}
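Specificity is an [A,B,C] triple ordered lexicographically, so ids outrank classes and classes outrank tag names. A small comparison sketch:

    package main

    import (
    	"fmt"

    	"github.com/andybalholm/cascadia"
    )

    func main() {
    	a, err := cascadia.Parse("#nav") // one id -> [1 0 0]
    	if err != nil {
    		panic(err)
    	}
    	b, err := cascadia.Parse("ul li.item") // one class, two tags -> [0 1 2]
    	if err != nil {
    		panic(err)
    	}

    	// Less compares the triples lexicographically.
    	fmt.Println(b.Specificity().Less(a.Specificity())) // true
    }
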
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100755
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100755
index 0000000..899129e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100755
index 0000000..56fdfc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr provides API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+	// Returns the original errors if any were set. Nil is returned if
+	// none were set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+	// Returns the original errors if any were set. Nil is returned if
+	// none were set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
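+//
+// Illustrative usage (not upstream text):
+//
+//	err := New("SerializationError", "failed decoding response", io.EOF)
+//	// err.Error() yields the code and message, followed by
+//	// "caused by: EOF" on its own line.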
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors wrapping the given collection
+// of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100755
index 0000000..0202a00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a formatted error string built from the given code,
+// message, extra detail, and original error.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
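+//
+// Illustrative output (not upstream text):
+//
+//	SprintError("Throttling", "rate exceeded", "will retry", errors.New("503"))
+//	// => "Throttling: rate exceeded\n\twill retry\ncaused by: 503"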
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error
+// interface, and for any error which does not fit into a specific error
+// wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the
+// error.
+//
+// origErrs holds the error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even
+// if the request failed without a service response but had an HTTP status
+// code that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the standard error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+	// A zero-length list produces an empty message.
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// We check the next index to see if it is within the slice. If it
+			// is, we append a newline; unconditionally appending one would leave
+			// a trailing '\n' that breaks consumers comparing the message exactly.
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100755
index 0000000..1a3d106
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
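+//
+// Illustrative usage (not upstream text):
+//
+//	type T struct{ Name *string }
+//	name := "a"
+//	var dst T
+//	Copy(&dst, &T{Name: &name})
+//	// dst.Name now points at a fresh copy of "a"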
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+		// Assign the value if possible. If it's not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100755
index 0000000..142a7a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like
+// reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
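+//
+// Illustrative behavior (not upstream text):
+//
+//	s := "a"
+//	DeepEqual(&s, "a") // true: the pointer is dereferenced first
+//	DeepEqual(&s, "b") // false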
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type they are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100755
index 0000000..11c52c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values located at the JMESPath
+// expression inside of a structure.
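+//
+// Illustrative usage (not upstream text), using exact Go field names in
+// the expression:
+//
+//	type Child struct{ Name string }
+//	type Parent struct{ Children []Child }
+//	p := Parent{Children: []Child{{Name: "x"}, {Name: "y"}}}
+//	vals, _ := ValuesAtPath(p, "Children[0].Name")
+//	// vals => []interface{}{"x"}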
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
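+//
+// Illustrative usage (not upstream text):
+//
+//	type T struct{ Name string }
+//	t := T{}
+//	SetValueAtPath(&t, "name", "x") // t.Name is now "x"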
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100755
index 0000000..710eb43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
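+//
+// Illustrative output (not upstream text) for a struct with a single set
+// string field named Name:
+//
+//	{
+//	  Name: "a"
+//	}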
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, " len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100755
index 0000000..645df24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100755
index 0000000..7096053
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint string
+ SigningRegion string
+ SigningName string
+
+ // States that the signing name did not come from a modeled source but
+ // was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata the
+ // service has.
+ SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
+// will not resolve the endpoint automatically. The service client's endpoint
+// must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+ ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
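+//
+// If cfg.Retryer implements request.Retryer it is used as-is; otherwise a
+// DefaultRetryer is installed with cfg.MaxRetries retries (or 3 when
+// MaxRetries is unset or UseServiceDefaultRetries).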
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers.Copy(),
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+ c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100755
index 0000000..a397b0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,116 @@
+package client
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// client.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will
+// allow for an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again
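+//
+// Worked example (illustrative): with no throttling minTime is 30, so a
+// request on its second retry waits (1 << 2) * (rand.Intn(30) + 30)
+// milliseconds, i.e. between 120ms and 236ms.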
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+ // Set the upper limit of delay in retrying at ~five minutes
+ minTime := 30
+ throttle := d.shouldThrottle(r)
+ if throttle {
+ if delay, ok := getRetryDelay(r); ok {
+ return delay
+ }
+
+ minTime = 500
+ }
+
+ retryCount := r.RetryCount
+ if throttle && retryCount > 8 {
+ retryCount = 8
+ } else if retryCount > 13 {
+ retryCount = 13
+ }
+
+ delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+
+ if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
+ return true
+ }
+ return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
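+	// The empty case bodies are intentional: 429, 502, 503, and 504 match,
+	// exit the switch, and reach the final return true; all other status
+	// codes defer to the request's own throttle classification.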
+ case 429:
+ case 502:
+ case 503:
+ case 504:
+ default:
+ return r.IsErrorThrottle()
+ }
+
+ return true
+}
+
+// getRetryDelay looks in the Retry-After header (RFC 7231) for how long
+// to wait before attempting another request.
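+//
+// For example (illustrative), a 429 or 503 response carrying
+// "Retry-After: 3" yields a delay of 3 * time.Second.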
+func getRetryDelay(r *request.Request) (time.Duration, bool) {
+ if !canUseRetryAfterHeader(r) {
+ return 0, false
+ }
+
+ delayStr := r.HTTPResponse.Header.Get("Retry-After")
+ if len(delayStr) == 0 {
+ return 0, false
+ }
+
+ delay, err := strconv.Atoi(delayStr)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(delay) * time.Second, true
+}
+
+// canUseRetryAfterHeader reports whether the Retry-After header pertains
+// to the response's status code.
+func canUseRetryAfterHeader(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 503:
+ default:
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
new file mode 100755
index 0000000..7b5e127
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -0,0 +1,190 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger aws.Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
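+
+// Design note (not upstream text): logResponse wires the HTTP response
+// body through an io.TeeReader into a logWriter, then logs the captured
+// bytes from an Unmarshal handler, so the payload is logged without being
+// consumed before the SDK reads it.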
+
+// LogHTTPRequestHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will include the HTTP request body if the LogLevel of the
+// request matches LogDebugWithHTTPBody.
+var LogHTTPRequestHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequest",
+ Fn: logRequest,
+}
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ bodySeekable := aws.IsReaderSeekable(r.Body)
+
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ if !bodySeekable {
+ r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
+ }
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser and will not be reset after read by the HTTP
+ // client reader.
+ r.ResetBody()
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+// LogHTTPRequestHeaderHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will only log the HTTP request's headers. The request payload
+// will not be read.
+var LogHTTPRequestHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequestHeader",
+ Fn: logRequestHeader,
+}
+
+func logRequestHeader(r *request.Request) {
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+// LogHTTPResponseHandler is an SDK request handler to log the HTTP response
+// received from a service. Will include the HTTP response body if the LogLevel
+// of the request matches LogDebugWithHTTPBody.
+var LogHTTPResponseHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponse",
+ Fn: logResponse,
+}
+
+func logResponse(r *request.Request) {
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+
+ if r.HTTPResponse == nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
+ return
+ }
+
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ if logBody {
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+ }
+
+ handlerFn := func(req *request.Request) {
+ b, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(fmt.Sprintf(logRespMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
+
+ if logBody {
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
+
+// LogHTTPResponseHeaderHandler is an SDK request handler to log the HTTP
+// response received from a service. Will only log the HTTP response's headers.
+// The response payload will not be read.
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponseHeader",
+ Fn: logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+ if r.Config.Logger == nil {
+ return
+ }
+
+ b, err := httputil.DumpResponse(r.HTTPResponse, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100755
index 0000000..920e9fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,13 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ ServiceID string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100755
index 0000000..10634d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,536 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This is also the default behavior when
+// Config.MaxRetries is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // Note: You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+	// ShouldRetry regardless of whether request.Retryable is set. This will
+	// utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+ // Regions and Endpoints.
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // Note: This configuration option is specific to the Amazon S3 service.
+ //
+ // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // for Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+	// You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an
+	// S3 client that has accelerate enabled. If the bucket is not enabled
+	// for accelerate an error will be returned. The bucket name must also be
+	// DNS compatible to work with accelerate.
+ S3UseAccelerate *bool
+
+ // S3DisableContentMD5Validation config option is temporarily disabled,
+ // For S3 GetObject API calls, #1837.
+ //
+ // Set this to `true` to disable the S3 service client from automatically
+ // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+ // will also disable the SDK from performing object ContentMD5 validation
+ // on GetObject API calls.
+ S3DisableContentMD5Validation *bool
+
+ // Set this to `true` to disable the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to the session.NewSession() in order to disable
+ // the EC2Metadata overriding the timeout for default credentials chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+	//        .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the SDK to generate the dual stack endpoint for the service
+	// client. The dual stack endpoint supports both IPv4 and IPv6
+	// addressing.
+ //
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the
+	// session, as it will apply to all service clients created with the
+	// session, even services which don't support dual stack endpoints.
+ //
+ // If the Endpoint config value is also provided the UseDualStack flag
+ // will be ignored.
+ //
+ // Example of enabling dual stack for an S3 client:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ UseDualStack *bool
+
+ // SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
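+ //
+ // A sketch of disabling delays entirely for tests (an assumption, not a
+ // recommended production setting):
+ //
+ // cfg := aws.NewConfig().WithSleepDelay(func(time.Duration) {})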
+ SleepDelay func(time.Duration)
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making
+ // rest protocol requests. Defaults to false. This is only needed for S3
+ // requests whose keys contain empty path segments (e.g., repeated slashes).
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+
+ // EnableEndpointDiscovery will allow for endpoint discovery on operations that
+ // have the definition in its model. By default, endpoint discovery is off.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // EnableEndpointDiscovery: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("/foo/bar/moo"),
+ // })
+ EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
+// value returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+ c.S3DisableContentMD5Validation = &enable
+ return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+ c.EnableEndpointDiscovery = &t
+ return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+ c.DisableEndpointHostPrefix = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
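+//
+// A minimal sketch of merging an override into a base config:
+//
+// base := aws.NewConfig().WithRegion("us-west-2")
+// base.MergeIn(aws.NewConfig().WithMaxRetries(5))
+// // base now carries both the Region and MaxRetries values.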
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.S3DisableContentMD5Validation != nil {
+ dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+
+ if other.EnableEndpointDiscovery != nil {
+ dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+ }
+
+ if other.DisableEndpointHostPrefix != nil {
+ dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100755
index 0000000..2866f9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100755
index 0000000..3718b26
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100755
index 0000000..66c5945
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,56 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100755
index 0000000..9c29f29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100755
index 0000000..304fd15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,31 @@
+package aws
+
+import (
+ "time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
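+//
+// A minimal usage sketch (assuming a standard library context; ctx.Err()
+// is returned if the deadline fires before the sleep completes):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+// defer cancel()
+// err := aws.SleepWithContext(ctx, 5*time.Second)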
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100755
index 0000000..ff5d58e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,394 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
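+//
+// Handy for setting optional pointer fields on API input structs, e.g.
+// (the s3.GetObjectInput type here is illustrative):
+//
+// input := &s3.GetObjectInput{Bucket: aws.String("my-bucket")}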
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(*v, 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
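+//
+// For example, TimeUnixMilli(time.Unix(1, 0)) returns 1000.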
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100755
index 0000000..f8853d7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,228 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified,
+// an error will be set on the request.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ if r.Body != nil {
+ var err error
+ length, err = aws.SeekerLen(r.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+ return
+ }
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+ // Requests using anonymous credentials are not signed, nothing to validate
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 5 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(5 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if request.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+ // place holder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+ // Use a shallow copy of the http.Request to ensure the race condition
+ // on the transport's access to Body will not trigger.
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned while processing the
+ // response, e.g. a 301 without a Location header comes back as a string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+ // come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+ // Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100755
index 0000000..7d50b15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100755
index 0000000..ab69c7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,40 @@
+package corehandlers
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
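+//
+// For example, with AWS_EXECUTION_ENV=AWS_Lambda_go1.x the user agent
+// would gain "exec-env/AWS_Lambda_go1.x".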
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *request.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100755
index 0000000..3ad1e79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100755
index 0000000..894bbc7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,295 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials type is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() for a new Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+ // The time at which the credentials are no longer valid
+ ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an
+// error. It is used by the SDK when constructing a known provider is not
+// possible due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+ // The provider name to set on the Retrieved returned Value
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
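+//
+// For example, SetExpiration(t, time.Minute) causes IsExpired to report
+// true starting one minute before t.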
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ curTime := e.CurrentTime
+ if curTime == nil {
+ curTime = time.Now
+ }
+ return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+ return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+
+ m sync.RWMutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ // Check the cached credentials first with just the read lock.
+ c.m.RLock()
+ if !c.isExpired() {
+ creds := c.creds
+ c.m.RUnlock()
+ return creds, nil
+ }
+ c.m.RUnlock()
+
+ // Credentials are expired, so retrieve them while holding the full
+ // lock.
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ expirer, ok := c.provider.(Expirer)
+ if !ok {
+ return time.Time{}, awserr.New("ProviderNotExpirer",
+ fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
+ nil)
+ }
+ if c.forceRefresh {
+ // set expiration time to the distant past
+ return time.Time{}, nil
+ }
+ return expirer.ExpiresAt(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100755
index 0000000..0ed791b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+// // Do not use early expiry of credentials. If a non-zero value is
+// // specified the credentials will be expired early.
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. An error
+// is returned if there are no credentials, or making or receiving the request fails.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credential name from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100755
index 0000000..ace5131
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,205 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
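+//
+// A minimal usage sketch, assuming a session value sess and a local
+// endpoint URL (both assumptions for illustration):
+//
+// creds := endpointcreds.NewCredentialsClient(
+// *sess.Config, sess.Handlers, "http://localhost:8080/creds",
+// )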
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+ // Requires an AWS Client to make HTTP requests to the endpoint with.
+ // The endpoint the request will be made to is provided by the aws.Config's
+ // Endpoint value.
+ Client *client.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // Optional authorization token value. If set, it will be used as the value
+ // of the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint the first time they are needed, and again
+// whenever they have expired.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+ if authToken := p.AuthorizationToken; len(authToken) != 0 {
+ req.HTTPRequest.Header.Set("Authorization", authToken)
+ }
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100755
index 0000000..54c5cf7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env credentials provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
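+//
+// A minimal usage sketch, assuming the variables above are set in the
+// process's environment:
+//
+//	creds := credentials.NewEnvCredentials()
+//	v, err := creds.Get()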
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100755
index 0000000..7fc91d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100755
index 0000000..1980c8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,425 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to setup your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+ [default]
+ credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+ // Initialize a session to load credentials.
+ sess, _ := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1")},
+ )
+
+ // Create S3 service client to use the credentials.
+ svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`processcreds.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentials("/path/to/command")
+
+ // Create service client value configured for credentials.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a 500 millisecond timeout:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentialsTimeout(
+ "/path/to/command",
+ time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+ creds := processcreds.NewCredentials(
+ "/path/to/command",
+ func(opt *ProcessProvider) {
+ opt.Timeout = time.Duration(2) * time.Minute
+ opt.Duration = time.Duration(60) * time.Minute
+ opt.MaxBufSize = 2048
+ })
+
+You can also use your own `exec.Cmd`:
+
+ // Create an exec.Cmd
+ myCommand := exec.Command("/path/to/command")
+
+ // Create credentials using your exec.Cmd and custom timeout
+ creds := processcreds.NewCredentialsCommand(
+ myCommand,
+ func(opt *processcreds.ProcessProvider) {
+ opt.Timeout = time.Duration(1) * time.Second
+ })
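+
+The process itself is expected to write a JSON document of the following form
+to stdout (a sketch; Version must be 1, and Expiration is optional; when
+omitted the credentials are treated as static):
+
+	{
+		"Version": 1,
+		"AccessKeyId": "accessKey",
+		"SecretAccessKey": "secret",
+		"SessionToken": "token",
+		"Expiration": "2019-01-02T15:04:05Z"
+	}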
+*/
+package processcreds
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // ErrCodeProcessProviderParse error parsing process output
+ ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+ // ErrCodeProcessProviderVersion version error in output
+ ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+ // ErrCodeProcessProviderRequired required attribute missing in output
+ ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+ // ErrCodeProcessProviderExecution execution of command failed
+ ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+ // errMsgProcessProviderTimeout process took longer than allowed
+ errMsgProcessProviderTimeout = "credential process timed out"
+
+ // errMsgProcessProviderProcess process error
+ errMsgProcessProviderProcess = "error in credential_process"
+
+ // errMsgProcessProviderParse problem parsing output
+ errMsgProcessProviderParse = "parse failed of credential_process output"
+
+ // errMsgProcessProviderVersion version error in output
+ errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+ // errMsgProcessProviderMissKey missing access key id in output
+ errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+ // errMsgProcessProviderMissSecret missing secret access key in output
+ errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+ // errMsgProcessProviderPrepareCmd prepare of command failed
+ errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+ // errMsgProcessProviderEmptyCmd command must not be empty
+ errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+ // errMsgProcessProviderPipe failed to initialize pipe
+ errMsgProcessProviderPipe = "failed to initialize pipe"
+
+ // DefaultDuration is the default amount of time that the
+ // credentials will be valid for.
+ DefaultDuration = time.Duration(15) * time.Minute
+
+ // DefaultBufSize limits buffer size from growing to an enormous
+ // amount due to a faulty process.
+ DefaultBufSize = 1024
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+ staticCreds bool
+ credentials.Expiry
+ originalCommand []string
+
+ // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // The OS command, as an exec.Cmd, that should write a JSON document with
+ // credential information to stdout.
+ command *exec.Cmd
+
+ // MaxBufSize limits memory usage from growing to an enormous
+ // amount due to a faulty process.
+ MaxBufSize int
+
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: exec.Command(command),
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+ p := NewCredentials(command, func(opt *ProcessProvider) {
+ opt.Timeout = timeout
+ })
+
+ return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: command,
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+ out, err := p.executeCredentialProcess()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderParse,
+ fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+ err)
+ }
+
+ if resp.Version != 1 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderVersion,
+ errMsgProcessProviderVersion,
+ nil)
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissKey,
+ nil)
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissSecret,
+ nil)
+ }
+
+ // Handle expiration
+ p.staticCreds = resp.Expiration == nil
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ }
+
+ return credentials.Value{
+ ProviderName: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(p.originalCommand) == 0 {
+ p.originalCommand = make([]string, len(p.command.Args))
+ copy(p.originalCommand, p.command.Args)
+
+ // check for an empty command, because an empty command would otherwise succeed
+ if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+ return awserr.New(
+ ErrCodeProcessProviderExecution,
+ fmt.Sprintf(
+ "%s: %s",
+ errMsgProcessProviderPrepareCmd,
+ errMsgProcessProviderEmptyCmd),
+ nil)
+ }
+ }
+
+ cmdArgs = append(cmdArgs, p.originalCommand...)
+ p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ p.command.Env = os.Environ()
+
+ return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+ if err := p.prepareCommand(); err != nil {
+ return nil, err
+ }
+
+ // Setup the pipes
+ outReadPipe, outWritePipe, err := os.Pipe()
+ if err != nil {
+ return nil, awserr.New(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderPipe,
+ err)
+ }
+
+ p.command.Stderr = os.Stderr // display stderr on console for MFA
+ p.command.Stdout = outWritePipe // get creds json on process's stdout
+ p.command.Stdin = os.Stdin // enable stdin for MFA
+
+ output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+ stdoutCh := make(chan error, 1)
+ go readInput(
+ io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+ output,
+ stdoutCh)
+
+ execCh := make(chan error, 1)
+ go executeCommand(*p.command, execCh)
+
+ finished := false
+ var errors []error
+ for !finished {
+ select {
+ case readError := <-stdoutCh:
+ errors = appendError(errors, readError)
+ finished = true
+ case execError := <-execCh:
+ err := outWritePipe.Close()
+ errors = appendError(errors, err)
+ errors = appendError(errors, execError)
+ if errors != nil {
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderProcess,
+ errors)
+ }
+ case <-time.After(p.Timeout):
+ finished = true
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderTimeout,
+ errors) // errors can be nil
+ }
+ }
+
+ out := output.Bytes()
+
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+ }
+
+ return out, nil
+}
+
+// appendError conveniently checks err for nil before appending it to the slice
+func appendError(errors []error, err error) []error {
+ if err != nil {
+ return append(errors, err)
+ }
+ return errors
+}
+
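+// executeCommand starts cmd, waits for it to finish, and sends the resulting
+// error (nil on success) to the exec channel.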
+func executeCommand(cmd exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
+
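+// readInput tees everything read from r into w and reports the read error on
+// the read channel, treating io.EOF as success.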
+func readInput(r io.Reader, w io.Writer, read chan error) {
+ tee := io.TeeReader(r, w)
+
+ _, err := ioutil.ReadAll(tee)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100755
index 0000000..e155149
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,150 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/ini"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName is the name of the SharedCreds provider.
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials have expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
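+//
+// A minimal usage sketch (empty arguments fall back to the defaults described
+// on the fields below):
+//
+//	creds := credentials.NewSharedCredentials("", "default")
+//	v, err := creds.Get()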
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty, the "AWS_SHARED_CREDENTIALS_FILE" env variable will be checked.
+ // If that value is also empty, it will default to the current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If
+ // empty, defaults to the environment variable "AWS_PROFILE", or "default" if
+ // that environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the given profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned, or an error if it fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.OpenFile(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+
+ iniProfile, ok := config.GetSection(profile)
+ if !ok {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+ }
+
+ id := iniProfile.String("aws_access_key_id")
+ if len(id) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ nil)
+ }
+
+ secret := iniProfile.String("aws_secret_access_key")
+ if len(secret) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.String("aws_session_token")
+
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+ // Backwards compatibility of the home directory not found error being returned.
+ // This error is too verbose; a failure when opening the file would have been
+ // a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty, it will read
+// the environment variable "AWS_PROFILE". If that is not set, profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100755
index 0000000..531139e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,55 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName is the name of the Static provider.
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
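+//
+// A minimal usage sketch (the key values are placeholders):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
+//	v, err := creds.Get()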
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials,
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100755
index 0000000..b6dbfd2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,313 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that, per instance of credentials.Credentials, all requests
+to refresh the credentials will be synchronized. But the SDK is unable to
+ensure synchronized usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions, or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short-lived operations that will not need to be refreshed, and when you do
+not need direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, set the TokenProvider field of AssumeRoleProvider.
+This allows the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make prompts from stdin atomic across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName is the name of the AssumeRole provider.
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider are set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // MaxJitterFrac reduces the effective Duration of each credential requested
+ // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+ // have a value between 0 and 1. Any other value may lead to unexpected behavior.
+ // With the default MaxJitterFrac value of 0, no jitter will be used.
+ //
+ // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+ // AssumeRole call will be made with an arbitrary Duration between 27m and
+ // 30m.
+ //
+ // MaxJitterFrac should not be negative.
+ MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
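+//
+// A minimal usage sketch, assuming svc is an STS client created elsewhere:
+//
+//	creds := stscreds.NewCredentialsWithClient(svc, "myRoleArn")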
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+ // Apply defaults where parameters are not set.
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ roleOutput, err := p.Client.AssumeRole(input)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100755
index 0000000..152d785
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op meaning no metrics will be published.
+//
+// Example:
+// r, err := csm.Start("clientID", ":31000")
+// if err != nil {
+// panic(fmt.Errorf("failed starting CSM: %v", err))
+// }
+//
+// sess, err := session.NewSession(&aws.Config{})
+// if err != nil {
+// panic(fmt.Errorf("failed loading session: %v", err))
+// }
+//
+// r.InjectHandlers(&sess.Handlers)
+//
+// client := s3.New(sess)
+// resp, err := client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Will pause monitoring
+// r.Pause()
+// resp, err = client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Resume monitoring
+// r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+// r := csm.Get()
+// r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100755
index 0000000..2f0c6ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+ "fmt"
+ "sync"
+)
+
+var (
+ lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+ APICallMetricHandlerName = "awscsm.SendAPICallMetric"
+ APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+// Example:
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+// sess := session.NewSession()
+// r.InjectHandlers(sess.Handlers)
+//
+// svc := s3.New(sess)
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+func Start(clientID string, url string) (*Reporter, error) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if sender == nil {
+ sender = newReporter(clientID, url)
+ } else {
+ if sender.clientID != clientID {
+ panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+ }
+
+ if sender.url != url {
+ panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+ }
+ }
+
+ if err := connect(url); err != nil {
+ sender = nil
+ return nil, err
+ }
+
+ return sender, nil
+}
+
+// Get will return a Reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return sender
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100755
index 0000000..5bacc79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,109 @@
+package csm
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+type metricTime time.Time
+
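+// MarshalJSON encodes the time as integer milliseconds since the Unix epoch.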
+func (t metricTime) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(time.Time(t).UnixNano())
+ return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
+
+type metric struct {
+ ClientID *string `json:"ClientId,omitempty"`
+ API *string `json:"Api,omitempty"`
+ Service *string `json:"Service,omitempty"`
+ Timestamp *metricTime `json:"Timestamp,omitempty"`
+ Type *string `json:"Type,omitempty"`
+ Version *int `json:"Version,omitempty"`
+
+ AttemptCount *int `json:"AttemptCount,omitempty"`
+ Latency *int `json:"Latency,omitempty"`
+
+ Fqdn *string `json:"Fqdn,omitempty"`
+ UserAgent *string `json:"UserAgent,omitempty"`
+ AttemptLatency *int `json:"AttemptLatency,omitempty"`
+
+ SessionToken *string `json:"SessionToken,omitempty"`
+ Region *string `json:"Region,omitempty"`
+ AccessKey *string `json:"AccessKey,omitempty"`
+ HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
+ XAmzID2 *string `json:"XAmzId2,omitempty"`
+ XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
+
+ AWSException *string `json:"AwsException,omitempty"`
+ AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+ SDKException *string `json:"SdkException,omitempty"`
+ SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+ FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
+ FinalAWSException *string `json:"FinalAwsException,omitempty"`
+ FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
+ FinalSDKException *string `json:"FinalSdkException,omitempty"`
+ FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
+
+ DestinationIP *string `json:"DestinationIp,omitempty"`
+ ConnectionReused *int `json:"ConnectionReused,omitempty"`
+
+ AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+ ConnectLatency *int `json:"ConnectLatency,omitempty"`
+ RequestLatency *int `json:"RequestLatency,omitempty"`
+ DNSLatency *int `json:"DnsLatency,omitempty"`
+ TCPLatency *int `json:"TcpLatency,omitempty"`
+ SSLLatency *int `json:"SslLatency,omitempty"`
+
+ MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
+
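+// TruncateFields trims the free-form string fields so they do not exceed
+// their maximum allowed lengths.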
+func (m *metric) TruncateFields() {
+ m.ClientID = truncateString(m.ClientID, 255)
+ m.UserAgent = truncateString(m.UserAgent, 256)
+
+ m.AWSException = truncateString(m.AWSException, 128)
+ m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
+
+ m.SDKException = truncateString(m.SDKException, 128)
+ m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
+
+ m.FinalAWSException = truncateString(m.FinalAWSException, 128)
+ m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
+
+ m.FinalSDKException = truncateString(m.FinalSDKException, 128)
+ m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
+}
+
+func truncateString(v *string, l int) *string {
+ if v != nil && len(*v) > l {
+ nv := (*v)[:l]
+ return &nv
+ }
+
+ return v
+}
+
+func (m *metric) SetException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.AWSException = aws.String(te.exception)
+ m.AWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.SDKException = aws.String(te.exception)
+ m.SDKExceptionMessage = aws.String(te.message)
+ }
+}
+
+func (m *metric) SetFinalException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.FinalAWSException = aws.String(te.exception)
+ m.FinalAWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.FinalSDKException = aws.String(te.exception)
+ m.FinalSDKExceptionMessage = aws.String(te.message)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100755
index 0000000..514fc37
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,54 @@
+package csm
+
+import (
+ "sync/atomic"
+)
+
+const (
+ runningEnum = iota
+ pausedEnum
+)
+
+var (
+ // MetricsChannelSize is the maximum number of metrics to hold in the channel.
+ MetricsChannelSize = 100
+)
+
+type metricChan struct {
+ ch chan metric
+ paused int64
+}
+
+func newMetricChan(size int) metricChan {
+ return metricChan{
+ ch: make(chan metric, size),
+ }
+}
+
+func (ch *metricChan) Pause() {
+ atomic.StoreInt64(&ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+ atomic.StoreInt64(&ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+ v := atomic.LoadInt64(&ch.paused)
+ return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused
+func (ch *metricChan) Push(m metric) bool {
+ if ch.IsPaused() {
+ return false
+ }
+
+ select {
+ case ch.ch <- m:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100755
index 0000000..54a9928
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
+type metricException interface {
+ Exception() string
+ Message() string
+}
+
+type requestException struct {
+ exception string
+ message string
+}
+
+func (e requestException) Exception() string {
+ return e.exception
+}
+func (e requestException) Message() string {
+ return e.message
+}
+
+type awsException struct {
+ requestException
+}
+
+type sdkException struct {
+ requestException
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100755
index 0000000..0b5571a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,260 @@
+package csm
+
+import (
+ "encoding/json"
+ "net"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+ // DefaultPort is used when no port is specified
+ DefaultPort = "31000"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
+type Reporter struct {
+ clientID string
+ url string
+ conn net.Conn
+ metricsCh metricChan
+ done chan struct{}
+}
+
+var (
+ sender *Reporter
+)
+
+func connect(url string) error {
+ const network = "udp"
+ if err := sender.connect(network, url); err != nil {
+ return err
+ }
+
+ if sender.done == nil {
+ sender.done = make(chan struct{})
+ go sender.start()
+ }
+
+ return nil
+}
+
+func newReporter(clientID, url string) *Reporter {
+ return &Reporter{
+ clientID: clientID,
+ url: url,
+ metricsCh: newMetricChan(MetricsChannelSize),
+ }
+}
+
+func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ creds, _ := r.Config.Credentials.Get()
+
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Region: r.Config.Region,
+ Type: aws.String("ApiCallAttempt"),
+ Version: aws.Int(1),
+
+ XAmzRequestID: aws.String(r.RequestID),
+
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
+ AccessKey: aws.String(creds.AccessKeyID),
+ }
+
+ if r.HTTPResponse != nil {
+ m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+ rep.metricsCh.Push(m)
+}
+
+func getMetricException(err awserr.Error) metricException {
+ msg := err.Error()
+ code := err.Code()
+
+ switch code {
+ case "RequestError",
+ "SerializationError",
+ request.CanceledErrorCode:
+ return sdkException{
+ requestException{exception: code, message: msg},
+ }
+ default:
+ return awsException{
+ requestException{exception: code, message: msg},
+ }
+ }
+}
+
+func (rep *Reporter) sendAPICallMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Type: aws.String("ApiCall"),
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ Region: r.Config.Region,
+ Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+ XAmzRequestID: aws.String(r.RequestID),
+ MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
+ }
+
+ if r.HTTPResponse != nil {
+ m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetFinalException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+
+ // TODO: Probably want to figure something out for logging dropped
+ // metrics
+ rep.metricsCh.Push(m)
+}
+
+func (rep *Reporter) connect(network, url string) error {
+ if rep.conn != nil {
+ rep.conn.Close()
+ }
+
+ conn, err := net.Dial(network, url)
+ if err != nil {
+ return awserr.New("UDPError", "Could not connect", err)
+ }
+
+ rep.conn = conn
+
+ return nil
+}
+
+func (rep *Reporter) close() {
+ if rep.done != nil {
+ close(rep.done)
+ }
+
+ rep.metricsCh.Pause()
+}
+
+func (rep *Reporter) start() {
+ defer func() {
+ rep.metricsCh.Pause()
+ }()
+
+ for {
+ select {
+ case <-rep.done:
+ rep.done = nil
+ return
+ case m := <-rep.metricsCh.ch:
+ // TODO: What to do with this error? Probably should just log
+ b, err := json.Marshal(m)
+ if err != nil {
+ continue
+ }
+
+ rep.conn.Write(b)
+ }
+ }
+}
+
+// Pause will pause the metric channel preventing any new metrics from
+// being added.
+func (rep *Reporter) Pause() {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if rep == nil {
+ return
+ }
+
+ rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring
+// to be resumed.
+func (rep *Reporter) Continue() {
+ lock.Lock()
+ defer lock.Unlock()
+ if rep == nil {
+ return
+ }
+
+ if !rep.metricsCh.IsPaused() {
+ return
+ }
+
+ rep.metricsCh.Continue()
+}
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// Example:
+// // Start must be called in order to inject the correct handlers
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+//
+// sess := session.NewSession()
+// r.InjectHandlers(&sess.Handlers)
+//
+// // create a new service client with our client side metric session
+// svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+ if rep == nil {
+ return
+ }
+
+ handlers.Complete.PushFrontNamed(request.NamedHandler{
+ Name: APICallMetricHandlerName,
+ Fn: rep.sendAPICallMetric,
+ })
+
+ handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+ Name: APICallAttemptMetricHandlerName,
+ Fn: rep.sendAPICallAttemptMetric,
+ })
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+ if b {
+ return 1
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100755
index 0000000..23bb639
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
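+//
+// A minimal usage sketch:
+//
+//	d := defaults.Get()
+//	d.Config.WithRegion("us-west-2")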
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: CredProviders(cfg, handlers),
+ })
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, different environment variables for legacy reasons) but still
+// fall back on the default chain of providers. Composing with this slice
+// allows the default chain to be updated automatically by the SDK.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+ return []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ RemoteCredProvider(*cfg, handlers),
+ }
+}
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+ return localHTTPCredProvider(cfg, handlers, u)
+ }
+
+ if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+ u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+ return httpCredProvider(cfg, handlers, u)
+ }
+
+ return ec2RoleProvider(cfg, handlers)
+}
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else {
+ host := aws.URLHostname(parsed)
+ if len(host) == 0 {
+ errMsg = "unable to parse host from local HTTP cred provider URL"
+ } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+ } else if !isLoopback {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ }
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
+ func(p *endpointcreds.Provider) {
+ p.ExpiryWindow = 5 * time.Minute
+ p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ },
+ )
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ resolver := cfg.EndpointResolver
+ if resolver == nil {
+ resolver = endpoints.DefaultResolver()
+ }
+
+ e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+ return &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+ ExpiryWindow: 5 * time.Minute,
+ }
+}
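The CredProviders doc comment above hints at composing a custom chain. The sketch below (an editorial example, not part of the diff) prepends a hypothetical legacyEnvProvider and still falls back on the SDK's default providers:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

// legacyEnvProvider is a hypothetical custom provider; embedding
// EnvProvider just keeps the sketch small.
type legacyEnvProvider struct {
	credentials.EnvProvider
}

// customChain puts the custom provider first, then the SDK's default
// providers, as the CredProviders doc describes.
func customChain() *credentials.Credentials {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	providers := append(
		[]credentials.Provider{&legacyEnvProvider{}},
		defaults.CredProviders(cfg, handlers)...,
	)

	return credentials.NewCredentials(&credentials.ChainProvider{
		Providers: providers,
	})
}

func main() {
	creds := customChain()
	_ = creds // hand to aws.Config{Credentials: creds}
}
```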
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100755
index 0000000..ca0ee1d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
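A tiny usage sketch, assuming only the two exported helpers above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Prints e.g. /home/user/.aws/credentials and /home/user/.aws/config
	// on Linux, or the %USERPROFILE% equivalents on Windows.
	fmt.Println(defaults.SharedCredentialsFilename())
	fmt.Println(defaults.SharedConfigFilename())
}
```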
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100755
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the
+// SDK's APIs use. These utilities make it easier to get a pointer to a scalar
+// value and to dereference a pointer.
+//
+// Each conversion utility comes in two forms: Value to Pointer and Pointer to
+// Value. The Pointer to Value form will safely dereference the pointer and
+// return its value. If the pointer was nil, the scalar's zero value will be
+// returned.
+//
+// The Value to Pointer functions are named after the scalar type. To get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of
+// a literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities
+// for maps and slices of commonly used API parameter types. The map and slice
+// conversion functions follow the same naming pattern as the scalar
+// conversion functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided
+// to the SDK's Session or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application, the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
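Following the package doc's advice, a minimal sketch of sharing an explicit HTTP client instead of relying on http.DefaultClient; the 30-second timeout is an arbitrary assumption:

```go
package main

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Explicit client, so other code mutating http.DefaultClient
	// cannot affect SDK requests.
	httpClient := &http.Client{Timeout: 30 * time.Second}

	sess := session.Must(session.NewSession(
		aws.NewConfig().WithHTTPClient(httpClient),
	))
	_ = sess // pass sess to service client constructors
}
```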
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100755
index 0000000..d57a1af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,169 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance.
+// If there is no user-data set up for the EC2 instance, a "NotFoundError"
+// error code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: "/user-data",
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response
+// cannot be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ if len(resp) == 0 {
+ return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns whether the application has access to the EC2 Metadata
+// service. It can be used to determine if the application is running within
+// an EC2 instance and the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// IAM info from the metadata API.
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document.
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
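A hedged end-to-end sketch of the client methods above, assuming the code runs on an EC2 instance with the metadata service reachable:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	meta := ec2metadata.New(sess)

	// Available is a cheap probe before making further calls.
	if !meta.Available() {
		log.Println("not running on EC2, or metadata service unreachable")
		return
	}

	region, err := meta.Region()
	if err != nil {
		log.Fatal(err)
	}

	doc, err := meta.GetInstanceIdentityDocument()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region, doc.InstanceID)
}
```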
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100755
index 0000000..f4438ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,152 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting this variable to true
+// (case insensitive) instructs the SDK to disable the EC2 Metadata client;
+// the client cannot be used while the variable remains set.
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+// // Create an EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create an EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to
+// true; the override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Disable the EC2 Metadata service if the environment variable is set.
+ // This short-circuits the service's functionality to always fail to send
+ // requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(request.NamedHandler{
+ Name: corehandlers.SendHandler.Name,
+ Fn: func(r *request.Request) {
+ r.HTTPResponse = &http.Response{
+ Header: http.Header{},
+ }
+ r.Error = awserr.New(
+ request.CanceledErrorCode,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error.
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
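A small sketch of the disable switch described in the package doc; it assumes the variable is set before the client is constructed, since the handler swap happens in NewClient:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With the variable set, every request fails fast with a
	// request.CanceledErrorCode error instead of hitting the network.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	meta := ec2metadata.New(session.Must(session.NewSession()))
	if _, err := meta.GetMetadata("instance-id"); err != nil {
		fmt.Println("metadata client disabled:", err)
	}
}
```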
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100755
index 0000000..87b9ff3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,188 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error
+// occurs when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ custFixAppAutoscalingUsGov(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ custAddDualstack(p, "s3")
+ custAddDualstack(p, "s3-control")
+}
+
+func custAddDualstack(p *partition, svcName string) {
+ s, ok := p.Services[svcName]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services[svcName] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+ if e, a := expectHostname, s.Defaults.Hostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+
+ s.Defaults.Hostname = expectHostname + ".cn"
+ p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+ if p.ID != "aws-us-gov" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ if a := s.Defaults.CredentialScope.Service; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+ return
+ }
+
+ if a := s.Defaults.Hostname; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+ return
+ }
+
+ s.Defaults.CredentialScope.Service = "application-autoscaling"
+ s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
+
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
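A usage sketch for DecodeModel, assuming a hypothetical local endpoints.json copy of a version-3 model:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// endpoints.json is a hypothetical local copy of the SDK's
	// version-3 endpoints model.
	f, err := os.Open("endpoints.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	resolver, err := endpoints.DecodeModel(f, func(o *endpoints.DecodeModelOptions) {
		o.SkipCustomizations = true // skip the hand-written fixes above
	})
	if err != nil {
		log.Fatal(err)
	}

	// EnumPartitions exposes the decoded partitions in resolution order.
	for _, p := range resolver.(endpoints.EnumPartitions).Partitions() {
		log.Println(p.ID())
	}
}
```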
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100755
index 0000000..0aacb55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,4278 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // EU (Stockholm).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
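A short sketch of resolving and enumerating with the default partitions; Partition's ID and Regions accessors are assumed from the surrounding endpoints API:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve a single service endpoint.
	resolved, err := endpoints.DefaultResolver().EndpointFor("s3", "us-west-2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resolved.URL)

	// Enumerate the bundled partitions and their regions.
	for _, p := range endpoints.DefaultPartitions() {
		for id := range p.Regions() {
			fmt.Println(p.ID(), id)
		}
	}
}
```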
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Partition for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-east-1": region{
+ Description: "Asia Pacific (Hong Kong)",
+ },
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-north-1": region{
+ Description: "EU (Stockholm)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "eu-west-3": region{
+ Description: "EU (Paris)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "api.ecr.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{
+ Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "api.ecr.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "api.ecr.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "api.ecr.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "api.ecr.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "api.ecr.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "api.ecr.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "api.ecr.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "api.ecr.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.ecr.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "api.ecr.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "api.ecr.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "api.ecr.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.mediatailor": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appsync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "autoscaling-plans",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "chime": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datasync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "mediaconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mq": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "robomaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
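+			// Note (illustrative): the DualStackHostname template above expands by
+			// plain substitution, e.g. "{service}.dualstack.{region}.{dnsSuffix}"
+			// becomes "s3.dualstack.us-east-1.amazonaws.com" for S3 in us-east-1.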
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "s3-control.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "s3-control.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "s3-control.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "s3-control.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "s3-control.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "s3-control.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-control.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3-control.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "s3-control.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-east-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "s3-control.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-control.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-west-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ca-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-north-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-3": endpoint{
+ Protocols: []string{"https"},
+ },
+ "sa-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
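+			// Note (assumption about intent): the default TLS common name is the
+			// region-prefixed queue hostname; the us-east-1 entry below overrides
+			// it to the older "queue.{dnsSuffix}" form.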
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
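+				// Note (assumption): the "local" entry below mirrors the DynamoDB
+				// Local convention of serving the API on localhost:8000 over plain
+				// HTTP, with credentials scoped to us-east-1.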
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
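+			// Note: STS resolves to the global sts.amazonaws.com endpoint (signed
+			// for us-east-1) by default; the regional and FIPS entries below
+			// override that default per region.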
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "sts.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
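+
+// Example (hedged sketch, not part of the generated table): resolving an
+// endpoint from the China partition via this package's exported API.
+// Partition.EndpointFor and ResolvedEndpoint.URL are the public entry
+// points; the service/region identifiers and the use of "fmt" are
+// illustrative only.
+//
+//	p := AwsCnPartition()
+//	if ep, err := p.EndpointFor("s3", "cn-north-1"); err == nil {
+//		fmt.Println(ep.URL) // https://s3.cn-north-1.amazonaws.com.cn
+//	}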
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
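+
+// Example (illustrative sketch): listing the regions modeled for the
+// GovCloud partition. Partition.Regions is part of this package's exported
+// API; only the map keys are relied on here, and "fmt" is assumed imported.
+//
+//	for id := range AwsUsGovPartition().Regions() {
+//		fmt.Println(id) // us-gov-east-1, us-gov-west-1
+//	}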
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-east-1": region{
+ Description: "AWS GovCloud (US-East)",
+ },
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "es-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "s3-control.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-control.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
new file mode 100755
index 0000000..000dd79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
@@ -0,0 +1,141 @@
+package endpoints
+
+// Service identifiers
+//
+// Deprecated: Use client package's EndpointID value instead of these
+// ServiceIDs. These IDs are not maintained, and are out of date.
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AppsyncServiceID = "appsync" // Appsync.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ ChimeServiceID = "chime" // Chime.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ S3ControlServiceID = "s3-control" // S3Control.
+ SagemakerServiceID = "api.sagemaker" // Sagemaker.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TransferServiceID = "transfer" // Transfer.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100755
index 0000000..84316b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions
+// interface gives you access to the list of underlying Partitions via the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and
+// endpoints in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+// for id := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+// for id := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for
+// how endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100755
index 0000000..f82babf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,449 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS, if the service supports it.
+ DisableSSL bool
+
+ // UseDualStack sets the resolver to resolve the endpoint as a dualstack
+ // endpoint for the service. If dualstack support for a service is not
+ // known and StrictMatching is not enabled, a dualstack endpoint for the
+ // service will be returned. This endpoint may not be valid. If
+ // StrictMatching is enabled, only services that are known to support
+ // dualstack will return dualstack endpoints.
+ UseDualStack bool
+
+ // StrictMatching enables strict matching of services and regions when
+ // resolving endpoints. If the partition doesn't enumerate the exact
+ // service and region, an error will be returned. This option will prevent
+ // returning endpoints that look valid, but may not resolve to any real
+ // endpoint.
+ StrictMatching bool
+
+ // ResolveUnknownService enables resolving a service endpoint based on the
+ // region provided if the service does not exist. The service endpoint ID
+ // will be used as the service domain name prefix. By default the endpoint
+ // resolver requires the service to be known when resolving endpoints.
+ //
+ // If resolving an endpoint on the partition list, the provided region will
+ // be used to determine which partition's domain name pattern to use with
+ // the service endpoint ID. If both the service and region are unknown when
+ // resolving the endpoint on the partition list, an UnknownEndpointError
+ // will be returned.
+ //
+ // If resolving an endpoint on a partition specific resolver, that
+ // partition's domain name pattern will be used with the service endpoint
+ // ID. If both region and service do not exist when resolving an endpoint
+ // on a specific partition, the partition's domain pattern will be used to
+ // combine the endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
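+
+// As an illustrative usage sketch (not part of the upstream SDK source),
+// these functional options can be combined when resolving an endpoint:
+//
+//	e, err := endpoints.DefaultResolver().EndpointFor(
+//		"s3", "us-west-2",
+//		endpoints.StrictMatchingOption,
+//		endpoints.UseDualStackOption,
+//	)
+//	if err == nil {
+//		fmt.Println(e.URL) // the resolved endpoint URL
+//	}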
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default
+// HTTPS.
+//
+// In either case the URL's scheme is only set if the URL does not already
+// contain one.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
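+
+// For illustration, a usage sketch:
+//
+//	endpoints.AddScheme("example.com", false)        // "https://example.com"
+//	endpoints.AddScheme("example.com", true)         // "http://example.com"
+//	endpoints.AddScheme("http://example.com", false) // unchanged, scheme already present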
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
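+
+// For illustration, a usage sketch against the SDK's default partitions:
+//
+//	p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "us-gov-west-1")
+//	if ok {
+//		fmt.Println(p.ID()) // "aws-us-gov"
+//	}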
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata an UnknownServiceError
+// will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved, an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id, r := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve
+// an endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text; it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
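+
+// For illustration, a usage sketch (assumes the aws partition's region data
+// includes us-west-2):
+//
+//	usWest2 := endpoints.AwsPartition().Regions()["us-west-2"]
+//	e, err := usWest2.ResolveEndpoint("s3")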
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and the ability to resolve
+// an endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if r, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier of the service the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// awsError wraps the awserr.Error interface so that it can be embedded as an
+// anonymous field in the error types below without conflicting with the
+// error.Error method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns an UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns an UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100755
index 0000000..ff6f76d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,307 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+ // With loose matching, fall back to the first partition's format
+ // when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if !(hasService || opt.ResolveUnknownService) {
+ // Only return an error if the resolver will not fall back to creating
+ // an endpoint based on the service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opt.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint; return a
+ // blank endpoint to be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // SignatureVersions is used to select the signing method.
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
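+// getByPriority returns the first value in the priority list p that is also
+// present in s. If s is empty def is returned, and if none of the priority
+// values match, s[0] is returned.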
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
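+// resolve merges the partition and service default endpoints into e, expands
+// the {service}, {region}, and {dnsSuffix} variables in the hostname, and
+// derives the signing region, name, and method for the ResolvedEndpoint.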
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+ // Use the dualstack hostname if dualstack is enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
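+// UnmarshalJSON decodes a JSON boolean into the boxed tri-state value,
+// allowing an explicit false to be distinguished from an unset value.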
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100755
index 0000000..0fdfcc5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,351 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// CodeGenOptions are the options for generating the endpoints as Go code
+// from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+
+ // Disables code generation of the service endpoint prefix IDs defined in
+ // the model.
+ DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together.
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated or the model cannot be decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ v := struct {
+ Resolver
+ CodeGenOptions
+ }{
+ Resolver: resolver,
+ CodeGenOptions: opts,
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
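+
+// For illustration, a usage sketch; "endpoints.json" is a hypothetical local
+// copy of the endpoints model file:
+//
+//	model, err := os.Open("endpoints.json")
+//	if err != nil {
+//		return err
+//	}
+//	defer model.Close()
+//
+//	var buf bytes.Buffer
+//	err = endpoints.CodeGenModel(model, &buf)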
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" $.Resolver }}
+
+ {{ range $_, $partition := $.Resolver }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ if not $.DisableGenerateServiceIDs -}}
+ {{ template "service consts" $.Resolver }}
+ {{- end }}
+
+ {{ template "endpoint resolvers" $.Resolver }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100755
index 0000000..fa06f7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,13 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100755
index 0000000..91a6f27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab-bag type that will be marshaled
+// into a JSON string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100755
index 0000000..6ed15b2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevel. Should be used to work around
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should
+// be used with logging sub levels. Safe to use on nil LogLevelType values. If
+// the LogLevel is nil, it defaults to a LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Safe to use on nil LogLevelType values. If the LogLevel is nil, it defaults
+// to a LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
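+
+// For illustration, a usage sketch: the debug sub levels embed LogDebug, so
+//
+//	lvl := aws.LogDebugWithHTTPBody
+//	lvl.Matches(aws.LogDebug) // true
+//	lvl.AtLeast(aws.LogDebug) // true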
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests
+ // will be retried. This should be used when you want to log when service
+ // requests are being retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+
+ // LogDebugWithEventStreamBody states the SDK should log EventStream
+ // request and response bodies. This should be used to log the EventStream
+ // wire unmarshaled message content of requests and responses made while
+ // using the SDK. Will also enable LogDebug.
+ LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type that wraps a function taking a variadic
+// list of arguments so that it satisfies the Logger interface.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting flags as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
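+
+// For example (a sketch of user code), the default logger can be attached to a
+// config value:
+//
+//    cfg := aws.NewConfig().WithLogger(aws.NewDefaultLogger())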
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100755
index 0000000..271da43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine,!plan9
+
+package request
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
new file mode 100755
index 0000000..daf9eca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
@@ -0,0 +1,11 @@
+// +build appengine plan9
+
+package request
+
+import (
+ "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+ return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100755
index 0000000..8ef8548
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,277 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalStream HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ CompleteAttempt HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalStream: h.UnmarshalStream.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ CompleteAttempt: h.CompleteAttempt.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalStream.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.CompleteAttempt.Clear()
+ h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic is to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration based on
+ // a condition, such as an error (see HandlerListStopOnError), or for
+ // logging each handler run (see HandlerListLogItem).
+ AfterEachFn func(item HandlerListRunItem) bool
+}
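+
+// As a usage sketch, AfterEachFn can be set to one of the helpers defined
+// later in this file, e.g. to stop running a list's handlers on the first
+// error:
+//
+//    var hl HandlerList
+//    hl.AfterEachFn = HandlerListStopOnError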
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift elements down to avoid allocating a new backing array
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement the index so the next length check is correct
+ i--
+ }
+ }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in, replacing them
+// with the NamedHandler passed in. True is returned if any handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+ var swapped bool
+
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == name {
+ l.list[i] = replace
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
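+
+// For example (with illustrative values), MakeAddToUserAgentHandler("rssmon",
+// "3.0", "linux") returns a handler that appends "rssmon/3.0 (linux)" to the
+// request's User-Agent header.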
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100755
index 0000000..79f7960
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100755
index 0000000..b0c2ef4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,60 @@
+package request
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, sdkio.SeekStart)
+
+ reader.buf = buf
+ return reader
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+ o.Close()
+ return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100755
index 0000000..8f2eb3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,673 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+ // presign is invalid
+ ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given a aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ AttemptTime time.Time
+ Time time.Time
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ // A value greater than 0 instructs the request to be signed as Presigned URL
+ // You should not set this field directly. Instead use Request's
+ // Presign or PresignRequest methods.
+ ExpireTime time.Duration
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wrap the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ SanitizeHostForHeader(httpReq)
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context always returns a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+ if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+ return false
+ }
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and are valid. False is returned if no parameters are provided or they are
+// invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for the response
+// deserialization target has been set and is valid. False is returned if data
+// is not set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers not hoisted without this. Making the presigned URL
+ // useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
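+
+// A usage sketch (assuming req is a Request built by a service client, such as
+// one returned from S3's GetObjectRequest):
+//
+//    urlStr, err := req.Presign(15 * time.Minute)
+//    if err == nil {
+//        fmt.Println("presigned URL:", urlStr)
+//    }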
+
+// PresignRequest behaves just like presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API url.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ }
+
+ var body io.ReadCloser
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if an aws.ReaderSeekerCloser was used with
+ // an io.Reader that was not also an io.Seeker, or did not implement
+ // the Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ if err := r.Error; err != nil {
+ return err
+ }
+
+ for {
+ r.Error = nil
+ r.AttemptTime = time.Now()
+
+ if err := r.Sign(); err != nil {
+ debugLogReqError(r, "Sign Request", false, err)
+ return err
+ }
+
+ if err := r.sendRequest(); err == nil {
+ return nil
+ } else if !shouldRetryCancel(r.Error) {
+ return err
+ } else {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+
+ if r.Error != nil || !aws.BoolValue(r.Retryable) {
+ return r.Error
+ }
+
+ r.prepareRetry()
+ continue
+ }
+ }
+}
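+
+// As a usage sketch (assuming a service client named svc), Send is typically
+// called on a request value returned by a client's *Request method:
+//
+//    req, out := svc.GetObjectRequest(params)
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }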
+
+func (r *Request) prepareRetry() {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+}
+
+func (r *Request) sendRequest() (sendErr error) {
+ defer r.Handlers.CompleteAttempt.Run(r)
+
+ r.Retryable = nil
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+type temporary interface {
+ Temporary() bool
+}
+
+func shouldRetryCancel(err error) bool {
+ switch err := err.(type) {
+ case awserr.Error:
+ if err.Code() == CanceledErrorCode {
+ return false
+ }
+ return shouldRetryCancel(err.OrigErr())
+ case *url.Error:
+ if strings.Contains(err.Error(), "connection refused") {
+ // Refused connections should be retried as the service may not yet
+ // be running on the port. Go TCP dial considers refused
+ // connections as not temporary.
+ return true
+ }
+ // *url.Error only implements Temporary after golang 1.6 but since
+ // url.Error only wraps the error:
+ return shouldRetryCancel(err.Err)
+ case temporary:
+ // If the error is temporary, we want to allow continuation of the
+ // retry process
+ return err.Temporary()
+ case nil:
+ // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+ // because we don't know the cause, it is marked as retriable. See
+ // TestRequest4xxUnretryable for an example.
+ return true
+ default:
+ switch err.Error() {
+ case "net/http: request canceled",
+ "net/http: request canceled while waiting for connection":
+ // known 1.5 error case when an http request is cancelled
+ return false
+ }
+ // here we don't know the error; so we allow a retry.
+ return true
+ }
+}
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
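+
+// For example, a request to https://example.com:443/path has its Host
+// rewritten to "example.com", while a non-default port such as
+// https://example.com:8443 is left untouched.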
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100755
index 0000000..e36e468
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100755
index 0000000..7c6a800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+)
+
+// NoBody is the http.NoBody reader, instructing the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100755
index 0000000..a7365cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100755
index 0000000..307fa07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100755
index 0000000..a633ed5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,264 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// cont := true
+// for p.Next() && cont {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // returned.
+ //
+ // NewRequest should always return a Request built from the same API
+ // operation. It is undefined if different API operations are returned on
+ // subsequent calls.
+ NewRequest func() (*Request, error)
+ // EndPageOnSameToken, when enabled, will allow the paginator to stop on
+ // tokens that are the same as the previous page's tokens.
+ EndPageOnSameToken bool
+
+ started bool
+ prevTokens []interface{}
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
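+
+// A construction sketch (assuming an S3 client named svc; the names are
+// illustrative and not part of this file):
+//
+//    p := request.Pagination{
+//        NewRequest: func() (*request.Request, error) {
+//            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
+//                Bucket: aws.String("mybucket"),
+//            })
+//            return req, nil
+//        },
+//    }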
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved, true will be returned. If the page cannot be retrieved, or
+// there are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred when Next returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100755
index 0000000..d0aa54c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,163 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
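+
+// As a sketch, a custom Retryer can be supplied through the config (assuming a
+// hypothetical noRetry type implementing the Retryer interface above):
+//
+//    cfg := request.WithRetryer(aws.NewConfig(), noRetry{})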
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+ "TransactionInProgressException": {},
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100755
index 0000000..09a44eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// An ErrCodeResponseTimeout error is returned if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace a top level error
+// with its wrapped error when the wrapped error's code is ErrCodeResponseTimeout.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a
+// timeout read closer. This will allow for per-read timeouts. If a timeout
+// occurs, an ErrCodeResponseTimeout error is returned.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // Remove any existing handler so we are not stomping over a previously set duration.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100755
index 0000000..8630683
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
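+
+// A sketch of how a generated API input type typically implements Validator
+// using the types below (the input type and field names are illustrative):
+//
+//    func (s *PutInput) Validate() error {
+//        invalidParams := request.ErrInvalidParams{Context: "PutInput"}
+//        if s.Name == nil {
+//            invalidParams.Add(request.NewErrParamRequired("Name"))
+//        }
+//        if invalidParams.Len() > 0 {
+//            return invalidParams
+//        }
+//        return nil
+//    }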
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
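+//
+// For example (illustrative): with context "ListInput", nested context
+// "Records[0]", and field "Name", Field returns "ListInput.Records[0].Name".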
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+ errInvalidParam
+ max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+ return &ErrParamMaxLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMaxLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("maximum size of %v, %v", max, value),
+ },
+ max: max,
+ }
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+ return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+ return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100755
index 0000000..4601f88
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to the waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
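+//
+// Illustrative sketch:
+//
+//    w.ApplyOptions(
+//        WithWaiterMaxAttempts(40),
+//        WithWaiterDelay(ConstantWaiterDelay(15*time.Second)),
+//    )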
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState is a state the waiter uses, based on WaiterAcceptor definitions,
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API responses to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts expire.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts have been exhausted.
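+//
+// Illustrative sketch (direct use is uncommon; the generated WaitUntil
+// methods configure and call this for you):
+//
+//    err := w.WaitWithContext(aws.BackgroundContext())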
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times.
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
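+//
+// Illustrative sketch (the Argument path and Expected value are hypothetical):
+//
+//    acceptor := WaiterAcceptor{
+//        State:    SuccessWaiterState,
+//        Matcher:  PathAllWaiterMatch,
+//        Argument: "Table.TableStatus",
+//        Expected: "ACTIVE",
+//    }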
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match reports whether the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match; an error is
+// returned if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
new file mode 100755
index 0000000..ea9ebb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns a Transport that should be used when a custom
+// CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
new file mode 100755
index 0000000..fec39df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.6,go1.5
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns a Transport that should be used when a custom
+// CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
new file mode 100755
index 0000000..1c5a539
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
@@ -0,0 +1,23 @@
+// +build !go1.7,go1.6
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns a Transport that should be used when a custom
+// CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100755
index 0000000..38a7b05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using the method above that will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers, with credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE environment variable to be set, or "default" is used.
+
+ // Create Session
+ sess := session.Must(session.NewSession())
+
+ // Create a Session with a custom region
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
+
+ // Create an S3 client instance from a session
+ sess := session.Must(session.NewSession())
+
+ svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
+
+ // Specify profile to load for the session's config
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Profile: "profile_name",
+ }))
+
+ // Specify profile for config and region for requests
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Region: aws.String("us-east-1")},
+ Profile: "profile_name",
+ }))
+
+ // Force enable Shared Config support
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Payload: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
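+
+For example (an illustrative sketch):
+
+    sess, err := session.NewSession()
+    if err != nil {
+        // handle the configuration load error
+    }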
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. When they come from a configuration file, both aws_access_key_id
+and aws_secret_access_key must be provided together in the same file to be
+considered valid. The values will be ignored if not a complete group.
+aws_session_token is an optional field that can be provided if both of the
+other two fields are also provided.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+ role_arn = arn:aws:iam:::role/
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial =
+ role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+ region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To set up assume role outside of a session, see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+ AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it is
+not provided in the environment the region must be provided before a service
+client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not the service client.
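+
+For example, a bundle can also be supplied directly when creating the session
+(an illustrative sketch; the file name is hypothetical):
+
+    f, err := os.Open("my_custom_ca_bundle.pem")
+    if err != nil {
+        // handle error
+    }
+    defer f.Close()
+
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        CustomCABundle: f,
+    }))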
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100755
index 0000000..e3959b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,236 @@
+package session
+
+import (
+ "os"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// EnvProviderName provides the name of the provider when config is loaded from the environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+ // Environment configuration values. If set, both Access Key ID and Secret Access
+ // Key must be provided. Session Token can optionally also be provided, but is
+ // not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+ // Region value will instruct the SDK where to make service API requests to. If it is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option with a custom HTTP client, the HTTP client needs to be
+ // provided when creating the session, not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ csmEnabled string
+ CSMEnabled bool
+ CSMPort string
+ CSMClientID string
+
+ enableEndpointDiscovery string
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+}
+
+var (
+ csmEnabledEnvKey = []string{
+ "AWS_CSM_ENABLED",
+ }
+ csmPortEnvKey = []string{
+ "AWS_CSM_PORT",
+ }
+ csmClientIDEnvKey = []string{
+ "AWS_CSM_CLIENT_ID",
+ }
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ enableEndpointDiscoveryEnvKey = []string{
+ "AWS_ENABLE_ENDPOINT_DISCOVERY",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+ // CSM environment variables
+ setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+ setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+ setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+ cfg.CSMEnabled = len(cfg.csmEnabled) > 0
+
+ // Require logical grouping of credentials
+ if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+ cfg.Creds = credentials.Value{}
+ } else {
+ cfg.Creds.ProviderName = EnvProviderName
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+ // Endpoint discovery is enabled if the env var is set to anything but "false".
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+ if len(cfg.enableEndpointDiscovery) > 0 {
+ cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+ }
+
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+ if len(cfg.SharedCredentialsFile) == 0 {
+ cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(cfg.SharedConfigFile) == 0 {
+ cfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+ return cfg
+}
+
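+// setFromEnvVal assigns to dst the value of the first environment variable in
+// keys that is set to a non-empty value, so earlier keys take precedence
+// (e.g. AWS_REGION over AWS_DEFAULT_REGION).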
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100755
index 0000000..be4b5f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,719 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/csm"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+const (
+ // ErrCodeSharedConfig represents an error that occurs in the shared
+ // configuration logic
+ ErrCodeSharedConfig = "SharedConfigErr"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// It is safe to create service clients from a Session concurrently, but it is
+// not safe to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method could now encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+ // load initial config from environment
+ envCfg := loadEnvConfig()
+
+ if envCfg.EnableSharedConfig {
+ var cfg aws.Config
+ cfg.MergeIn(cfgs...)
+ s, err := NewSessionWithOptions(Options{
+ Config: cfg,
+ SharedConfigState: SharedConfigEnable,
+ })
+ if err != nil {
+ // Old session.New expected all errors to be discovered when
+ // a request is made, and would report the errors then. This
+ // needs to be replicated if an error occurs while creating
+ // the session.
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+ "Use session.NewSession to handle errors occurring during session creation."
+
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s = &Session{Config: defaults.Config()}
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+ }
+
+ return s
+ }
+
+ s := deprecatedNewSession(cfgs...)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set with this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+ // If not set, configuration values from SDK defaults, environment, and
+ // config files will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // Ordered list of files the session will load configuration from.
+ // It overrides the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE environment variables.
+ SharedConfigFiles []string
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+ // token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+ // the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+ // returned. If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The Session option CustomCABundle is also available when creating sessions
+ // to also enable this feature. CustomCABundle session option field has priority
+ // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg = loadSharedEnvConfig()
+ } else {
+ envCfg = loadEnvConfig()
+ }
+
+ if len(opts.Profile) > 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ return s
+}
+
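+// enableCSM enables client-side metrics, starting the CSM reporter on
+// localhost at the given port (or csm.DefaultPort if empty) and injecting its
+// handlers into the session's handlers. Errors starting the reporter are
+// silently ignored.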
+func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
+ logger.Log("Enabling CSM")
+ if len(port) == 0 {
+ port = csm.DefaultPort
+ }
+
+ r, err := csm.Start(clientID, "127.0.0.1:"+port)
+ if err != nil {
+ return
+ }
+ r.InjectHandlers(handlers)
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Get a merged version of the user provided config to determine if
+ // credentials were set.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+
+ // Ordered config files will be loaded in order, with later files overwriting
+ // previous config file values.
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+ // Nil transport implies `http.DefaultTransport` should be used. Since
+ // the SDK cannot modify nor copy the `DefaultTransport`, specifying
+ // equivalent values is the next closest behavior.
+ t = getCABundleTransport()
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+ // Merge in user provided configuration
+ cfg.MergeIn(userCfg)
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ if cfg.EnableEndpointDiscovery == nil {
+ if envCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+ } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+ }
+ }
+
+ // Configure credentials if not already set
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+
+ // inspect the profile to see if a credential source has been specified.
+ if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
+
+ // if both credential_source and source_profile have been set, return an error
+ // as this is undefined behavior.
+ if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
+ return ErrSharedConfigSourceCollision
+ }
+
+ // valid credential source values
+ const (
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+ )
+
+ switch sharedCfg.AssumeRole.CredentialSource {
+ case credSourceEc2Metadata:
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ cfgCp.Credentials = credentials.NewCredentials(p)
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ case credSourceEnvironment:
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ case credSourceECSContainer:
+ if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+ return ErrSharedConfigECSContainerEnvVarEmpty
+ }
+
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ creds := credentials.NewCredentials(p)
+
+ cfg.Credentials = creds
+ default:
+ return ErrSharedConfigInvalidCredSource
+ }
+
+ return nil
+ }
+
+ if len(envCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+ cfgCp := *cfg
+ cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.AssumeRoleSource.Creds,
+ )
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+ } else if len(sharedCfg.CredentialProcess) > 0 {
+ cfg.Credentials = processcreds.NewCredentials(
+ sharedCfg.CredentialProcess,
+ )
+ } else {
+ // Fallback to default credentials provider, include mock errors
+ // for the credential chain so user can identify why credentials
+ // failed to be retrieved.
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+ &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+ defaults.RemoteCredProvider(*cfg, handlers),
+ },
+ })
+ }
+ }
+
+ return nil
+}
+
+func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
+ return stscreds.NewCredentials(
+ &Session{
+ Config: &cfg,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.AssumeRole.RoleARN,
+ func(opt *stscreds.AssumeRoleProvider) {
+ opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+ // Assume role with external ID
+ if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+ opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
+ },
+ )
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the AssumeRoleTokenProvider option is not set and the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+ return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+ Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ // Backwards compatibility: the error will be eaten if the user calls
+ // ClientConfig directly. All SDK services will use clientConfigWithErr.
+ cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+ return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+ var err error
+
+ region := aws.StringValue(s.Config.Region)
+
+ if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+ resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ } else {
+ resolved, err = s.Config.EndpointResolver.EndpointFor(
+ serviceName, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+
+ region := aws.StringValue(s.Config.Region)
+
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100755
index 0000000..7cb4402
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,329 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+
+ "github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+ // External Credential Process
+ credentialProcessKey = `credential_process`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+ RoleARN string
+ SourceProfile string
+ CredentialSource string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+}
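+
+// An illustrative shared config profile exercising the assume role keys
+// defined above (the ARN and profile names are made up):
+//
+//    [profile assumerole]
+//    role_arn = arn:aws:iam::123456789012:role/example
+//    source_profile = default
+//    mfa_serial = arn:aws:iam::123456789012:mfa/user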
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ AssumeRole assumeRoleConfig
+ AssumeRoleSource *sharedConfig
+
+ // An external process to request credentials
+ CredentialProcess string
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region
+ Region string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ if err = cfg.setFromIniFiles(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+
+ if len(cfg.AssumeRole.SourceProfile) > 0 {
+ if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ } else if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: sections,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+ var assumeRoleSrc sharedConfig
+
+ if len(cfg.AssumeRole.CredentialSource) > 0 {
+ // setAssumeRoleSource is only called when source_profile is found.
+ // If both source_profile and credential_source are set, then
+ // ErrSharedConfigSourceCollision will be returned
+ return ErrSharedConfigSourceCollision
+ }
+
+ // Multiple level assume role chains are not supported.
+ if cfg.AssumeRole.SourceProfile == origProfile {
+ assumeRoleSrc = *cfg
+ assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+ } else {
+ err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+ return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+ }
+
+ cfg.AssumeRoleSource = &assumeRoleSrc
+
+ return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+ // Apply each file in order, skipping files where the profile does not exist.
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore missing profiles.
+ continue
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only complete, logically grouped values are loaded; fields in cfg are not
+// set for incomplete groups, such as credentials. For example, if a config
+// file only includes aws_access_key_id but no aws_secret_access_key, the
+// aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+ section, ok := file.IniData.GetSection(profile)
+ if !ok {
+ // Fall back to the alternate profile name: "profile <name>"
+ section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if !ok {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+ }
+ }
+
+ // Shared Credentials
+ akid := section.String(accessKeyIDKey)
+ secret := section.String(secretAccessKey)
+ if len(akid) > 0 && len(secret) > 0 {
+ cfg.Creds = credentials.Value{
+ AccessKeyID: akid,
+ SecretAccessKey: secret,
+ SessionToken: section.String(sessionTokenKey),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ }
+
+ // Assume Role
+ roleArn := section.String(roleArnKey)
+ srcProfile := section.String(sourceProfileKey)
+ credentialSource := section.String(credentialSourceKey)
+ hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
+ if len(roleArn) > 0 && hasSource {
+ cfg.AssumeRole = assumeRoleConfig{
+ RoleARN: roleArn,
+ SourceProfile: srcProfile,
+ CredentialSource: credentialSource,
+ ExternalID: section.String(externalIDKey),
+ MFASerial: section.String(mfaSerialKey),
+ RoleSessionName: section.String(roleSessionNameKey),
+ }
+ }
+
+ // `credential_process`
+ if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+ cfg.CredentialProcess = credProc
+ }
+
+ // Region
+ if v := section.String(regionKey); len(v) > 0 {
+ cfg.Region = v
+ }
+
+ // Endpoint discovery
+ if section.Has(enableEndpointDiscoveryKey) {
+ v := section.Bool(enableEndpointDiscoveryKey)
+ cfg.EnableEndpointDiscovery = &v
+ }
+
+ return nil
+}
+
+// SharedConfigLoadError is an error returned when the shared config file
+// fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+ e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100755
index 0000000..244c86d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and report whether any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
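+
+// For example, a rules value wrapping a whitelist over a mapRule reports true
+// only for keys present in the map (an illustrative composition):
+//
+//    r := rules{whitelist{mapRule{"Host": struct{}{}}}}
+//    r.IsValid("Host") // true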
+
+// mapRule is a generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules allows for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100755
index 0000000..6aa2ed2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the signer's UnsignedPayload field
+// to true.
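+//
+// Illustrative use with NewSigner (the credentials value is assumed to
+// already exist):
+//
+//    signer := NewSigner(creds, WithUnsignedPayload)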
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100755
index 0000000..bd082e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
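+// getURIPath returns the URI path for the canonical request. For opaque URLs
+// of the form "//<hostname>/<path>" it keeps only the path portion; otherwise
+// it uses the URL's escaped path, defaulting to "/".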
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100755
index 0000000..523db79
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,796 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and using the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fallback to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is preferable
+// to explicitly escape the request prior to signing, which will help prevent
+// signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a set of rules for headers that may be hoisted to
+// a presigned request's query string: X-Amz-* headers that are not already
+// in the requiredSignedHeaders whitelist.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+ // The logger that logging information will be written to. If the logger
+ // is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+ // sent in the request. You need to ensure that the underlying data of the
+ // Body values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
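+//
+// For example (a minimal sketch; the static credential values are
+// placeholders):
+//
+//    signer := NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))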
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
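+//
+// Illustrative call (the URL, service, and region are example values only):
+//
+//    req, _ := http.NewRequest("GET", "https://dynamodb.us-east-1.amazonaws.com/", nil)
+//    _, err := signer.Sign(req, nil, "dynamodb", "us-east-1", time.Now())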
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
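+//
+// Illustrative call presigning a request for 15 minutes (service and region
+// are example values only):
+//
+//    hdrs, err := signer.Presign(req, nil, "s3", "us-west-2", 15*time.Minute, time.Now())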
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: isPresign,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.sanitizeHostForHeader()
+ ctx.assignAmzQueryValues()
+ if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+ return nil, err
+ }
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+ request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+ // The request was previously presigned. The current signature is invalid
+ // and needs to be removed so the request can be signed again; otherwise
+ // the request will fail.
+ ctx.removePresign()
+
+ // Update the request's query string to ensure the values stay in
+ // sync in the case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built-in service
+// clients' API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// that the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+ // Skip signing the request if the AnonymousCredentials object is used.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ // Prevents setting the HTTPRequest's Body, since the Body could be
+ // wrapped in a custom io.Closer that we do not want the signer to
+ // stomp on.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ curTime := curTimeFn()
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ if err := ctx.buildBodyDigest(); err != nil {
+ return err
+ }
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+
+ return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+ ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ }
+}
+
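+// buildCredentialString builds the credential scope string in the form
+// "<yyyymmdd>/<region>/<service>/aws4_request" used in both the Authorization
+// header and the presigned query string.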
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = strings.Join([]string{
+ ctx.formattedShortTime,
+ ctx.Region,
+ ctx.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(ctx.SignedHeaderVals[k], ",")
+ }
+ }
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ ctx.formattedTime,
+ ctx.credentialString,
+ hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
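+// buildSignature derives the SigV4 signing key by chaining HMAC-SHA256 over
+// "AWS4"+secret, the short date, region, service, and "aws4_request", then
+// signs the string to sign with the derived key.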
+func (ctx *signingCtx) buildSignature() {
+ secret := ctx.credValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+ region := makeHmac(date, []byte(ctx.Region))
+ service := makeHmac(region, []byte(ctx.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ includeSHA256Header := ctx.unsignedPayload ||
+ ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "glacier"
+
+ s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+ if ctx.unsignedPayload || s3Presign {
+ hash = "UNSIGNED-PAYLOAD"
+ includeSHA256Header = !s3Presign
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ if !aws.IsReaderSeekable(ctx.Body) {
+ return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+ }
+ hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+ }
+
+ if includeSHA256Header {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+
+ return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes the presigned signing values from the request's
+// query string.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+ hash := sha256.New()
+ start, _ := reader.Seek(0, sdkio.SeekCurrent)
+ defer reader.Seek(start, sdkio.SeekStart)
+
+ // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+ // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+ size, err := aws.SeekerLen(reader)
+ if err != nil {
+ io.Copy(hash, reader)
+ } else {
+ io.CopyN(hash, reader, size)
+ }
+
+ return hash.Sum(nil)
+}
+
+const doubleSpace = " "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
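+//
+// For example, "  a   b " becomes "a b": leading and trailing spaces are
+// trimmed, and interior runs of spaces collapse to a single space.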
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ vals[i] = string(buf[:m])
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100755
index 0000000..8b6f234
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,201 @@
+package aws
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. It should
+// only be used with an io.Reader that is also an io.Seeker; otherwise it may
+// cause request signature errors, or request bodies not being sent for GET,
+// HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader type can be seeked.
+// An io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ return v.IsSeeker()
+ case *ReaderSeekerCloser:
+ return v.IsSeeker()
+ case io.ReadSeeker:
+ return true
+ default:
+ return false
+ }
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the underlying value is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return seekerLen(s)
+ }
+
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) {
+ case ReaderSeekerCloser:
+ return v.GetLen()
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := s.Seek(0, sdkio.SeekEnd)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = s.Seek(curOffset, sdkio.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
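+//
+// Minimal illustrative use (a sketch, not from the SDK docs):
+//
+//    buf := NewWriteAtBuffer([]byte{})
+//    buf.WriteAt([]byte("hello"), 0) // buf.Bytes() == []byte("hello")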
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided.
+// The number of bytes written will be returned, or an error. Can overwrite
+// previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100755
index 0000000..6192b24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100755
index 0000000..0210d27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100755
index 0000000..3e2ee89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.19.21"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100755
index 0000000..e83a998
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// AST is a node in the parse tree whose Kind lets us determine what kind of
+// node we are on, so casting may not be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
new file mode 100755
index 0000000..0895d53
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
new file mode 100755
index 0000000..0b76999
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next rune(s) start a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many runes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
new file mode 100755
index 0000000..25ce0fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -0,0 +1,29 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
new file mode 100755
index 0000000..04345a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is a zero-value Token used as a placeholder.
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
new file mode 100755
index 0000000..91ba2a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey returns the LHS value of an equal expression.
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
new file mode 100755
index 0000000..8d462f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
new file mode 100755
index 0000000..3b0ca7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// OpenFile opens the file at the given path and parses
+// its contents.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
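+
+// Illustrative usage (a sketch, not part of the upstream source; the
+// profile and key names below are hypothetical):
+//
+// sections, err := ParseBytes([]byte("[default]\nregion = us-west-2\n"))
+// if err != nil {
+// panic(err)
+// }
+//
+// if section, ok := sections.GetSection("default"); ok {
+// fmt.Println(section.String("region")) // "us-west-2"
+// }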
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100755
index 0000000..582c024
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // ErrCodeUnableToReadFile is used when a file fails to be
+ // opened or read.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the different token types.
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token carries metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw returns the raw runes that were consumed.
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
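+
+// Illustrative note (not part of the upstream source): lexing the line
+// "key = value" produces five tokens:
+//
+// lit("key") ws(" ") op("=") ws(" ") lit("value")
+//
+// Comments, separators ('[' and ']'), commas, and newlines are emitted
+// as their own token types by the dispatch in tokenize above.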
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
new file mode 100755
index 0000000..f997033
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,347 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: map[TokenType]int{
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: map[TokenType]int{
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
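+
+// Illustrative note (not part of the upstream source): parseTable is
+// indexed by the AST kind on top of the parse stack and the type of the
+// lookahead token. For example, a literal token seen in the start state
+// transitions to StatementState:
+//
+// parseTable[ASTKindStart][TokenLit] // == StatementState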
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ // being in a skip state with no tokens will break out of
+ // the parse loop since there is nothing left to process.
+ if len(tokens) == 0 {
+ break loop
+ }
+
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+ // Finished parsing. Push what should be the last
+ // statement to the stack. If there is anything left
+ // on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ k = trimSpaces(k)
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+ // assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExpr:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stack.Push(k)
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ k.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ k = trimSpaces(k)
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+ return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
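+
+// Illustrative note (not part of the upstream source): trimSpaces only
+// touches the AST's root token. Given an AST whose Root.raw is
+// "  profile default  ", the returned AST's Root.raw is
+// "profile default"; interior whitespace is preserved.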
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100755
index 0000000..24df543
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,324 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue returns whether the leading characters in
+// a rune slice form a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+ negativeIndex++
+
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return false
+ }
+ helper.Determine(b[i])
+ needDigit = true
+ continue
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ negativeIndex = 0
+ needDigit = true
+ continue
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ needDigit = true
+ if i == 0 {
+ return false
+ }
+
+ fallthrough
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ needDigit = true
+ continue
+ }
+
+ if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+ return !needDigit
+ }
+
+ if !helper.CorrectByte(b[i]) {
+ return false
+ }
+ needDigit = false
+ }
+
+ return !needDigit
+}
+
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+ // TODO: should probably return an error
+ return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+ v.Type = StringType
+ v.str = string(raw)
+ err = nil
+ }
+
+ return v, err
+}
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+ v.Type = StringType
+ r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+ v.raw = append(v.raw, tok.Raw()...)
+ } else {
+ v.raw = append(v.raw, r...)
+ }
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+ n, err = getStringValue(b)
+ if err != nil {
+ return token, n, err
+ }
+
+ token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+ var base int
+ base, n, err = getNumericalValue(b)
+ if err != nil {
+ return token, 0, err
+ }
+
+ value := b[:n]
+ vType := IntegerType
+ if contains(value, '.') || hasExponent(value) {
+ vType = DecimalType
+ }
+ token = newToken(TokenLit, value, vType)
+ token.base = base
+ } else if isBoolValue(b) {
+ n, err = getBoolValue(b)
+
+ token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+ n, err = getValue(b)
+ token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+ return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+ // preserve all characters in the quotes
+ return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+ if runes[i] == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+ return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+ if v1[i] != v2[i] {
+ return false
+ }
+ }
+
+ return true
+}
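+
+// Illustrative usage (a sketch, not part of the upstream source):
+//
+// v, _ := newValue(IntegerType, 10, []rune("42"))
+// v.IntValue() // 42
+//
+// q, _ := newValue(QuotedStringType, 10, []rune(`"hello"`))
+// q.StringValue() // hello, without the surrounding quotes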
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
new file mode 100755
index 0000000..e52ac39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
new file mode 100755
index 0000000..a45c0bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
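+
+// Illustrative trace (not part of the upstream source): while scanning
+// "0x1F", Determine('x') sets the hex format, so Base() reports 16 and
+// CorrectByte accepts the hex digits that follow.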
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
new file mode 100755
index 0000000..8a84c7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100755
index 0000000..4572870
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+ // ErrCodeParseError is returned when a parsing error
+ // has occurred.
+ ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+ return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+ return err.msg
+}
+
+// OrigError return nothing since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+ return nil
+}
+
+func (err *ParseError) Error() string {
+ return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100755
index 0000000..7f01cf7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack of ASTs. The container holds the working
+// stack, and the list holds the ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
new file mode 100755
index 0000000..f82095b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
new file mode 100755
index 0000000..6bb6964
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+
+ s.Continue()
+ return false
+ }
+ s.prevTok = tok
+
+ return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+ s.prevTok = emptyToken
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+ s.prevTok = emptyToken
+}
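+
+// Illustrative trace (a sketch, not part of the upstream source;
+// wsTok, nlTok, and litTok are hypothetical tokens of the named types):
+//
+// s := newSkipper()
+// s.Skip()
+// s.ShouldSkip(wsTok)  // true: still inside the skipped block
+// s.ShouldSkip(nlTok)  // true: records the newline
+// s.ShouldSkip(litTok) // false: newline then non-whitespace resumes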
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
new file mode 100755
index 0000000..18f3fe8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
new file mode 100755
index 0000000..305999d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue returns the number of runes consumed by a quoted
+// string, including both quote characters.
+//
+// An error is returned if the string is not properly terminated.
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue returns the number of runes consumed by a boolean
+// literal.
+//
+// An error is returned if the runes do not form a valid boolean
+// value.
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue returns the base of the number and the number
+// of runes consumed by it.
+//
+// An error is returned if the runes do not form a valid number.
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit returns whether the rune is a decimal digit.
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber returns the length of a negative number's
+// prefix (the '-' and every digit that follows it), iterating
+// until a non-digit has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+ case 't': // tab
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
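+
+// Illustrative usage (a sketch, not part of the upstream source):
+//
+// removeEscapedCharacters([]rune(`a\nb`)) // 'a', '\n', 'b'
+//
+// The backslash-n pair is collapsed in place into a real newline; a
+// sequence that is not a recognized escape is left unchanged.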
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100755
index 0000000..94841c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, visiting builds sections and populates the
+// Sections field, which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+ scope string
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a new DefaultVisitor.
+func NewDefaultVisitor() *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ }
+}
+
+// VisitExpr visits expressions and stores their parsed values in the
+// current section.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ if rhs.Root.Type() != TokenLit {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ t.values[key] = v
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits completed section statements and updates the
+// visitor's current scope.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned as the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+ Name string
+ values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType returns what type the union is set to. If
+// k was not found, NoneType is returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100755
index 0000000..99915f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using the v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100755
index 0000000..7ffb4ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace returns whether the rune is a whitespace
+// character.
+//
+// Whitespace is defined as any Unicode space character other than
+// '\n' or '\r'.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100755
index 0000000..0b9b0df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+ awserr.RequestFailure
+
+ hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+ return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+ r.StatusCode(), r.RequestID(), r.hostID)
+ return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+ return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+ return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (the X-Amz-Id-2
+// header) from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+ return request.NamedHandler{
+ Name: "awssdk.s3.errorHandler",
+ Fn: func(req *request.Request) {
+ reqErr, ok := req.Error.(awserr.RequestFailure)
+ if !ok || reqErr == nil {
+ return
+ }
+
+ hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+ if req.Error == nil {
+ return
+ }
+
+ req.Error = NewRequestFailure(reqErr, hostID)
+ },
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100755
index 0000000..5aa9137
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+ SeekStart = 0 // seek relative to the origin of the file
+ SeekCurrent = 1 // seek relative to the current offset
+ SeekEnd = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100755
index 0000000..e5f0056
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+ SeekStart = io.SeekStart // seek relative to the origin of the file
+ SeekCurrent = io.SeekCurrent // seek relative to the current offset
+ SeekEnd = io.SeekEnd // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100755
index 0000000..0c9802d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
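+
+// Illustrative usage (a sketch, not part of the upstream source):
+// SeededRand is safe to share across goroutines because lockedSource
+// serializes access to the underlying rand.Source.
+//
+// jitter := SeededRand.Int63n(100) // e.g. retry backoff jitter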
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100755
index 0000000..38ea61a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+ "path"
+ "strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+ if len(elems) == 0 {
+ return ""
+ }
+
+ hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+ str := path.Join(elems...)
+ if hasTrailing && str != "/" {
+ str += "/"
+ }
+
+ return str
+}
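+
+// Illustrative usage (a sketch, not part of the upstream source):
+//
+// PathJoin("bucket", "key/") // "bucket/key/", trailing "/" preserved
+// PathJoin("bucket", "key")  // "bucket/key"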
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100755
index 0000000..7da8a49
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+ // ECSCredsProviderEnvVar is an environmental variable key used to
+ // determine which path needs to be hit.
+ ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process behaves correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100755
index 0000000..ebcbc2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
new file mode 100755
index 0000000..ecc7bf8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
@@ -0,0 +1,144 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+type decodedMessage struct {
+ rawMessage
+ Headers decodedHeaders `json:"headers"`
+}
+type jsonMessage struct {
+ Length json.Number `json:"total_length"`
+ HeadersLen json.Number `json:"headers_length"`
+ PreludeCRC json.Number `json:"prelude_crc"`
+ Headers decodedHeaders `json:"headers"`
+ Payload []byte `json:"payload"`
+ CRC json.Number `json:"message_crc"`
+}
+
+func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
+ var jsonMsg jsonMessage
+ if err = json.Unmarshal(b, &jsonMsg); err != nil {
+ return err
+ }
+
+ d.Length, err = numAsUint32(jsonMsg.Length)
+ if err != nil {
+ return err
+ }
+ d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
+ if err != nil {
+ return err
+ }
+ d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
+ if err != nil {
+ return err
+ }
+ d.Headers = jsonMsg.Headers
+ d.Payload = jsonMsg.Payload
+ d.CRC, err = numAsUint32(jsonMsg.CRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *decodedMessage) MarshalJSON() ([]byte, error) {
+ jsonMsg := jsonMessage{
+ Length: json.Number(strconv.Itoa(int(d.Length))),
+ HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
+ PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
+ Headers: d.Headers,
+ Payload: d.Payload,
+ CRC: json.Number(strconv.Itoa(int(d.CRC))),
+ }
+
+ return json.Marshal(jsonMsg)
+}
+
+func numAsUint32(n json.Number) (uint32, error) {
+ v, err := n.Int64()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get int64 json number, %v", err)
+ }
+
+ return uint32(v), nil
+}
+
+func (d decodedMessage) Message() Message {
+ return Message{
+ Headers: Headers(d.Headers),
+ Payload: d.Payload,
+ }
+}
+
+type decodedHeaders Headers
+
+func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
+ var jsonHeaders []struct {
+ Name string `json:"name"`
+ Type valueType `json:"type"`
+ Value interface{} `json:"value"`
+ }
+
+ decoder := json.NewDecoder(bytes.NewReader(b))
+ decoder.UseNumber()
+ if err := decoder.Decode(&jsonHeaders); err != nil {
+ return err
+ }
+
+ var headers Headers
+ for _, h := range jsonHeaders {
+ value, err := valueFromType(h.Type, h.Value)
+ if err != nil {
+ return err
+ }
+ headers.Set(h.Name, value)
+ }
+ (*hs) = decodedHeaders(headers)
+
+ return nil
+}
+
+func valueFromType(typ valueType, val interface{}) (Value, error) {
+ switch typ {
+ case trueValueType:
+ return BoolValue(true), nil
+ case falseValueType:
+ return BoolValue(false), nil
+ case int8ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int8Value(int8(v)), err
+ case int16ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int16Value(int16(v)), err
+ case int32ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int32Value(int32(v)), err
+ case int64ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int64Value(v), err
+ case bytesValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return BytesValue(v), err
+ case stringValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return StringValue(string(v)), err
+ case timestampValueType:
+ v, err := val.(json.Number).Int64()
+ return TimestampValue(timeFromEpochMilli(v)), err
+ case uuidValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ var tv UUIDValue
+ copy(tv[:], v)
+ return tv, err
+ default:
+ panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
new file mode 100755
index 0000000..4b972b2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
@@ -0,0 +1,199 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// Decoder provides decoding of Event Stream messages.
+type Decoder struct {
+ r io.Reader
+ logger aws.Logger
+}
+
+// NewDecoder initializes and returns a Decoder for decoding event
+// stream messages from the reader provided.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ r: r,
+ }
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// Will return the event stream message, or error if Decode fails to read
+// the message from the stream.
+func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
+ reader := d.r
+ if d.logger != nil {
+ debugMsgBuf := bytes.NewBuffer(nil)
+ reader = io.TeeReader(reader, debugMsgBuf)
+ defer func() {
+ logMessageDecode(d.logger, debugMsgBuf, m, err)
+ }()
+ }
+
+ crc := crc32.New(crc32IEEETable)
+ hashReader := io.TeeReader(reader, crc)
+
+ prelude, err := decodePrelude(hashReader, crc)
+ if err != nil {
+ return Message{}, err
+ }
+
+ if prelude.HeadersLen > 0 {
+ lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
+ m.Headers, err = decodeHeaders(lr)
+ if err != nil {
+ return Message{}, err
+ }
+ }
+
+ if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
+ buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
+ if err != nil {
+ return Message{}, err
+ }
+ m.Payload = buf
+ }
+
+ msgCRC := crc.Sum32()
+ if err := validateCRC(reader, msgCRC); err != nil {
+ return Message{}, err
+ }
+
+ return m, nil
+}
+
+// UseLogger specifies the Logger that the decoder should use to log
+// message decodes.
+func (d *Decoder) UseLogger(logger aws.Logger) {
+ d.logger = logger
+}
+
+func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
+ w := bytes.NewBuffer(nil)
+ defer func() { logger.Log(w.String()) }()
+
+ fmt.Fprintf(w, "Raw message:\n%s\n",
+ hex.Dump(msgBuf.Bytes()))
+
+ if decodeErr != nil {
+ fmt.Fprintf(w, "Decode error: %v\n", decodeErr)
+ return
+ }
+
+ rawMsg, err := msg.rawMessage()
+ if err != nil {
+ fmt.Fprintf(w, "failed to create raw message, %v\n", err)
+ return
+ }
+
+ decodedMsg := decodedMessage{
+ rawMessage: rawMsg,
+ Headers: decodedHeaders(msg.Headers),
+ }
+
+ fmt.Fprintf(w, "Decoded message:\n")
+ encoder := json.NewEncoder(w)
+ if err := encoder.Encode(decodedMsg); err != nil {
+ fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
+ }
+}
+
+func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
+ var p messagePrelude
+
+ var err error
+ p.Length, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.HeadersLen, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ if err := p.ValidateLens(); err != nil {
+ return messagePrelude{}, err
+ }
+
+ preludeCRC := crc.Sum32()
+ if err := validateCRC(r, preludeCRC); err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.PreludeCRC = preludeCRC
+
+ return p, nil
+}
+
+func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
+ w := bytes.NewBuffer(buf[0:0])
+
+ _, err := io.Copy(w, r)
+ return w.Bytes(), err
+}
+
+func decodeUint8(r io.Reader) (uint8, error) {
+ type byteReader interface {
+ ReadByte() (byte, error)
+ }
+
+ if br, ok := r.(byteReader); ok {
+ v, err := br.ReadByte()
+ return uint8(v), err
+ }
+
+ var b [1]byte
+ _, err := io.ReadFull(r, b[:])
+ return uint8(b[0]), err
+}
+func decodeUint16(r io.Reader) (uint16, error) {
+ var b [2]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(bs), nil
+}
+func decodeUint32(r io.Reader) (uint32, error) {
+ var b [4]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(bs), nil
+}
+func decodeUint64(r io.Reader) (uint64, error) {
+ var b [8]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(bs), nil
+}
+
+func validateCRC(r io.Reader, expect uint32) error {
+ msgCRC, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ if msgCRC != expect {
+ return ChecksumError{}
+ }
+
+ return nil
+}
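
The decode path above implies the wire framing: a 12-byte prelude (big-endian total length, headers length, and a CRC32-IEEE of those 8 bytes), then the headers block, the payload, and a trailing CRC32 covering everything before it. A minimal sketch that hand-builds the smallest legal frame (16 bytes, no headers or payload) and runs it through the Decoder; the byte layout follows decodePrelude and validateCRC above, the rest is illustrative:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	// Hand-build the smallest legal frame: 16 bytes, no headers, no payload.
	var frame bytes.Buffer
	binary.Write(&frame, binary.BigEndian, uint32(16)) // total length
	binary.Write(&frame, binary.BigEndian, uint32(0))  // headers length
	preludeCRC := crc32.ChecksumIEEE(frame.Bytes())    // CRC of the 8 prelude bytes
	binary.Write(&frame, binary.BigEndian, preludeCRC)
	// The message CRC covers the prelude, its CRC, headers, and payload.
	msgCRC := crc32.ChecksumIEEE(frame.Bytes())
	binary.Write(&frame, binary.BigEndian, msgCRC)

	msg, err := eventstream.NewDecoder(&frame).Decode(nil)
	fmt.Println(len(msg.Headers), len(msg.Payload), err) // 0 0 <nil>
}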
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
new file mode 100755
index 0000000..150a609
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
@@ -0,0 +1,114 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "hash"
+ "hash/crc32"
+ "io"
+)
+
+// Encoder provides EventStream message encoding.
+type Encoder struct {
+ w io.Writer
+
+ headersBuf *bytes.Buffer
+}
+
+// NewEncoder initializes and returns an Encoder to encode Event Stream
+// messages to an io.Writer.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: w,
+ headersBuf: bytes.NewBuffer(nil),
+ }
+}
+
+// Encode encodes a single EventStream message to the io.Writer the Encoder
+// was created with. An error is returned if writing the message fails.
+func (e *Encoder) Encode(msg Message) error {
+ e.headersBuf.Reset()
+
+ err := encodeHeaders(e.headersBuf, msg.Headers)
+ if err != nil {
+ return err
+ }
+
+ crc := crc32.New(crc32IEEETable)
+ hashWriter := io.MultiWriter(e.w, crc)
+
+ headersLen := uint32(e.headersBuf.Len())
+ payloadLen := uint32(len(msg.Payload))
+
+ if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+ return err
+ }
+
+ if headersLen > 0 {
+ if _, err := io.Copy(hashWriter, e.headersBuf); err != nil {
+ return err
+ }
+ }
+
+ if payloadLen > 0 {
+ if _, err := hashWriter.Write(msg.Payload); err != nil {
+ return err
+ }
+ }
+
+ msgCRC := crc.Sum32()
+ return binary.Write(e.w, binary.BigEndian, msgCRC)
+}
+
+func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
+ p := messagePrelude{
+ Length: minMsgLen + headersLen + payloadLen,
+ HeadersLen: headersLen,
+ }
+ if err := p.ValidateLens(); err != nil {
+ return err
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ p.Length,
+ p.HeadersLen,
+ )
+ if err != nil {
+ return err
+ }
+
+ p.PreludeCRC = crc.Sum32()
+ err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func encodeHeaders(w io.Writer, headers Headers) error {
+ for _, h := range headers {
+ hn := headerName{
+ Len: uint8(len(h.Name)),
+ }
+ copy(hn.Name[:hn.Len], h.Name)
+ if err := hn.encode(w); err != nil {
+ return err
+ }
+
+ if err := h.Value.encode(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
+ for _, v := range vs {
+ if err := binary.Write(w, order, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
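
Encode is the write-side mirror of Decode, and a round trip through a bytes.Buffer is the simplest way to see the two cooperate. A usage sketch using only types defined in this package; the header name and payload are made up for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	in := eventstream.Message{
		Headers: eventstream.Headers{
			{Name: ":message-type", Value: eventstream.StringValue("event")},
		},
		Payload: []byte(`{"hello":"world"}`),
	}
	if err := eventstream.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}

	// Decode takes an optional scratch buffer for the payload;
	// passing nil simply allocates.
	out, err := eventstream.NewDecoder(&buf).Decode(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s %s\n", out.Headers.Get(":message-type"), out.Payload)
	// event {"hello":"world"}
}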
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
new file mode 100755
index 0000000..5481ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
+package eventstream
+
+import "fmt"
+
+// LengthError provides the error for items being larger than a maximum length.
+type LengthError struct {
+ Part string
+ Want int
+ Have int
+ Value interface{}
+}
+
+func (e LengthError) Error() string {
+ return fmt.Sprintf("%s length invalid, %d/%d, %v",
+ e.Part, e.Want, e.Have, e.Value)
+}
+
+// ChecksumError provides the error for message checksum invalidation errors.
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+ return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
new file mode 100755
index 0000000..97937c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
@@ -0,0 +1,196 @@
+package eventstreamapi
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling an EventStream
+// message into an SDK type.
+type Unmarshaler interface {
+ UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+ MessageTypeHeader = `:message-type` // Identifies type of message.
+ EventMessageType = `event`
+ ErrorMessageType = `error`
+ ExceptionMessageType = `exception`
+
+ // Message Events
+ EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+ // Message Error
+ ErrorCodeHeader = `:error-code`
+ ErrorMessageHeader = `:error-message`
+
+ // Message Exception
+ ExceptionTypeHeader = `:exception-type`
+)
+
+// EventReader provides reading EventStream events from a reader.
+type EventReader struct {
+ reader io.ReadCloser
+ decoder *eventstream.Decoder
+
+ unmarshalerForEventType func(string) (Unmarshaler, error)
+ payloadUnmarshaler protocol.PayloadUnmarshaler
+
+ payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use ReadStream method to start reading from the EventStream.
+func NewEventReader(
+ reader io.ReadCloser,
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+ return &EventReader{
+ reader: reader,
+ decoder: eventstream.NewDecoder(reader),
+ payloadUnmarshaler: payloadUnmarshaler,
+ unmarshalerForEventType: unmarshalerForEventType,
+ payloadBuf: make([]byte, 10*1024),
+ }
+}
+
+// UseLogger instructs the EventReader to use the logger and log level
+// specified.
+func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
+ if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
+ r.decoder.UseLogger(logger)
+ }
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload buffer is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+ msg, err := r.decoder.Decode(r.payloadBuf)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ // Reclaim payload buffer for next message read.
+ r.payloadBuf = msg.Payload[0:0]
+ }()
+
+ typ, err := GetHeaderString(msg, MessageTypeHeader)
+ if err != nil {
+ return nil, err
+ }
+
+ switch typ {
+ case EventMessageType:
+ return r.unmarshalEventMessage(msg)
+ case ExceptionMessageType:
+ err = r.unmarshalEventException(msg)
+ return nil, err
+ case ErrorMessageType:
+ return nil, r.unmarshalErrorMessage(msg)
+ default:
+ return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
+ }
+}
+
+func (r *EventReader) unmarshalEventMessage(
+ msg eventstream.Message,
+) (event interface{}, err error) {
+ eventType, err := GetHeaderString(msg, EventTypeHeader)
+ if err != nil {
+ return nil, err
+ }
+
+ ev, err := r.unmarshalerForEventType(eventType)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return ev, nil
+}
+
+func (r *EventReader) unmarshalEventException(
+ msg eventstream.Message,
+) (err error) {
+ eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
+ if err != nil {
+ return err
+ }
+
+ ev, err := r.unmarshalerForEventType(eventType)
+ if err != nil {
+ return err
+ }
+
+ err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
+ if err != nil {
+ return err
+ }
+
+ var ok bool
+ err, ok = ev.(error)
+ if !ok {
+ err = messageError{
+ code: "SerializationError",
+ msg: fmt.Sprintf(
+ "event stream exception %s mapped to non-error %T, %v",
+ eventType, ev, ev,
+ ),
+ }
+ }
+
+ return err
+}
+
+func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
+ var msgErr messageError
+
+ msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader)
+ if err != nil {
+ return err
+ }
+
+ msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader)
+ if err != nil {
+ return err
+ }
+
+ return msgErr
+}
+
+// Close closes the EventReader's EventStream reader.
+func (r *EventReader) Close() error {
+ return r.reader.Close()
+}
+
+// GetHeaderString returns the value of the header as a string. If the header
+// is not set or the value is not a string, an error will be returned.
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) {
+ headerVal := msg.Headers.Get(headerName)
+ if headerVal == nil {
+ return "", fmt.Errorf("error header %s not present", headerName)
+ }
+
+ v, ok := headerVal.Get().(string)
+ if !ok {
+ return "", fmt.Errorf("error header value is not a string, %T", headerVal)
+ }
+
+ return v, nil
+}
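
ReadEvent dispatches on the :message-type header, then looks up an event-specific Unmarshaler via the :event-type header, so wiring EventReader up outside of generated SDK code needs two collaborators: a protocol.PayloadUnmarshaler and an event-type lookup function. A self-contained sketch, where statsEvent and the JSON-based payload unmarshaler are hypothetical stand-ins for what the SDK generates:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

// jsonPayloadUnmarshaler is a stand-in for the SDK's protocol unmarshalers.
type jsonPayloadUnmarshaler struct{}

func (jsonPayloadUnmarshaler) UnmarshalPayload(r io.Reader, v interface{}) error {
	return json.NewDecoder(r).Decode(v)
}

// statsEvent is a hypothetical event shape.
type statsEvent struct {
	Count int `json:"count"`
}

func (e *statsEvent) UnmarshalEvent(u protocol.PayloadUnmarshaler, m eventstream.Message) error {
	// Consume the payload before returning; the buffer is reused per read.
	return u.UnmarshalPayload(bytes.NewReader(m.Payload), e)
}

func main() {
	// Encode one "event" message carrying the headers ReadEvent expects.
	var buf bytes.Buffer
	msg := eventstream.Message{
		Headers: eventstream.Headers{
			{Name: eventstreamapi.MessageTypeHeader, Value: eventstream.StringValue(eventstreamapi.EventMessageType)},
			{Name: eventstreamapi.EventTypeHeader, Value: eventstream.StringValue("Stats")},
		},
		Payload: []byte(`{"count":3}`),
	}
	if err := eventstream.NewEncoder(&buf).Encode(msg); err != nil {
		panic(err)
	}

	r := eventstreamapi.NewEventReader(
		ioutil.NopCloser(&buf),
		jsonPayloadUnmarshaler{},
		func(eventType string) (eventstreamapi.Unmarshaler, error) {
			return &statsEvent{}, nil // one unmarshaler per event type in practice
		},
	)
	ev, err := r.ReadEvent()
	fmt.Printf("%#v %v\n", ev, err) // &main.statsEvent{Count:3} <nil>
}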
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
new file mode 100755
index 0000000..5ea5a98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+import "fmt"
+
+type messageError struct {
+ code string
+ msg string
+}
+
+func (e messageError) Code() string {
+ return e.code
+}
+
+func (e messageError) Message() string {
+ return e.msg
+}
+
+func (e messageError) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.msg)
+}
+
+func (e messageError) OrigErr() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
new file mode 100755
index 0000000..3b44dde
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
@@ -0,0 +1,166 @@
+package eventstream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Headers are a collection of EventStream header values.
+type Headers []Header
+
+// Header is a single EventStream Key Value header pair.
+type Header struct {
+ Name string
+ Value Value
+}
+
+// Set associates the name with a value. If the header name already exists in
+// the Headers, the value will be replaced with the new one.
+func (hs *Headers) Set(name string, value Value) {
+ var i int
+ for ; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ (*hs)[i].Value = value
+ return
+ }
+ }
+
+ *hs = append(*hs, Header{
+ Name: name, Value: value,
+ })
+}
+
+// Get returns the Value associated with the header. Nil is returned if the
+// value does not exist.
+func (hs Headers) Get(name string) Value {
+ for i := 0; i < len(hs); i++ {
+ if h := hs[i]; h.Name == name {
+ return h.Value
+ }
+ }
+ return nil
+}
+
+// Del deletes the value in the Headers if it exists.
+func (hs *Headers) Del(name string) {
+ for i := 0; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ copy((*hs)[i:], (*hs)[i+1:])
+ (*hs) = (*hs)[:len(*hs)-1]
+ }
+ }
+}
+
+func decodeHeaders(r io.Reader) (Headers, error) {
+ hs := Headers{}
+
+ for {
+ name, err := decodeHeaderName(r)
+ if err != nil {
+ if err == io.EOF {
+ // EOF while getting header name means no more headers
+ break
+ }
+ return nil, err
+ }
+
+ value, err := decodeHeaderValue(r)
+ if err != nil {
+ return nil, err
+ }
+
+ hs.Set(name, value)
+ }
+
+ return hs, nil
+}
+
+func decodeHeaderName(r io.Reader) (string, error) {
+ var n headerName
+
+ var err error
+ n.Len, err = decodeUint8(r)
+ if err != nil {
+ return "", err
+ }
+
+ name := n.Name[:n.Len]
+ if _, err := io.ReadFull(r, name); err != nil {
+ return "", err
+ }
+
+ return string(name), nil
+}
+
+func decodeHeaderValue(r io.Reader) (Value, error) {
+ var raw rawValue
+
+ typ, err := decodeUint8(r)
+ if err != nil {
+ return nil, err
+ }
+ raw.Type = valueType(typ)
+
+ var v Value
+
+ switch raw.Type {
+ case trueValueType:
+ v = BoolValue(true)
+ case falseValueType:
+ v = BoolValue(false)
+ case int8ValueType:
+ var tv Int8Value
+ err = tv.decode(r)
+ v = tv
+ case int16ValueType:
+ var tv Int16Value
+ err = tv.decode(r)
+ v = tv
+ case int32ValueType:
+ var tv Int32Value
+ err = tv.decode(r)
+ v = tv
+ case int64ValueType:
+ var tv Int64Value
+ err = tv.decode(r)
+ v = tv
+ case bytesValueType:
+ var tv BytesValue
+ err = tv.decode(r)
+ v = tv
+ case stringValueType:
+ var tv StringValue
+ err = tv.decode(r)
+ v = tv
+ case timestampValueType:
+ var tv TimestampValue
+ err = tv.decode(r)
+ v = tv
+ case uuidValueType:
+ var tv UUIDValue
+ err = tv.decode(r)
+ v = tv
+ default:
+ panic(fmt.Sprintf("unknown value type %d", raw.Type))
+ }
+
+ // Error could be EOF, let caller deal with it
+ return v, err
+}
+
+const maxHeaderNameLen = 255
+
+type headerName struct {
+ Len uint8
+ Name [maxHeaderNameLen]byte
+}
+
+func (v headerName) encode(w io.Writer) error {
+ if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
+ return err
+ }
+
+ _, err := w.Write(v.Name[:v.Len])
+ return err
+}
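
Headers behaves like a small ordered multimap with last-write-wins semantics on Set. A quick sketch of the accessor behavior:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	var hs eventstream.Headers
	hs.Set(":event-type", eventstream.StringValue("Stats"))
	hs.Set(":event-type", eventstream.StringValue("Records")) // replaces in place
	fmt.Println(hs.Get(":event-type"))                        // Records
	hs.Del(":event-type")
	fmt.Println(hs.Get(":event-type")) // <nil>
}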
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
new file mode 100755
index 0000000..e3fc076
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -0,0 +1,501 @@
+package eventstream
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+)
+
+const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
+
+// valueType is the EventStream header value type.
+type valueType uint8
+
+// Header value types
+const (
+ trueValueType valueType = iota
+ falseValueType
+ int8ValueType // Byte
+ int16ValueType // Short
+ int32ValueType // Integer
+ int64ValueType // Long
+ bytesValueType
+ stringValueType
+ timestampValueType
+ uuidValueType
+)
+
+func (t valueType) String() string {
+ switch t {
+ case trueValueType:
+ return "bool"
+ case falseValueType:
+ return "bool"
+ case int8ValueType:
+ return "int8"
+ case int16ValueType:
+ return "int16"
+ case int32ValueType:
+ return "int32"
+ case int64ValueType:
+ return "int64"
+ case bytesValueType:
+ return "byte_array"
+ case stringValueType:
+ return "string"
+ case timestampValueType:
+ return "timestamp"
+ case uuidValueType:
+ return "uuid"
+ default:
+ return fmt.Sprintf("unknown value type %d", uint8(t))
+ }
+}
+
+type rawValue struct {
+ Type valueType
+ Len uint16 // Only set for variable length slices
+ Value []byte // byte representation of value, BigEndian encoding.
+}
+
+func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
+ return binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ v,
+ )
+}
+
+func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
+ binary.Write(w, binary.BigEndian, r.Type)
+
+ _, err := w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeString(w io.Writer, v string) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ type stringWriter interface {
+ WriteString(string) (int, error)
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ if sw, ok := w.(stringWriter); ok {
+ _, err = sw.WriteString(v)
+ } else {
+ _, err = w.Write([]byte(v))
+ }
+
+ return err
+}
+
+func decodeFixedBytesValue(r io.Reader, buf []byte) error {
+ _, err := io.ReadFull(r, buf)
+ return err
+}
+
+func decodeBytesValue(r io.Reader) ([]byte, error) {
+ var raw rawValue
+ var err error
+ raw.Len, err = decodeUint16(r)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, raw.Len)
+ _, err = io.ReadFull(r, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+func decodeStringValue(r io.Reader) (string, error) {
+ v, err := decodeBytesValue(r)
+ return string(v), err
+}
+
+// Value represents the abstract header value.
+type Value interface {
+ Get() interface{}
+ String() string
+ valueType() valueType
+ encode(io.Writer) error
+}
+
+// A BoolValue provides eventstream encoding, and representation
+// of a Go bool value.
+type BoolValue bool
+
+// Get returns the underlying type
+func (v BoolValue) Get() interface{} {
+ return bool(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (v BoolValue) valueType() valueType {
+ if v {
+ return trueValueType
+ }
+ return falseValueType
+}
+
+func (v BoolValue) String() string {
+ return strconv.FormatBool(bool(v))
+}
+
+// encode encodes the BoolValue into an eventstream binary value
+// representation.
+func (v BoolValue) encode(w io.Writer) error {
+ return binary.Write(w, binary.BigEndian, v.valueType())
+}
+
+// An Int8Value provides eventstream encoding, and representation of a Go
+// int8 value.
+type Int8Value int8
+
+// Get returns the underlying value.
+func (v Int8Value) Get() interface{} {
+ return int8(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int8Value) valueType() valueType {
+ return int8ValueType
+}
+
+func (v Int8Value) String() string {
+ return fmt.Sprintf("0x%02x", int8(v))
+}
+
+// encode encodes the Int8Value into an eventstream binary value
+// representation.
+func (v Int8Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int8Value) decode(r io.Reader) error {
+ n, err := decodeUint8(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int8Value(n)
+ return nil
+}
+
+// An Int16Value provides eventstream encoding, and representation of a Go
+// int16 value.
+type Int16Value int16
+
+// Get returns the underlying value.
+func (v Int16Value) Get() interface{} {
+ return int16(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int16Value) valueType() valueType {
+ return int16ValueType
+}
+
+func (v Int16Value) String() string {
+ return fmt.Sprintf("0x%04x", int16(v))
+}
+
+// encode encodes the Int16Value into an eventstream binary value
+// representation.
+func (v Int16Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int16Value) decode(r io.Reader) error {
+ n, err := decodeUint16(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int16Value(n)
+ return nil
+}
+
+// An Int32Value provides eventstream encoding, and representation of a Go
+// int32 value.
+type Int32Value int32
+
+// Get returns the underlying value.
+func (v Int32Value) Get() interface{} {
+ return int32(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int32Value) valueType() valueType {
+ return int32ValueType
+}
+
+func (v Int32Value) String() string {
+ return fmt.Sprintf("0x%08x", int32(v))
+}
+
+// encode encodes the Int32Value into an eventstream binary value
+// representation.
+func (v Int32Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int32Value) decode(r io.Reader) error {
+ n, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int32Value(n)
+ return nil
+}
+
+// An Int64Value provides eventstream encoding, and representation of a Go
+// int64 value.
+type Int64Value int64
+
+// Get returns the underlying value.
+func (v Int64Value) Get() interface{} {
+ return int64(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int64Value) valueType() valueType {
+ return int64ValueType
+}
+
+func (v Int64Value) String() string {
+ return fmt.Sprintf("0x%016x", int64(v))
+}
+
+// encode encodes the Int64Value into an eventstream binary value
+// representation.
+func (v Int64Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int64Value) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int64Value(n)
+ return nil
+}
+
+// A BytesValue provides eventstream encoding, and representation of a Go
+// byte slice.
+type BytesValue []byte
+
+// Get returns the underlying value.
+func (v BytesValue) Get() interface{} {
+ return []byte(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (BytesValue) valueType() valueType {
+ return bytesValueType
+}
+
+func (v BytesValue) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+// encode encodes the BytesValue into an eventstream binary value
+// representation.
+func (v BytesValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeBytes(w, []byte(v))
+}
+
+func (v *BytesValue) decode(r io.Reader) error {
+ buf, err := decodeBytesValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = BytesValue(buf)
+ return nil
+}
+
+// A StringValue provides eventstream encoding, and representation of a Go
+// string.
+type StringValue string
+
+// Get returns the underlying value.
+func (v StringValue) Get() interface{} {
+ return string(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (StringValue) valueType() valueType {
+ return stringValueType
+}
+
+func (v StringValue) String() string {
+ return string(v)
+}
+
+// encode encodes the StringValue into an eventstream binary value
+// representation.
+func (v StringValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeString(w, string(v))
+}
+
+func (v *StringValue) decode(r io.Reader) error {
+ s, err := decodeStringValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = StringValue(s)
+ return nil
+}
+
+// A TimestampValue provides eventstream encoding, and representation of a Go
+// timestamp.
+type TimestampValue time.Time
+
+// Get returns the underlying value.
+func (v TimestampValue) Get() interface{} {
+ return time.Time(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (TimestampValue) valueType() valueType {
+ return timestampValueType
+}
+
+func (v TimestampValue) epochMilli() int64 {
+ nano := time.Time(v).UnixNano()
+ msec := nano / int64(time.Millisecond)
+ return msec
+}
+
+func (v TimestampValue) String() string {
+ msec := v.epochMilli()
+ return strconv.FormatInt(msec, 10)
+}
+
+// encode encodes the TimestampValue into an eventstream binary value
+// representation.
+func (v TimestampValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ msec := v.epochMilli()
+ return raw.encodeScalar(w, msec)
+}
+
+func (v *TimestampValue) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = TimestampValue(timeFromEpochMilli(int64(n)))
+ return nil
+}
+
+func timeFromEpochMilli(t int64) time.Time {
+ secs := t / 1e3
+ msec := t % 1e3
+ return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
+}
+
+// A UUIDValue provides eventstream encoding, and representation of a UUID
+// value.
+type UUIDValue [16]byte
+
+// Get returns the underlying value.
+func (v UUIDValue) Get() interface{} {
+ return v[:]
+}
+
+// valueType returns the EventStream header value type value.
+func (UUIDValue) valueType() valueType {
+ return uuidValueType
+}
+
+func (v UUIDValue) String() string {
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
+}
+
+// encode encodes the UUIDValue into an eventstream binary value
+// representation.
+func (v UUIDValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeFixedSlice(w, v[:])
+}
+
+func (v *UUIDValue) decode(r io.Reader) error {
+ tv := (*v)[:]
+ return decodeFixedBytesValue(r, tv)
+}
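
Worth noting from the value types above: TimestampValue travels as a big-endian int64 of milliseconds since the Unix epoch, so sub-millisecond precision is dropped, and the integer String forms are fixed-width hex. A short sketch:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	v := eventstream.TimestampValue(time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(v.String()) // 1546300800000 (epoch milliseconds)

	fmt.Println(eventstream.Int32Value(255).String()) // 0x000000ff
}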
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
new file mode 100755
index 0000000..2dc012a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -0,0 +1,103 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "hash/crc32"
+)
+
+const preludeLen = 8
+const preludeCRCLen = 4
+const msgCRCLen = 4
+const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
+const maxPayloadLen = 1024 * 1024 * 16 // 16MB
+const maxHeadersLen = 1024 * 128 // 128KB
+const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
+
+var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
+
+// A Message provides the eventstream message representation.
+type Message struct {
+ Headers Headers
+ Payload []byte
+}
+
+func (m *Message) rawMessage() (rawMessage, error) {
+ var raw rawMessage
+
+ if len(m.Headers) > 0 {
+ var headers bytes.Buffer
+ if err := encodeHeaders(&headers, m.Headers); err != nil {
+ return rawMessage{}, err
+ }
+ raw.Headers = headers.Bytes()
+ raw.HeadersLen = uint32(len(raw.Headers))
+ }
+
+ raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
+
+ hash := crc32.New(crc32IEEETable)
+ binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
+ raw.PreludeCRC = hash.Sum32()
+
+ binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
+
+ if raw.HeadersLen > 0 {
+ hash.Write(raw.Headers)
+ }
+
+ // Read payload bytes and update hash for it as well.
+ if len(m.Payload) > 0 {
+ raw.Payload = m.Payload
+ hash.Write(raw.Payload)
+ }
+
+ raw.CRC = hash.Sum32()
+
+ return raw, nil
+}
+
+type messagePrelude struct {
+ Length uint32
+ HeadersLen uint32
+ PreludeCRC uint32
+}
+
+func (p messagePrelude) PayloadLen() uint32 {
+ return p.Length - p.HeadersLen - minMsgLen
+}
+
+func (p messagePrelude) ValidateLens() error {
+ if p.Length == 0 || p.Length > maxMsgLen {
+ return LengthError{
+ Part: "message prelude",
+ Want: maxMsgLen,
+ Have: int(p.Length),
+ }
+ }
+ if p.HeadersLen > maxHeadersLen {
+ return LengthError{
+ Part: "message headers",
+ Want: maxHeadersLen,
+ Have: int(p.HeadersLen),
+ }
+ }
+ if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
+ return LengthError{
+ Part: "message payload",
+ Want: maxPayloadLen,
+ Have: int(payloadLen),
+ }
+ }
+
+ return nil
+}
+
+type rawMessage struct {
+ messagePrelude
+
+ Headers []byte
+ Payload []byte
+
+ CRC uint32
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
new file mode 100755
index 0000000..d7d42db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -0,0 +1,68 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate that the
+// request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+ Name: "awssdk.protocol.ValidateEndpointHostHandler",
+ Fn: func(r *request.Request) {
+ err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+ if err != nil {
+ r.Error = err
+ }
+ },
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+ paramErrs := request.ErrInvalidParams{Context: opName}
+ labels := strings.Split(host, ".")
+
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ paramErrs.Add(request.NewErrParamFormat(
+ "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+ }
+ }
+
+ if len(host) > 255 {
+ paramErrs.Add(request.NewErrParamMaxLen(
+ "endpoint host", 255, host,
+ ))
+ }
+
+ if paramErrs.Len() > 0 {
+ return paramErrs
+ }
+ return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
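
ValidateEndpointHost and ValidHostLabel are usable on their own; the handler above simply applies them to the request URL. A small sketch (the operation name is arbitrary):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	fmt.Println(protocol.ValidHostLabel("my-bucket")) // true
	fmt.Println(protocol.ValidHostLabel("my_bucket")) // false: '_' is not allowed

	err := protocol.ValidateEndpointHost("ListBuckets", "exa mple.com")
	fmt.Println(err != nil) // true: the space fails the label check
}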
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
new file mode 100755
index 0000000..915b0fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+ builder := HostPrefixBuilder{
+ Prefix: prefix,
+ LabelsFn: labelsFn,
+ }
+
+ return request.NamedHandler{
+ Name: HostPrefixHandlerName,
+ Fn: builder.Build,
+ }
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix to the operation's request endpoint host.
+type HostPrefixBuilder struct {
+ Prefix string
+ LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+ return
+ }
+
+ var labels map[string]string
+ if h.LabelsFn != nil {
+ labels = h.LabelsFn()
+ }
+
+ prefix := h.Prefix
+ for name, value := range labels {
+ prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+ }
+
+ r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+ if len(r.HTTPRequest.Host) > 0 {
+ r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100755
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing and should not be used otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotency token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
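
GetIdempotencyToken and UUIDVersion4 are the pieces callable directly; the reflect-based helpers are used by generated marshalers. A sketch showing the forced version and variant bits (an all-zero input makes them visible):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// With zeroed input, the 13th hex digit is forced to 4 (version)
	// and the 17th to 8 (variant).
	u := make([]byte, 16)
	fmt.Println(protocol.UUIDVersion4(u)) // 00000000-0000-4000-8000-000000000000

	fmt.Println(protocol.GetIdempotencyToken()) // a random v4 UUID
}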
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100755
index 0000000..776d110
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally base64 decoding the value first, before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+ var b []byte
+ var err error
+
+ switch escape {
+ case NoEscape:
+ b = []byte(v)
+ case Base64Escape:
+ b, err = base64.StdEncoding.DecodeString(v)
+ case QuotedEscape:
+ var u string
+ u, err = strconv.Unquote(v)
+ b = []byte(u)
+ default:
+ panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
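
A round trip through Base64Escape, the mode used when a JSONValue travels in an HTTP header; the key and value here are only for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	v := aws.JSONValue{"retries": 3.0}
	s, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // eyJyZXRyaWVzIjozfQ== (base64 of {"retries":3})

	out, err := protocol.DecodeJSONValue(s, protocol.Base64Escape)
	fmt.Println(out, err) // map[retries:3] <nil>
}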
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100755
index 0000000..e21614a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+ UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+ Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+ req := &request.Request{
+ HTTPRequest: &http.Request{},
+ HTTPResponse: &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: ioutil.NopCloser(r),
+ },
+ Data: v,
+ }
+
+ h.Unmarshalers.Run(req)
+
+ return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+ MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+ Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+ req := request.New(
+ aws.Config{},
+ metadata.ClientInfo{},
+ request.Handlers{},
+ nil,
+ &request.Operation{HTTPMethod: "GET"},
+ v,
+ nil,
+ )
+
+ h.Marshalers.Run(req)
+
+ if req.Error != nil {
+ return req.Error
+ }
+
+ io.Copy(w, req.GetBody())
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100755
index 0000000..60e5b09
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests, and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if !r.IsPresigned() {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100755
index 0000000..75866d0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,246 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ v.Set(name, protocol.FormatTime(format, value))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
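
Parse is the whole serializer: it walks the shape with reflection and flattens it into form values, expanding list members as Name.member.N unless the field is flattened. A sketch with a hypothetical input shape; untagged fields fall back to their Go field names:

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// describeInput is a made-up shape standing in for a generated one.
type describeInput struct {
	Name    *string
	Filters []*string
}

func main() {
	body := url.Values{"Action": {"Describe"}, "Version": {"2016-11-15"}}
	in := describeInput{
		Name:    aws.String("web"),
		Filters: []*string{aws.String("a"), aws.String("b")},
	}
	if err := queryutil.Parse(body, &in, false); err != nil {
		panic(err)
	}
	fmt.Println(body.Encode())
	// Action=Describe&Filters.member.1=a&Filters.member.2=b&Name=web&Version=2016-11-15
}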
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100755
index 0000000..3495c73
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,39 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed decoding Query response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100755
index 0000000..46d354e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,74 @@
+package query
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"ErrorResponse"`
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+ XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed to read from query HTTP response body", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // First check for specific error
+ resp := xmlErrorResponse{}
+ decodeErr := xml.Unmarshal(bodyBytes, &resp)
+ if decodeErr == nil {
+ reqID := resp.RequestID
+ if reqID == "" {
+ reqID = r.RequestID
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+ return
+ }
+
+ // Check for unhandled error
+ servUnavailResp := xmlServiceUnavailableResponse{}
+ unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+ if unavailErr == nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Failed to retrieve any error message from the response body
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError",
+ "failed to decode query XML error response", decodeErr),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100755
index 0000000..b80f84f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,300 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Setup the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New("SerializationError",
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ name = strings.TrimSpace(name)
+ str = strings.TrimSpace(str)
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+
+ }
+ keyStr := strings.TrimSpace(key.String())
+ str = strings.TrimSpace(str)
+
+ header.Add(prefix+keyStr, str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ if tag.Get("location") == "querystring" {
+ format = protocol.ISO8601TimeFormatName
+ }
+ }
+ str = protocol.FormatTime(format, value)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100755
index 0000000..4366de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
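+//
+// As a sketch (shape and tags illustrative of the generated API types):
+//
+//    type ExampleInput struct {
+//        _    struct{}      `payload:"Body"`
+//        Body io.ReadSeeker `type:"blob"`
+//    }
+//
+// PayloadType(&ExampleInput{}) returns "blob".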
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100755
index 0000000..33fd53b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,225 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New("SerializationError",
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
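+// unmarshalHeaderMap collects all response headers sharing the given prefix
+// into a map[string]*string, stripping the prefix from the keys. For
+// illustration: with prefix "X-Amz-Meta-", the header "X-Amz-Meta-Foo: bar"
+// yields the map entry "Foo" -> "bar".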
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ }
+ t, err := protocol.ParseTime(format, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100755
index 0000000..b0f4e24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,77 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed to encode rest XML request", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed to decode REST XML response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+ query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100755
index 0000000..b7ed6c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
+package protocol
+
+import (
+ "strconv"
+ "time"
+)
+
+// Names of time formats supported by the SDK
+const (
+ RFC822TimeFormatName = "rfc822"
+ ISO8601TimeFormatName = "iso8601"
+ UnixTimeFormatName = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+ // RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+ // RFC3339, a subset of the ISO8601 timestamp format. e.g. 2014-04-29T18:30:38Z
+ ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+ switch name {
+ case RFC822TimeFormatName:
+ fallthrough
+ case ISO8601TimeFormatName:
+ fallthrough
+ case UnixTimeFormatName:
+ return true
+ default:
+ return false
+ }
+}
+
+// FormatTime returns a string value of the time.
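+// For example, for t representing 2014-04-29T18:30:38Z:
+//
+//    FormatTime(RFC822TimeFormatName, t)  // "Tue, 29 Apr 2014 18:30:38 GMT"
+//    FormatTime(ISO8601TimeFormatName, t) // "2014-04-29T18:30:38Z"
+//    FormatTime(UnixTimeFormatName, t)    // "1398796238"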
+func FormatTime(name string, t time.Time) string {
+ t = t.UTC()
+
+ switch name {
+ case RFC822TimeFormatName:
+ return t.Format(RFC822TimeFormat)
+ case ISO8601TimeFormatName:
+ return t.Format(ISO8601TimeFormat)
+ case UnixTimeFormatName:
+ return strconv.FormatInt(t.Unix(), 10)
+ default:
+ panic("unknown timestamp format name, " + name)
+ }
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+ switch formatName {
+ case RFC822TimeFormatName:
+ return time.Parse(RFC822TimeFormat, value)
+ case ISO8601TimeFormatName:
+ return time.Parse(ISO8601TimeFormat, value)
+ case UnixTimeFormatName:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Unix(int64(v), 0), nil
+ default:
+ panic("unknown timestamp format name, " + formatName)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100755
index 0000000..da1a681
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100755
index 0000000..cf981fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
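+//
+// As a minimal sketch (type and tags illustrative of the generated API
+// shapes, which carry their XML name on the hidden "_" field):
+//
+//    type Person struct {
+//        _    struct{} `locationName:"Person" type:"structure"`
+//        Name *string  `locationName:"Name" type:"string"`
+//    }
+//
+//    var buf bytes.Buffer
+//    name := "Jane"
+//    err := BuildXML(&Person{Name: &name}, xml.NewEncoder(&buf))
+//    // buf.String() == "<Person><Name>Jane</Name></Person>"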
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, sorted)
+ }
+ }
+ return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted
+// to an XMLNode as. If type is not provided, reflection is used to determine
+// the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ var payloadFields, nonPayloadFields int
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ nonPayloadFields++
+ continue
+ }
+ payloadFields++
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+ }
+
+ // The only case where the child shape is not added is if the shape only
+ // contains non-payload fields, e.g. headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) {
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child
+// nodes. All nested values in the list are also converted to XMLNodes.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child
+// nodes. All nested values in the map are also converted to XMLNodes.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ str = protocol.FormatTime(format, converted)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100755
index 0000000..ff1ef68
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,272 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The
+// shape of v needs to match the shape of the XML expected to be decoded.
+// If the shapes don't match, unmarshaling will fail.
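+//
+// As a sketch, the inverse of the BuildXML example in build.go (type
+// illustrative):
+//
+//    var p Person
+//    d := xml.NewDecoder(strings.NewReader("<Person><Name>Jane</Name></Person>"))
+//    err := UnmarshalXML(&p, d, "") // *p.Name == "Jane"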
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to
+// infer the type, or reflection is used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := r.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := r.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if Children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+ }
+
+ for i, c := range Children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100755
index 0000000..515ce15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,148 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ child.parent = n
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+ // Save into a temp variable, simply because out gets squashed during
+ // loop iterations.
+ tempOut := *out
+ node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
new file mode 100755
index 0000000..5c8ce5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
@@ -0,0 +1,249 @@
+package s3
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ contentMD5Header = "Content-Md5"
+ contentSha256Header = "X-Amz-Content-Sha256"
+ amzTeHeader = "X-Amz-Te"
+ amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+ appendMD5TxEncoding = "append-md5"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+ h := md5.New()
+
+ if !aws.IsReaderSeekable(r.Body) {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log(fmt.Sprintf(
+ "Unable to compute Content-MD5 for unseekable body, S3.%s",
+ r.Operation.Name))
+ }
+ return
+ }
+
+ if _, err := copySeekableBody(h, r.Body); err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ r.HTTPRequest.Header.Set(contentMD5Header, v)
+}
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+// request. If the body is not seekable or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+ return
+ }
+
+ var md5Hash, sha256Hash hash.Hash
+ hashers := make([]io.Writer, 0, 2)
+
+ // Determine upfront which hashes can be set without overriding
+ // user-provided header data.
+ if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+ md5Hash = md5.New()
+ hashers = append(hashers, md5Hash)
+ }
+
+ if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+ sha256Hash = sha256.New()
+ hashers = append(hashers, sha256Hash)
+ }
+
+ // Create the destination writer based on the hashes that are not already
+ // provided by the user.
+ var dst io.Writer
+ switch len(hashers) {
+ case 0:
+ return
+ case 1:
+ dst = hashers[0]
+ default:
+ dst = io.MultiWriter(hashers...)
+ }
+
+ if _, err := copySeekableBody(dst, r.Body); err != nil {
+ r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+ return
+ }
+
+ // For the hashes created, set the associated headers that the user did not
+ // already provide.
+ if md5Hash != nil {
+ sum := make([]byte, md5.Size)
+ encoded := make([]byte, md5Base64EncLen)
+
+ base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+ }
+
+ if sha256Hash != nil {
+ encoded := make([]byte, sha256HexEncLen)
+ sum := make([]byte, sha256.Size)
+
+ hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+ r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+ }
+}
+
+const (
+ md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+ sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
+)
+
+func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+ curPos, err := src.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ // hash the body. seek back to the first position after reading to reset
+ // the body for transmission. copy errors may be assumed to be from the
+ // body.
+ n, err := io.Copy(dst, src)
+ if err != nil {
+ return n, err
+ }
+
+ _, err = src.Seek(curPos, sdkio.SeekStart)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
+// Adds the x-amz-te: append-md5 header to the request. This requests that the
+// service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if validation is disabled, the request is
+// presigned, or the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+ if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+ return
+ }
+ if r.IsPresigned() {
+ return
+ }
+ r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+ if r.Error != nil {
+ return
+ }
+
+ if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+ return
+ }
+
+ var bodyReader *io.ReadCloser
+ var contentLen int64
+ switch tv := r.Data.(type) {
+ case *GetObjectOutput:
+ bodyReader = &tv.Body
+ contentLen = aws.Int64Value(tv.ContentLength)
+ // Update ContentLength to hide the trailing MD5 checksum.
+ tv.ContentLength = aws.Int64(contentLen - md5.Size)
+ tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
+ default:
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("%s: %s header received on unsupported API, %s",
+ amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
+ ), nil)
+ return
+ }
+
+ if contentLen < md5.Size {
+ r.Error = awserr.New("ChecksumValidationError",
+ fmt.Sprintf("invalid Content-Length %d for %s %s",
+ contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
+ ), nil)
+ return
+ }
+
+ // Wrap and swap the response body reader with the validation reader.
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
+}
+
+type md5ValidationReader struct {
+ rawReader io.ReadCloser
+ payload io.Reader
+ hash hash.Hash
+
+ payloadLen int64
+ read int64
+}
+
+func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
+ h := md5.New()
+ return &md5ValidationReader{
+ rawReader: reader,
+ payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
+ hash: h,
+ payloadLen: payloadLen,
+ }
+}
+
+func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
+ n, err = v.payload.Read(p)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+
+ v.read += int64(n)
+
+ if err == io.EOF {
+ if v.read != v.payloadLen {
+ return n, io.ErrUnexpectedEOF
+ }
+ expectSum := make([]byte, md5.Size)
+ actualSum := make([]byte, md5.Size)
+ if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
+ return n, sumReadErr
+ }
+ actualSum = v.hash.Sum(actualSum[0:0])
+ if !bytes.Equal(expectSum, actualSum) {
+ return n, awserr.New("InvalidChecksum",
+ fmt.Sprintf("expected MD5 checksum %s, got %s",
+ hex.EncodeToString(expectSum),
+ hex.EncodeToString(actualSum),
+ ),
+ nil)
+ }
+ }
+
+ return n, err
+}
+
+func (v *md5ValidationReader) Close() error {
+ return v.rawReader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100755
index 0000000..bc68a46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,106 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with the GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
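+//
+// For example:
+//
+//    loc := NormalizeBucketLocation("")   // "us-east-1"
+//    loc  = NormalizeBucketLocation("EU") // "eu-west-1"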
+func NormalizeBucketLocation(loc string) string {
+ switch loc {
+ case "":
+ loc = "us-east-1"
+ case "EU":
+ loc = "eu-west-1"
+ }
+
+ return loc
+}
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// })
+// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+// err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+ Name: "awssdk.s3.NormalizeBucketLocation",
+ Fn: func(req *request.Request) {
+ if req.Error != nil {
+ return
+ }
+
+ out := req.Data.(*GetBucketLocationOutput)
+ loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+ out.LocationConstraint = aws.String(loc)
+ },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// result, err := svc.GetBucketLocationWithContext(ctx,
+// &s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// },
+// s3.WithNormalizeBucketLocation,
+// )
+func WithNormalizeBucketLocation(r *request.Request) {
+ r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
+func buildGetBucketLocation(r *request.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = aws.String(loc)
+ }
+ }
+}
+
+func populateLocationConstraint(r *request.Request) {
+ if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+ in := r.Params.(*CreateBucketInput)
+ if in.CreateBucketConfiguration == nil {
+ r.Params = awsutil.CopyOf(r.Params)
+ in = r.Params.(*CreateBucketInput)
+ in.CreateBucketConfiguration = &CreateBucketConfiguration{
+ LocationConstraint: r.Config.Region,
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100755
index 0000000..95f2456
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,74 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/s3err"
+)
+
+func init() {
+ initClient = defaultInitClientFn
+ initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+ // Support building custom endpoints based on config
+ c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+ // Require SSL when using SSE keys
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeys)
+
+ // S3 uses custom error unmarshaling logic
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
+ c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+ // Add request handlers for specific platforms.
+ // e.g. 100-continue support for PUT requests using Go 1.6
+ platformRequestHandlers(r)
+
+ switch r.Operation.Name {
+ case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+ opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+ opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration,
+ opPutBucketReplication:
+ // These S3 operations require Content-MD5 to be set
+ r.Handlers.Build.PushBack(contentMD5)
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ case opCreateBucket:
+ // Auto-populate LocationConstraint with current region
+ r.Handlers.Validate.PushFront(populateLocationConstraint)
+ case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+ r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+ r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+ case opPutObject, opUploadPart:
+ r.Handlers.Build.PushBack(computeBodyHashes)
+ // Disabled until #1837 root issue is resolved.
+ // case opGetObject:
+ // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+ // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+ }
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+ getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+ getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+ getCopySourceSSECustomerKey() string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100755
index 0000000..0def022
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100755
index 0000000..39b912c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// f, err := os.Open(filename)
+// if err != nil {
+// return fmt.Errorf("failed to open file %q, %v", filename, err)
+// }
+//
+// // Upload the file to S3.
+// result, err := uploader.Upload(&s3manager.UploadInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// Body: f,
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a file to write the S3 Object contents to.
+// f, err := os.Create(filename)
+// if err != nil {
+// return fmt.Errorf("failed to create file %q, %v", filename, err)
+// }
+//
+// // Write the contents of S3 Object to the file
+// n, err := downloader.Download(f, &s3.GetObjectInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// })
+// if err != nil {
+// return fmt.Errorf("failed to download file, %v", err)
+// }
+// fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// sess := session.Must(session.NewSession())
+//
+// // Create the decryption client.
+// svc := s3crypto.NewDecryptionClient(sess)
+//
+// // The object will be downloaded from S3 and decrypted locally. The metadata
+// // about the object's encryption will instruct the decryption client how to
+// // decrypt the content of the object. By default KMS is used for keys.
+// result, err := svc.GetObject(&s3.GetObjectInput {
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myKey),
+// })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100755
index 0000000..931cb17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+ // ErrCodeBucketAlreadyExists for service response error code
+ // "BucketAlreadyExists".
+ //
+ // The requested bucket name is not available. The bucket namespace is shared
+ // by all users of the system. Please select a different name and try again.
+ ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+ // ErrCodeBucketAlreadyOwnedByYou for service response error code
+ // "BucketAlreadyOwnedByYou".
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+ // ErrCodeNoSuchBucket for service response error code
+ // "NoSuchBucket".
+ //
+ // The specified bucket does not exist.
+ ErrCodeNoSuchBucket = "NoSuchBucket"
+
+ // ErrCodeNoSuchKey for service response error code
+ // "NoSuchKey".
+ //
+ // The specified key does not exist.
+ ErrCodeNoSuchKey = "NoSuchKey"
+
+ // ErrCodeNoSuchUpload for service response error code
+ // "NoSuchUpload".
+ //
+ // The specified multipart upload does not exist.
+ ErrCodeNoSuchUpload = "NoSuchUpload"
+
+ // ErrCodeObjectAlreadyInActiveTierError for service response error code
+ // "ObjectAlreadyInActiveTierError".
+ //
+ // This operation is not allowed against this storage tier
+ ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+ // ErrCodeObjectNotInActiveTierError for service response error code
+ // "ObjectNotInActiveTierError".
+ //
+ // The source object of the COPY operation is not in the active tier and is
+ // only stored in Amazon Glacier.
+ ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100755
index 0000000..a7fbc2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,155 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// an operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+ for i := 0; i < len(b); i++ {
+ if b[i] == r.Operation.Name {
+ return false
+ }
+ }
+ return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+ opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of addressing is valid for all bucket names which
+// are DNS compatible and do not contain ".".
+func updateEndpointForS3Config(r *request.Request) {
+ forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+ accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+ if accelerate && accelerateOpBlacklist.Continue(r) {
+ if forceHostStyle {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+ }
+ }
+ updateEndpointForAccelerate(r)
+ } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+ updateEndpointForHostStyle(r)
+ }
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+		// Ignore the operation request if the bucket name was not provided;
+		// if this is an input validation error, the validation handler
+		// will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ // bucket name must be valid to put into the host
+ return
+ }
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+ accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+		// Ignore the operation request if the bucket name was not provided;
+		// if this is an input validation error, the validation handler
+		// will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+ nil)
+ return
+ }
+
+ parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+ if len(parts) < 3 {
+ r.Error = awserr.New("InvalidParameterExecption",
+ fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+ r.HTTPRequest.URL.Host), nil)
+ return
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+ for i := 1; i+1 < len(parts); i++ {
+ if parts[i] == aws.StringValue(r.Config.Region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+ if iface, ok := params.(bucketGetter); ok {
+ b := iface.getBucket()
+ return b, len(b) > 0
+ }
+
+ return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host rather than the path. This is false if the
+// bucket is not DNS compatible, or if host-style addressing would break
+// HTTPS certificate validation.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
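+//
+// For example, "my-bucket" and "log.archive" are DNS compatible, while
+// "My_Bucket", "192.168.5.4", and "a..b" are not.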
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
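+//
+// A sketch of the rewrite, assuming a bucket named "mybucket" and a path
+// that still carries the "/{Bucket}" placeholder:
+//
+//	https://s3.us-west-2.amazonaws.com/{Bucket}/key -> https://mybucket.s3.us-west-2.amazonaws.com/key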
+func moveBucketToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100755
index 0000000..8e6f330
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100755
index 0000000..14d05f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+ if r.Operation.HTTPMethod == "PUT" {
+ // 100-Continue should only be used on put requests.
+ r.Handlers.Sign.PushBack(add100Continue)
+ }
+}
+
+func add100Continue(r *request.Request) {
+ if aws.BoolValue(r.Config.S3Disable100Continue) {
+ return
+ }
+ if r.HTTPRequest.ContentLength < 1024*1024*2 {
+ // Ignore requests smaller than 2MB. This helps prevent delaying
+ // requests unnecessarily.
+ return
+ }
+
+ r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
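+
+// A minimal sketch of opting out of 100-Continue entirely, assuming a
+// standard session setup:
+//
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		S3Disable100Continue: aws.Bool(true),
+//	}))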
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
new file mode 100755
index 0000000..2646a42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -0,0 +1,443 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package s3iface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3API provides an interface to enable mocking the
+// s3.S3 service client's API operations,
+// paginators, and waiters. This makes it easier to unit test code that
+// calls out to the SDK's service client.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon Simple Storage Service.
+// func myFunc(svc s3iface.S3API) bool {
+// // Make svc.AbortMultipartUpload request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := s3.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockS3Client struct {
+// s3iface.S3API
+// }
+// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockS3Client{}
+//
+//        myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type S3API interface {
+ AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadWithContext(aws.Context, *s3.AbortMultipartUploadInput, ...request.Option) (*s3.AbortMultipartUploadOutput, error)
+ AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
+
+ CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadWithContext(aws.Context, *s3.CompleteMultipartUploadInput, ...request.Option) (*s3.CompleteMultipartUploadOutput, error)
+ CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput)
+
+ CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error)
+ CopyObjectWithContext(aws.Context, *s3.CopyObjectInput, ...request.Option) (*s3.CopyObjectOutput, error)
+ CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput)
+
+ CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error)
+ CreateBucketWithContext(aws.Context, *s3.CreateBucketInput, ...request.Option) (*s3.CreateBucketOutput, error)
+ CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput)
+
+ CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
+ CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+
+ DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
+ DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
+ DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
+
+ DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+ DeleteBucketAnalyticsConfigurationWithContext(aws.Context, *s3.DeleteBucketAnalyticsConfigurationInput, ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
+ DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
+
+ DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsWithContext(aws.Context, *s3.DeleteBucketCorsInput, ...request.Option) (*s3.DeleteBucketCorsOutput, error)
+ DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
+
+ DeleteBucketEncryption(*s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error)
+ DeleteBucketEncryptionWithContext(aws.Context, *s3.DeleteBucketEncryptionInput, ...request.Option) (*s3.DeleteBucketEncryptionOutput, error)
+ DeleteBucketEncryptionRequest(*s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput)
+
+ DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
+ DeleteBucketInventoryConfigurationWithContext(aws.Context, *s3.DeleteBucketInventoryConfigurationInput, ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error)
+ DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)
+
+ DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleWithContext(aws.Context, *s3.DeleteBucketLifecycleInput, ...request.Option) (*s3.DeleteBucketLifecycleOutput, error)
+ DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
+
+ DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
+ DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
+ DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
+
+ DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
+ DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
+ DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
+
+ DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationWithContext(aws.Context, *s3.DeleteBucketReplicationInput, ...request.Option) (*s3.DeleteBucketReplicationOutput, error)
+ DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput)
+
+ DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error)
+ DeleteBucketTaggingWithContext(aws.Context, *s3.DeleteBucketTaggingInput, ...request.Option) (*s3.DeleteBucketTaggingOutput, error)
+ DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput)
+
+ DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteWithContext(aws.Context, *s3.DeleteBucketWebsiteInput, ...request.Option) (*s3.DeleteBucketWebsiteOutput, error)
+ DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput)
+
+ DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
+ DeleteObjectWithContext(aws.Context, *s3.DeleteObjectInput, ...request.Option) (*s3.DeleteObjectOutput, error)
+ DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput)
+
+ DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingWithContext(aws.Context, *s3.DeleteObjectTaggingInput, ...request.Option) (*s3.DeleteObjectTaggingOutput, error)
+ DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
+
+ DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsWithContext(aws.Context, *s3.DeleteObjectsInput, ...request.Option) (*s3.DeleteObjectsOutput, error)
+ DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
+
+ DeletePublicAccessBlock(*s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockWithContext(aws.Context, *s3.DeletePublicAccessBlockInput, ...request.Option) (*s3.DeletePublicAccessBlockOutput, error)
+ DeletePublicAccessBlockRequest(*s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput)
+
+ GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
+ GetBucketAccelerateConfigurationWithContext(aws.Context, *s3.GetBucketAccelerateConfigurationInput, ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error)
+ GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)
+
+ GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
+ GetBucketAclWithContext(aws.Context, *s3.GetBucketAclInput, ...request.Option) (*s3.GetBucketAclOutput, error)
+ GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
+
+ GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
+ GetBucketAnalyticsConfigurationWithContext(aws.Context, *s3.GetBucketAnalyticsConfigurationInput, ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error)
+ GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)
+
+ GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsWithContext(aws.Context, *s3.GetBucketCorsInput, ...request.Option) (*s3.GetBucketCorsOutput, error)
+ GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
+
+ GetBucketEncryption(*s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error)
+ GetBucketEncryptionWithContext(aws.Context, *s3.GetBucketEncryptionInput, ...request.Option) (*s3.GetBucketEncryptionOutput, error)
+ GetBucketEncryptionRequest(*s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput)
+
+ GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
+ GetBucketInventoryConfigurationWithContext(aws.Context, *s3.GetBucketInventoryConfigurationInput, ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error)
+ GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)
+
+ GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
+ GetBucketLifecycleWithContext(aws.Context, *s3.GetBucketLifecycleInput, ...request.Option) (*s3.GetBucketLifecycleOutput, error)
+ GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
+
+ GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationWithContext(aws.Context, *s3.GetBucketLifecycleConfigurationInput, ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error)
+ GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput)
+
+ GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationWithContext(aws.Context, *s3.GetBucketLocationInput, ...request.Option) (*s3.GetBucketLocationOutput, error)
+ GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput)
+
+ GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingWithContext(aws.Context, *s3.GetBucketLoggingInput, ...request.Option) (*s3.GetBucketLoggingOutput, error)
+ GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput)
+
+ GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
+ GetBucketMetricsConfigurationWithContext(aws.Context, *s3.GetBucketMetricsConfigurationInput, ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error)
+ GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)
+
+ GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
+ GetBucketNotificationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfigurationDeprecated, error)
+ GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
+
+ GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error)
+ GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
+ GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
+
+ GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
+ GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
+ GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
+
+ GetBucketPolicyStatus(*s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error)
+ GetBucketPolicyStatusWithContext(aws.Context, *s3.GetBucketPolicyStatusInput, ...request.Option) (*s3.GetBucketPolicyStatusOutput, error)
+ GetBucketPolicyStatusRequest(*s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput)
+
+ GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationWithContext(aws.Context, *s3.GetBucketReplicationInput, ...request.Option) (*s3.GetBucketReplicationOutput, error)
+ GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput)
+
+ GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error)
+ GetBucketRequestPaymentWithContext(aws.Context, *s3.GetBucketRequestPaymentInput, ...request.Option) (*s3.GetBucketRequestPaymentOutput, error)
+ GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput)
+
+ GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error)
+ GetBucketTaggingWithContext(aws.Context, *s3.GetBucketTaggingInput, ...request.Option) (*s3.GetBucketTaggingOutput, error)
+ GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput)
+
+ GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningWithContext(aws.Context, *s3.GetBucketVersioningInput, ...request.Option) (*s3.GetBucketVersioningOutput, error)
+ GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput)
+
+ GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteWithContext(aws.Context, *s3.GetBucketWebsiteInput, ...request.Option) (*s3.GetBucketWebsiteOutput, error)
+ GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput)
+
+ GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error)
+ GetObjectWithContext(aws.Context, *s3.GetObjectInput, ...request.Option) (*s3.GetObjectOutput, error)
+ GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput)
+
+ GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
+ GetObjectAclWithContext(aws.Context, *s3.GetObjectAclInput, ...request.Option) (*s3.GetObjectAclOutput, error)
+ GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput)
+
+ GetObjectLegalHold(*s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error)
+ GetObjectLegalHoldWithContext(aws.Context, *s3.GetObjectLegalHoldInput, ...request.Option) (*s3.GetObjectLegalHoldOutput, error)
+ GetObjectLegalHoldRequest(*s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput)
+
+ GetObjectLockConfiguration(*s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error)
+ GetObjectLockConfigurationWithContext(aws.Context, *s3.GetObjectLockConfigurationInput, ...request.Option) (*s3.GetObjectLockConfigurationOutput, error)
+ GetObjectLockConfigurationRequest(*s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput)
+
+ GetObjectRetention(*s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error)
+ GetObjectRetentionWithContext(aws.Context, *s3.GetObjectRetentionInput, ...request.Option) (*s3.GetObjectRetentionOutput, error)
+ GetObjectRetentionRequest(*s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput)
+
+ GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingWithContext(aws.Context, *s3.GetObjectTaggingInput, ...request.Option) (*s3.GetObjectTaggingOutput, error)
+ GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
+
+ GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
+ GetObjectTorrentWithContext(aws.Context, *s3.GetObjectTorrentInput, ...request.Option) (*s3.GetObjectTorrentOutput, error)
+ GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
+
+ GetPublicAccessBlock(*s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockWithContext(aws.Context, *s3.GetPublicAccessBlockInput, ...request.Option) (*s3.GetPublicAccessBlockOutput, error)
+ GetPublicAccessBlockRequest(*s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput)
+
+ HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error)
+ HeadBucketWithContext(aws.Context, *s3.HeadBucketInput, ...request.Option) (*s3.HeadBucketOutput, error)
+ HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput)
+
+ HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
+ HeadObjectWithContext(aws.Context, *s3.HeadObjectInput, ...request.Option) (*s3.HeadObjectOutput, error)
+ HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput)
+
+ ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
+ ListBucketAnalyticsConfigurationsWithContext(aws.Context, *s3.ListBucketAnalyticsConfigurationsInput, ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
+ ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)
+
+ ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
+ ListBucketInventoryConfigurationsWithContext(aws.Context, *s3.ListBucketInventoryConfigurationsInput, ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error)
+ ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)
+
+ ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
+ ListBucketMetricsConfigurationsWithContext(aws.Context, *s3.ListBucketMetricsConfigurationsInput, ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error)
+ ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)
+
+ ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
+ ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
+ ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
+
+ ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
+ ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
+
+ ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error
+ ListMultipartUploadsPagesWithContext(aws.Context, *s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool, ...request.Option) error
+
+ ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsWithContext(aws.Context, *s3.ListObjectVersionsInput, ...request.Option) (*s3.ListObjectVersionsOutput, error)
+ ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput)
+
+ ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error
+ ListObjectVersionsPagesWithContext(aws.Context, *s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool, ...request.Option) error
+
+ ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error)
+ ListObjectsWithContext(aws.Context, *s3.ListObjectsInput, ...request.Option) (*s3.ListObjectsOutput, error)
+ ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput)
+
+ ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
+ ListObjectsPagesWithContext(aws.Context, *s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool, ...request.Option) error
+
+ ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2WithContext(aws.Context, *s3.ListObjectsV2Input, ...request.Option) (*s3.ListObjectsV2Output, error)
+ ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
+
+ ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
+ ListObjectsV2PagesWithContext(aws.Context, *s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool, ...request.Option) error
+
+ ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
+ ListPartsWithContext(aws.Context, *s3.ListPartsInput, ...request.Option) (*s3.ListPartsOutput, error)
+ ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
+
+ ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
+ ListPartsPagesWithContext(aws.Context, *s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool, ...request.Option) error
+
+ PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
+ PutBucketAccelerateConfigurationWithContext(aws.Context, *s3.PutBucketAccelerateConfigurationInput, ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error)
+ PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)
+
+ PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
+ PutBucketAclWithContext(aws.Context, *s3.PutBucketAclInput, ...request.Option) (*s3.PutBucketAclOutput, error)
+ PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
+
+ PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
+ PutBucketAnalyticsConfigurationWithContext(aws.Context, *s3.PutBucketAnalyticsConfigurationInput, ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error)
+ PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)
+
+ PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsWithContext(aws.Context, *s3.PutBucketCorsInput, ...request.Option) (*s3.PutBucketCorsOutput, error)
+ PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
+
+ PutBucketEncryption(*s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error)
+ PutBucketEncryptionWithContext(aws.Context, *s3.PutBucketEncryptionInput, ...request.Option) (*s3.PutBucketEncryptionOutput, error)
+ PutBucketEncryptionRequest(*s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput)
+
+ PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
+ PutBucketInventoryConfigurationWithContext(aws.Context, *s3.PutBucketInventoryConfigurationInput, ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error)
+ PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)
+
+ PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
+ PutBucketLifecycleWithContext(aws.Context, *s3.PutBucketLifecycleInput, ...request.Option) (*s3.PutBucketLifecycleOutput, error)
+ PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
+
+ PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationWithContext(aws.Context, *s3.PutBucketLifecycleConfigurationInput, ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error)
+ PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput)
+
+ PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingWithContext(aws.Context, *s3.PutBucketLoggingInput, ...request.Option) (*s3.PutBucketLoggingOutput, error)
+ PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput)
+
+ PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
+ PutBucketMetricsConfigurationWithContext(aws.Context, *s3.PutBucketMetricsConfigurationInput, ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error)
+ PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
+
+ PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
+ PutBucketNotificationWithContext(aws.Context, *s3.PutBucketNotificationInput, ...request.Option) (*s3.PutBucketNotificationOutput, error)
+ PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
+
+ PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error)
+ PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
+ PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
+
+ PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
+ PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
+ PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
+
+ PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationWithContext(aws.Context, *s3.PutBucketReplicationInput, ...request.Option) (*s3.PutBucketReplicationOutput, error)
+ PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput)
+
+ PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error)
+ PutBucketRequestPaymentWithContext(aws.Context, *s3.PutBucketRequestPaymentInput, ...request.Option) (*s3.PutBucketRequestPaymentOutput, error)
+ PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput)
+
+ PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error)
+ PutBucketTaggingWithContext(aws.Context, *s3.PutBucketTaggingInput, ...request.Option) (*s3.PutBucketTaggingOutput, error)
+ PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput)
+
+ PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningWithContext(aws.Context, *s3.PutBucketVersioningInput, ...request.Option) (*s3.PutBucketVersioningOutput, error)
+ PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput)
+
+ PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteWithContext(aws.Context, *s3.PutBucketWebsiteInput, ...request.Option) (*s3.PutBucketWebsiteOutput, error)
+ PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput)
+
+ PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error)
+ PutObjectWithContext(aws.Context, *s3.PutObjectInput, ...request.Option) (*s3.PutObjectOutput, error)
+ PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput)
+
+ PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
+ PutObjectAclWithContext(aws.Context, *s3.PutObjectAclInput, ...request.Option) (*s3.PutObjectAclOutput, error)
+ PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput)
+
+ PutObjectLegalHold(*s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error)
+ PutObjectLegalHoldWithContext(aws.Context, *s3.PutObjectLegalHoldInput, ...request.Option) (*s3.PutObjectLegalHoldOutput, error)
+ PutObjectLegalHoldRequest(*s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput)
+
+ PutObjectLockConfiguration(*s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error)
+ PutObjectLockConfigurationWithContext(aws.Context, *s3.PutObjectLockConfigurationInput, ...request.Option) (*s3.PutObjectLockConfigurationOutput, error)
+ PutObjectLockConfigurationRequest(*s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput)
+
+ PutObjectRetention(*s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error)
+ PutObjectRetentionWithContext(aws.Context, *s3.PutObjectRetentionInput, ...request.Option) (*s3.PutObjectRetentionOutput, error)
+ PutObjectRetentionRequest(*s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput)
+
+ PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingWithContext(aws.Context, *s3.PutObjectTaggingInput, ...request.Option) (*s3.PutObjectTaggingOutput, error)
+ PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
+
+ PutPublicAccessBlock(*s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockWithContext(aws.Context, *s3.PutPublicAccessBlockInput, ...request.Option) (*s3.PutPublicAccessBlockOutput, error)
+ PutPublicAccessBlockRequest(*s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput)
+
+ RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)
+ RestoreObjectWithContext(aws.Context, *s3.RestoreObjectInput, ...request.Option) (*s3.RestoreObjectOutput, error)
+ RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
+
+ SelectObjectContent(*s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error)
+ SelectObjectContentWithContext(aws.Context, *s3.SelectObjectContentInput, ...request.Option) (*s3.SelectObjectContentOutput, error)
+ SelectObjectContentRequest(*s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput)
+
+ UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error)
+ UploadPartWithContext(aws.Context, *s3.UploadPartInput, ...request.Option) (*s3.UploadPartOutput, error)
+ UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput)
+
+ UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error)
+ UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
+
+ WaitUntilBucketExists(*s3.HeadBucketInput) error
+ WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilBucketNotExists(*s3.HeadBucketInput) error
+ WaitUntilBucketNotExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error
+
+ WaitUntilObjectExists(*s3.HeadObjectInput) error
+ WaitUntilObjectExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+
+ WaitUntilObjectNotExists(*s3.HeadObjectInput) error
+ WaitUntilObjectNotExistsWithContext(aws.Context, *s3.HeadObjectInput, ...request.WaiterOption) error
+}
+
+var _ S3API = (*s3.S3)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
new file mode 100755
index 0000000..1821557
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
@@ -0,0 +1,529 @@
+package s3manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+const (
+	// DefaultBatchSize is the default batch size used when constructing a batch
+	// delete client. It controls how many objects are deleted per DeleteObjects
+	// call.
+ DefaultBatchSize = 100
+)
+
+// BatchError will contain the key and bucket of each object that failed
+// during a batch upload, download, or delete.
+type BatchError struct {
+ Errors Errors
+ code string
+ message string
+}
+
+// Errors is a typed alias for a slice of errors to satisfy the error
+// interface.
+type Errors []Error
+
+func (errs Errors) Error() string {
+ buf := bytes.NewBuffer(nil)
+ for i, err := range errs {
+ buf.WriteString(err.Error())
+ if i+1 < len(errs) {
+ buf.WriteString("\n")
+ }
+ }
+ return buf.String()
+}
+
+// Error will contain the original error, bucket, and key of the operation that failed
+// during batch operations.
+type Error struct {
+ OrigErr error
+ Bucket *string
+ Key *string
+}
+
+func newError(err error, bucket, key *string) Error {
+ return Error{
+ err,
+ bucket,
+ key,
+ }
+}
+
+func (err *Error) Error() string {
+ origErr := ""
+ if err.OrigErr != nil {
+ origErr = ":\n" + err.OrigErr.Error()
+ }
+ return fmt.Sprintf("failed to perform batch operation on %q to %q%s",
+ aws.StringValue(err.Key),
+ aws.StringValue(err.Bucket),
+ origErr,
+ )
+}
+
+// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
+func NewBatchError(code, message string, err []Error) awserr.Error {
+ return &BatchError{
+ Errors: err,
+ code: code,
+ message: message,
+ }
+}
+
+// Code will return the code associated with the batch error.
+func (err *BatchError) Code() string {
+ return err.code
+}
+
+// Message will return the message associated with the batch error.
+func (err *BatchError) Message() string {
+ return err.message
+}
+
+func (err *BatchError) Error() string {
+ return awserr.SprintError(err.Code(), err.Message(), "", err.Errors)
+}
+
+// OrigErr will return the original error; in this case, the collected
+// Errors value from the batched operation.
+func (err *BatchError) OrigErr() error {
+ return err.Errors
+}
+
+// BatchDeleteIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be deleted.
+type BatchDeleteIterator interface {
+ Next() bool
+ Err() error
+ DeleteObject() BatchDeleteObject
+}
+
+// DeleteListIterator is an alternative iterator for the BatchDelete client. This will
+// iterate through a list of objects and delete the objects.
+//
+// Example:
+// iter := &s3manager.DeleteListIterator{
+// Client: svc,
+// Input: &s3.ListObjectsInput{
+// Bucket: aws.String("bucket"),
+// MaxKeys: aws.Int64(5),
+// },
+// Paginator: request.Pagination{
+// NewRequest: func() (*request.Request, error) {
+// var inCpy *ListObjectsInput
+// if input != nil {
+// tmp := *input
+// inCpy = &tmp
+// }
+// req, _ := c.ListObjectsRequest(inCpy)
+// return req, nil
+// },
+// },
+// }
+//
+// batcher := s3manager.NewBatchDeleteWithClient(svc)
+// if err := batcher.Delete(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+type DeleteListIterator struct {
+ Bucket *string
+ Paginator request.Pagination
+ objects []*s3.Object
+}
+
+// NewDeleteListIterator will return a new DeleteListIterator.
+func NewDeleteListIterator(svc s3iface.S3API, input *s3.ListObjectsInput, opts ...func(*DeleteListIterator)) BatchDeleteIterator {
+ iter := &DeleteListIterator{
+ Bucket: input.Bucket,
+ Paginator: request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *s3.ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := svc.ListObjectsRequest(inCpy)
+ return req, nil
+ },
+ },
+ }
+
+ for _, opt := range opts {
+ opt(iter)
+ }
+ return iter
+}
+
+// Next will use the S3API client to iterate through a list of objects.
+func (iter *DeleteListIterator) Next() bool {
+ if len(iter.objects) > 0 {
+ iter.objects = iter.objects[1:]
+ }
+
+ if len(iter.objects) == 0 && iter.Paginator.Next() {
+ iter.objects = iter.Paginator.Page().(*s3.ListObjectsOutput).Contents
+ }
+
+ return len(iter.objects) > 0
+}
+
+// Err will return the last known error from Next.
+func (iter *DeleteListIterator) Err() error {
+ return iter.Paginator.Err()
+}
+
+// DeleteObject will return the current object to be deleted.
+func (iter *DeleteListIterator) DeleteObject() BatchDeleteObject {
+ return BatchDeleteObject{
+ Object: &s3.DeleteObjectInput{
+ Bucket: iter.Bucket,
+ Key: iter.objects[0].Key,
+ },
+ }
+}
+
+// BatchDelete will use the s3 package's service client to perform a batch
+// delete.
+type BatchDelete struct {
+ Client s3iface.S3API
+ BatchSize int
+}
+
+// NewBatchDeleteWithClient will return a new delete client that can delete
+// objects in batches.
+//
+// Example:
+// batcher := s3manager.NewBatchDeleteWithClient(client, size)
+//
+// objects := []BatchDeleteObject{
+// {
+// Object: &s3.DeleteObjectInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+// Objects: objects,
+// }); err != nil {
+// return err
+// }
+func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete)) *BatchDelete {
+ svc := &BatchDelete{
+ Client: client,
+ BatchSize: DefaultBatchSize,
+ }
+
+ for _, opt := range options {
+ opt(svc)
+ }
+
+ return svc
+}
+
+// NewBatchDelete will return a new delete client that can delete objects in
+// batches.
+//
+// Example:
+// batcher := s3manager.NewBatchDelete(sess, size)
+//
+// objects := []BatchDeleteObject{
+// {
+// Object: &s3.DeleteObjectInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// if err := batcher.Delete(aws.BackgroundContext(), &s3manager.DeleteObjectsIterator{
+// Objects: objects,
+// }); err != nil {
+// return err
+// }
+func NewBatchDelete(c client.ConfigProvider, options ...func(*BatchDelete)) *BatchDelete {
+ client := s3.New(c)
+ return NewBatchDeleteWithClient(client, options...)
+}
+
+// BatchDeleteObject is a wrapper object for calling the batch delete operation.
+type BatchDeleteObject struct {
+ Object *s3.DeleteObjectInput
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
+
+// DeleteObjectsIterator implements the BatchDeleteIterator interface, using
+// the scanner pattern to iterate through a series of objects to be deleted.
+type DeleteObjectsIterator struct {
+ Objects []BatchDeleteObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (iter *DeleteObjectsIterator) Next() bool {
+ if iter.inc {
+ iter.index++
+ } else {
+ iter.inc = true
+ }
+ return iter.index < len(iter.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDeleteIterator interface, this will only return nil.
+func (iter *DeleteObjectsIterator) Err() error {
+ return nil
+}
+
+// DeleteObject will return the BatchDeleteObject at the current batched index.
+func (iter *DeleteObjectsIterator) DeleteObject() BatchDeleteObject {
+ object := iter.Objects[iter.index]
+ return object
+}
+
+// Delete will use the iterator to queue up objects that need to be deleted.
+// Once the batch size is met, this will call the deleteBatch function.
+func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
+ var errs []Error
+ objects := []BatchDeleteObject{}
+ var input *s3.DeleteObjectsInput
+
+ for iter.Next() {
+ o := iter.DeleteObject()
+
+ if input == nil {
+ input = initDeleteObjectsInput(o.Object)
+ }
+
+ parity := hasParity(input, o)
+ if parity {
+ input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+ Key: o.Object.Key,
+ VersionId: o.Object.VersionId,
+ })
+ objects = append(objects, o)
+ }
+
+ if len(input.Delete.Objects) == d.BatchSize || !parity {
+ if err := deleteBatch(ctx, d, input, objects); err != nil {
+ errs = append(errs, err...)
+ }
+
+ objects = objects[:0]
+ input = nil
+
+ if !parity {
+ objects = append(objects, o)
+ input = initDeleteObjectsInput(o.Object)
+ input.Delete.Objects = append(input.Delete.Objects, &s3.ObjectIdentifier{
+ Key: o.Object.Key,
+ VersionId: o.Object.VersionId,
+ })
+ }
+ }
+ }
+
+	// iter.Next() could have returned false above while also populating iter.Err().
+ if iter.Err() != nil {
+ errs = append(errs, newError(iter.Err(), nil, nil))
+ }
+
+ if input != nil && len(input.Delete.Objects) > 0 {
+ if err := deleteBatch(ctx, d, input, objects); err != nil {
+ errs = append(errs, err...)
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedDeleteIncomplete", "some objects have failed to be deleted.", errs)
+ }
+ return nil
+}
+
+func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
+ return &s3.DeleteObjectsInput{
+ Bucket: o.Bucket,
+ MFA: o.MFA,
+ RequestPayer: o.RequestPayer,
+ Delete: &s3.Delete{},
+ }
+}
+
+const (
+ // ErrDeleteBatchFailCode represents an error code which will be returned
+ // only when DeleteObjects.Errors has an error that does not contain a code.
+ ErrDeleteBatchFailCode = "DeleteBatchError"
+ errDefaultDeleteBatchMessage = "failed to delete"
+)
+
+// deleteBatch will delete the batch of items described by input, then run
+// the After function, if any, of each object in objects.
+func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
+ errs := []Error{}
+
+ if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
+ for i := 0; i < len(input.Delete.Objects); i++ {
+ errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
+ }
+ } else if len(result.Errors) > 0 {
+ for i := 0; i < len(result.Errors); i++ {
+ code := ErrDeleteBatchFailCode
+ msg := errDefaultDeleteBatchMessage
+ if result.Errors[i].Message != nil {
+ msg = *result.Errors[i].Message
+ }
+ if result.Errors[i].Code != nil {
+ code = *result.Errors[i].Code
+ }
+
+ errs = append(errs, newError(awserr.New(code, msg, err), input.Bucket, result.Errors[i].Key))
+ }
+ }
+ for _, object := range objects {
+ if object.After == nil {
+ continue
+ }
+ if err := object.After(); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+ }
+
+ return errs
+}
+
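+// hasParity reports whether a BatchDeleteObject shares the Bucket, MFA, and
+// RequestPayer values of the in-progress DeleteObjectsInput, and can
+// therefore be folded into the same DeleteObjects call.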
+func hasParity(o1 *s3.DeleteObjectsInput, o2 BatchDeleteObject) bool {
+ if o1.Bucket != nil && o2.Object.Bucket != nil {
+ if *o1.Bucket != *o2.Object.Bucket {
+ return false
+ }
+ } else if o1.Bucket != o2.Object.Bucket {
+ return false
+ }
+
+ if o1.MFA != nil && o2.Object.MFA != nil {
+ if *o1.MFA != *o2.Object.MFA {
+ return false
+ }
+ } else if o1.MFA != o2.Object.MFA {
+ return false
+ }
+
+ if o1.RequestPayer != nil && o2.Object.RequestPayer != nil {
+ if *o1.RequestPayer != *o2.Object.RequestPayer {
+ return false
+ }
+ } else if o1.RequestPayer != o2.Object.RequestPayer {
+ return false
+ }
+
+ return true
+}
+
+// BatchDownloadIterator is an interface that uses the scanner pattern to iterate
+// through a series of objects to be downloaded.
+type BatchDownloadIterator interface {
+ Next() bool
+ Err() error
+ DownloadObject() BatchDownloadObject
+}
+
+// BatchDownloadObject contains all necessary information to run a batch operation once.
+type BatchDownloadObject struct {
+ Object *s3.GetObjectInput
+ Writer io.WriterAt
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
+
+// DownloadObjectsIterator implements the BatchDownloadIterator interface and allows for batched
+// download of objects.
+type DownloadObjectsIterator struct {
+ Objects []BatchDownloadObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *DownloadObjectsIterator) Next() bool {
+ if batcher.inc {
+ batcher.index++
+ } else {
+ batcher.inc = true
+ }
+ return batcher.index < len(batcher.Objects)
+}
+
+// DownloadObject will return the BatchDownloadObject at the current batched index.
+func (batcher *DownloadObjectsIterator) DownloadObject() BatchDownloadObject {
+ object := batcher.Objects[batcher.index]
+ return object
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchDownloadIterator interface, this will only return nil.
+func (batcher *DownloadObjectsIterator) Err() error {
+ return nil
+}
+
+// BatchUploadIterator is an interface that uses the scanner pattern to
+// iterate through what needs to be uploaded.
+type BatchUploadIterator interface {
+ Next() bool
+ Err() error
+ UploadObject() BatchUploadObject
+}
+
+// UploadObjectsIterator implements the BatchUploadIterator interface and allows for batched
+// upload of objects.
+type UploadObjectsIterator struct {
+ Objects []BatchUploadObject
+ index int
+ inc bool
+}
+
+// Next will increment the default iterator's index and ensure that there
+// is another object to iterate to.
+func (batcher *UploadObjectsIterator) Next() bool {
+ if batcher.inc {
+ batcher.index++
+ } else {
+ batcher.inc = true
+ }
+ return batcher.index < len(batcher.Objects)
+}
+
+// Err will return an error. Since this is just used to satisfy the
+// BatchUploadIterator interface, this will only return nil.
+func (batcher *UploadObjectsIterator) Err() error {
+ return nil
+}
+
+// UploadObject will return the BatchUploadObject at the current batched index.
+func (batcher *UploadObjectsIterator) UploadObject() BatchUploadObject {
+ object := batcher.Objects[batcher.index]
+ return object
+}
+
+// BatchUploadObject contains all necessary information to run a batch operation once.
+type BatchUploadObject struct {
+ Object *UploadInput
+ // After will run after each iteration during the batch process. This function will
+ // be executed whether or not the request was successful.
+ After func() error
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
new file mode 100755
index 0000000..f61665a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go
@@ -0,0 +1,88 @@
+package s3manager
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// regionHint to determine which AWS partition to perform the query on.
+//
+// The request will not be signed, and will not use your AWS credentials.
+//
+// A "NotFound" error code will be returned if the bucket does not exist in the
+// AWS partition the regionHint belongs to. If the regionHint parameter is an
+// empty string, GetBucketRegion will fall back to the ConfigProvider's region
+// config. If the regionHint is empty and the ConfigProvider does not have a
+// region value, an error will be returned.
+//
+// For example, to get the region of a bucket which exists in "eu-central-1"
+// you could provide a region hint of "us-west-2".
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
+ var cfg aws.Config
+ if len(regionHint) != 0 {
+ cfg.Region = aws.String(regionHint)
+ }
+ svc := s3.New(c, &cfg)
+ return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
+}
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegionWithClient is the same as GetBucketRegion with the exception
+// that it takes a S3 service client instead of a Session. The regionHint is
+// derived from the region the S3 service client was created in.
+//
+// See GetBucketRegion for more information.
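+//
+// A short usage sketch, assuming an already-configured session (the names
+// here are illustrative):
+//
+//	svc := s3.New(sess)
+//	region, err := s3manager.GetBucketRegionWithClient(ctx, svc, "my-bucket")
+//	if err != nil {
+//		return err
+//	}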
+func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
+ req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
+ Bucket: aws.String(bucket),
+ })
+ req.Config.S3ForcePathStyle = aws.Bool(true)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ req.SetContext(ctx)
+
+	// Disable HTTP redirects to prevent an invalid 301 from eating the
+	// response, because Go's HTTP client will fail and drop the response if a
+	// 301 is received without a Location header. S3 will return a 301 without
+	// the Location header for HeadObject API calls.
+ req.DisableFollowRedirects = true
+
+ var bucketRegion string
+ req.Handlers.Send.PushBack(func(r *request.Request) {
+ bucketRegion = r.HTTPResponse.Header.Get(bucketRegionHeader)
+ if len(bucketRegion) == 0 {
+ return
+ }
+ r.HTTPResponse.StatusCode = 200
+ r.HTTPResponse.Status = "OK"
+ r.Error = nil
+ })
+
+ req.ApplyOptions(opts...)
+
+ if err := req.Send(); err != nil {
+ return "", err
+ }
+
+ bucketRegion = s3.NormalizeBucketLocation(bucketRegion)
+
+ return bucketRegion, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
new file mode 100755
index 0000000..229c0d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go
@@ -0,0 +1,3 @@
+// Package s3manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful when working with large objects.
+package s3manager
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100755
index 0000000..4a9ad65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,555 @@
+package s3manager
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// The Downloader structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// It is not safe to mutate the Downloader's properties concurrently.
+type Downloader struct {
+ // The buffer size (in bytes) to use when buffering data into chunks and
+ // sending them as parts to S3. The minimum allowed part size is 5MB, and
+ // if this value is set to zero, the DefaultDownloadPartSize value will be used.
+ //
+ // PartSize is ignored if the Range input parameter is provided.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel when sending parts.
+ // If this is set to zero, the DefaultDownloadConcurrency value will be used.
+ //
+ // Concurrency of 1 will download the parts sequentially.
+ //
+ // Concurrency is ignored if the Range input parameter is provided.
+ Concurrency int
+
+ // An S3 client to use when performing downloads.
+ S3 s3iface.S3API
+
+ // List of request options that will be passed down to individual API
+ // operation requests made by the downloader.
+ RequestOptions []request.Option
+}
+
+// WithDownloaderRequestOptions appends to the Downloader's API request options.
+func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
+ return func(d *Downloader) {
+ d.RequestOptions = append(d.RequestOptions, opts...)
+ }
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// a S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a downloader with the session and custom options
+// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: s3.New(c),
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
+ }
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
+}
+
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires a S3 service client
+// to make S3 API calls.
+//
+// Example:
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // The S3 client the S3 Downloader will use
+// s3Svc := s3.New(sess)
+//
+// // Create a downloader with the s3 client and default options
+// downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+// // Create a downloader with the s3 client and custom options
+// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: svc,
+ PartSize: DefaultDownloadPartSize,
+ Concurrency: DefaultDownloadConcurrency,
+ }
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
+}
+
+type maxRetrier interface {
+ MaxRetries() int
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is called from.
+// Modifying the options will not impact the original Downloader instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
+//
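+// For example, a minimal sketch that buffers the object in memory (the
+// bucket and key are placeholders):
+//
+//    buf := aws.NewWriteAtBuffer([]byte{})
+//    n, err := downloader.Download(buf, &s3.GetObjectInput{
+//        Bucket: aws.String("bucket"),
+//        Key:    aws.String("key"),
+//    })
+//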
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// If the GetObjectInput's Range value is provided, the downloader will
+// perform a single GetObject request for that object's range. This causes
+// the part size and concurrency configurations to be ignored.
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
+}
+
+// DownloadWithContext downloads an object in S3 and writes the payload into w
+// using concurrent GET requests.
+//
+// DownloadWithContext is the same as Download with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the Context to add deadlining, timeouts, etc. The
+// DownloadWithContext may create sub-contexts for individual underlying
+// requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided, the downloader will
+// perform a single GetObject request for that object's range. This causes
+// the part size and concurrency configurations to be ignored.
+func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+ for _, option := range options {
+ option(&impl.cfg)
+ }
+ impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+ if s, ok := d.S3.(maxRetrier); ok {
+ impl.partBodyMaxRetries = s.MaxRetries()
+ }
+
+ impl.totalBytes = -1
+ if impl.cfg.Concurrency == 0 {
+ impl.cfg.Concurrency = DefaultDownloadConcurrency
+ }
+
+ if impl.cfg.PartSize == 0 {
+ impl.cfg.PartSize = DefaultDownloadPartSize
+ }
+
+ return impl.download()
+}
+
+// DownloadWithIterator downloads a batch of objects from S3 and writes them
+// to the io.WriterAt specified in the iterator.
+//
+// Example:
+// svc := s3manager.NewDownloader(sess)
+//
+// fooFile, err := os.Create("/tmp/foo.file")
+// if err != nil {
+// return err
+// }
+//
+// barFile, err := os.Create("/tmp/bar.file")
+// if err != nil {
+// return err
+// }
+//
+// objects := []s3manager.BatchDownloadObject {
+// {
+// Object: &s3.GetObjectInput {
+// Bucket: aws.String("bucket"),
+// Key: aws.String("foo"),
+// },
+// Writer: fooFile,
+// },
+// {
+// Object: &s3.GetObjectInput {
+// Bucket: aws.String("bucket"),
+// Key: aws.String("bar"),
+// },
+// Writer: barFile,
+// },
+// }
+//
+// iter := &s3manager.DownloadObjectsIterator{Objects: objects}
+// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
+ var errs []Error
+ for iter.Next() {
+ object := iter.DownloadObject()
+ if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+
+ if object.After == nil {
+ continue
+ }
+
+ if err := object.After(); err != nil {
+ errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
+ }
+ return nil
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+ ctx aws.Context
+ cfg Downloader
+
+ in *s3.GetObjectInput
+ w io.WriterAt
+
+ wg sync.WaitGroup
+ m sync.Mutex
+
+ pos int64
+ totalBytes int64
+ written int64
+ err error
+
+ partBodyMaxRetries int
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range.
+	// This enables ranged gets with the downloader, at the cost of no
+	// multipart download.
+ if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
+ d.downloadRange(rng)
+ return d.written, d.err
+ }
+
+ // Spin off first worker to check additional header information
+ d.getChunk()
+
+ if total := d.getTotalBytes(); total >= 0 {
+ // Spin up workers
+ ch := make(chan dlchunk, d.cfg.Concurrency)
+
+ for i := 0; i < d.cfg.Concurrency; i++ {
+ d.wg.Add(1)
+ go d.downloadPart(ch)
+ }
+
+ // Assign work
+ for d.getErr() == nil {
+ if d.pos >= total {
+ break // We're finished queuing chunks
+ }
+
+ // Queue the next range of bytes to read.
+ ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+ }
+
+ // Wait for completion
+ close(ch)
+ d.wg.Wait()
+ } else {
+ // Checking if we read anything new
+ for d.err == nil {
+ d.getChunk()
+ }
+
+ // We expect a 416 error letting us know we are done downloading the
+ // total bytes. Since we do not know the content's length, this will
+ // keep grabbing chunks of data until the range of bytes specified in
+	// the request is out of range of the content. Once this happens, a
+ // 416 should occur.
+ e, ok := d.err.(awserr.RequestFailure)
+ if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+ d.err = nil
+ }
+ }
+
+ // Return error
+ return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+ defer d.wg.Done()
+ for {
+ chunk, ok := <-ch
+ if !ok {
+ break
+ }
+ if d.getErr() != nil {
+ // Drain the channel if there is an error, to prevent deadlocking
+ // of download producer.
+ continue
+ }
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+ }
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+}
+
+// downloadRange downloads an Object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos}
+ // Ranges specified will short circuit the multipart download
+ chunk.withRange = rng
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+
+ // Update the position based on the amount of data received.
+ d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from s3
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+ in := &s3.GetObjectInput{}
+ awsutil.Copy(in, d.in)
+
+ // Get the next byte range of data
+ in.Range = aws.String(chunk.ByteRange())
+
+ var n int64
+ var err error
+ for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+ var resp *s3.GetObjectOutput
+ resp, err = d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
+ if err != nil {
+ return err
+ }
+ d.setTotalBytes(resp) // Set total if not yet set.
+
+ n, err = io.Copy(&chunk, resp.Body)
+ resp.Body.Close()
+ if err == nil {
+ break
+ }
+
+ chunk.cur = 0
+ logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
+ fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
+ aws.StringValue(in.Key), err, retry))
+ }
+
+ d.incrWritten(n)
+
+ return err
+}
+
+func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
+ s, ok := svc.(*s3.S3)
+ if !ok {
+ return
+ }
+
+ if s.Config.Logger == nil {
+ return
+ }
+
+ if s.Config.LogLevel.Matches(level) {
+ s.Config.Logger.Log(msg)
+ }
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or from Content-Length. Content-Length is used when the
+// response does not include a Content-Range, meaning the object was not
+// chunked. This occurs when the full file fits within the PartSize directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ if d.totalBytes >= 0 {
+ return
+ }
+
+ if resp.ContentRange == nil {
+		// ContentRange is nil when the full file contents are provided and
+		// the response is not chunked. Use ContentLength instead.
+ if resp.ContentLength != nil {
+ d.totalBytes = *resp.ContentLength
+ return
+ }
+ } else {
+ parts := strings.Split(*resp.ContentRange, "/")
+
+ total := int64(-1)
+ var err error
+ // Checking for whether or not a numbered total exists
+ // If one does not exist, we will assume the total to be -1, undefined,
+ // and sequentially download each chunk until hitting a 416 error
+ totalStr := parts[len(parts)-1]
+ if totalStr != "*" {
+ total, err = strconv.ParseInt(totalStr, 10, 64)
+ if err != nil {
+ d.err = err
+ return
+ }
+ }
+
+ d.totalBytes = total
+ }
+}
+
+func (d *downloader) incrWritten(n int64) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+ w io.WriterAt
+ start int64
+ size int64
+ cur int64
+
+ // specifies the byte range the chunk should be downloaded with.
+ withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk the size will be ignored when
+// writing, as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+ if c.cur >= c.size && len(c.withRange) == 0 {
+ return 0, io.EOF
+ }
+
+ n, err = c.w.WriteAt(p, c.start+c.cur)
+ c.cur += int64(n)
+
+ return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by
+// the client to request the chunk's range.
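+// For example, a chunk with start 0 and the default 5 MiB part size yields
+// "bytes=0-5242879".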
+func (c *dlchunk) ByteRange() string {
+ if len(c.withRange) != 0 {
+ return c.withRange
+ }
+
+ return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
new file mode 100755
index 0000000..da1838c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
@@ -0,0 +1,718 @@
+package s3manager
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multi part upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+// u := s3manager.NewUploader(opts)
+// output, err := u.upload(input)
+// if err != nil {
+// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
+// // Process error and its associated uploadID
+// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+// } else {
+// // Process error generically
+// fmt.Println("Error:", err.Error())
+// }
+// }
+//
+type MultiUploadFailure interface {
+ awserr.Error
+
+ // Returns the upload id for the S3 multipart upload that failed.
+ UploadID() string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the multiUploadError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error
+//
+// Should be used for an error that occurred while failing an S3 multipart
+// upload for which an upload ID is available. If an upload ID is not
+// available, a more relevant error type should be used instead.
+type multiUploadError struct {
+ awsError
+
+ // ID for multipart upload which failed.
+ uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// See apierr.BaseError ErrorWithExtra for output format
+//
+// Satisfies the error interface.
+func (m multiUploadError) Error() string {
+ extra := fmt.Sprintf("upload id: %s", m.uploadID)
+ return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (m multiUploadError) String() string {
+ return m.Error()
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m multiUploadError) UploadID() string {
+ return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+ // The URL where the object was uploaded to.
+ Location string
+
+ // The version of the object that was uploaded. Will only be populated if
+ // the S3 Bucket is versioned. If the bucket is not versioned this field
+ // will not be set.
+ VersionID *string
+
+ // The ID for a multipart upload to S3. In the case of an error the error
+ // can be cast to the MultiUploadFailure interface to extract the upload ID.
+ UploadID string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API request options.
+func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
+ return func(u *Uploader) {
+ u.RequestOptions = append(u.RequestOptions, opts...)
+ }
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// It is not safe to mutate the Uploader's properties concurrently.
+type Uploader struct {
+ // The buffer size (in bytes) to use when buffering data into chunks and
+ // sending them as parts to S3. The minimum allowed part size is 5MB, and
+ // if this value is set to zero, the DefaultUploadPartSize value will be used.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel per call to Upload when
+ // sending parts. If this is set to zero, the DefaultUploadConcurrency value
+ // will be used.
+ //
+ // The concurrency pool is not shared between calls to Upload.
+ Concurrency int
+
+ // Setting this value to true will cause the SDK to avoid calling
+ // AbortMultipartUpload on a failure, leaving all successfully uploaded
+ // parts on S3 for manual recovery.
+ //
+ // Note that storing parts of an incomplete multipart upload counts towards
+ // space usage on S3 and will add additional costs if not cleaned up.
+ LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// It is used to calculate the part size of the object to be uploaded.
+	// E.g: a 5GB file, with MaxUploadParts set to 100, will be uploaded as
+	// 100 parts of 50MB each, subject to the S3 limit of MaxUploadParts
+	// (10,000 parts).
+ //
+ // Defaults to package const's MaxUploadParts value.
+ MaxUploadParts int
+
+ // The client to use when uploading to S3.
+ S3 s3iface.S3API
+
+ // List of request options that will be passed down to individual API
+ // operation requests made by the uploader.
+ RequestOptions []request.Option
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create a S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// // Create an uploader with the session and custom options
+// uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: s3.New(c),
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
+ }
+
+ for _, option := range options {
+ option(u)
+ }
+
+ return u
+}
+
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// a S3 service client to make S3 API calls.
+//
+// Example:
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // S3 service client the Upload manager will use.
+// s3Svc := s3.New(sess)
+//
+// // Create an uploader with S3 client and default options
+// uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+// // Create an uploader with S3 client and custom options
+// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: svc,
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
+ }
+
+ for _, option := range options {
+ option(u)
+ }
+
+ return u
+}
+
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the Uploader's parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+// // Upload input parameters
+// upParams := &s3manager.UploadInput{
+// Bucket: &bucketName,
+// Key: &keyName,
+// Body: file,
+// }
+//
+// // Perform an upload.
+// result, err := uploader.Upload(upParams)
+//
+// // Perform an upload with options different from those in the Uploader.
+// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+// u.PartSize = 10 * 1024 * 1024 // 10MB part size
+// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
+// })
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+ return u.UploadWithContext(aws.BackgroundContext(), input, options...)
+}
+
+// UploadWithContext uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader's parameters.
+//
+// UploadWithContext is the same as Upload with the additional support for
+// Context input parameters. The Context must not be nil. A nil Context will
+// cause a panic. Use the context to add deadlining, timeouts, etc. The
+// UploadWithContext may create sub-contexts for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
+ i := uploader{in: input, cfg: u, ctx: ctx}
+
+ for _, opt := range opts {
+ opt(&i.cfg)
+ }
+ i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
+
+ return i.upload()
+}
+
+// UploadWithIterator uploads a batch of objects to S3. This operation uses
+// the iterator pattern to determine which object to upload next. Since the
+// iterator is an interface, custom behavior can be supplied.
+//
+// Example:
+// svc := s3manager.NewUploader(sess)
+//
+// objects := []s3manager.BatchUploadObject{
+// {
+// Object: &s3manager.UploadInput {
+// Key: aws.String("key"),
+// Bucket: aws.String("bucket"),
+// },
+// },
+// }
+//
+// iter := &s3manager.UploadObjectsIterator{Objects: objects}
+// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
+// return err
+// }
+func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
+ var errs []Error
+ for iter.Next() {
+ object := iter.UploadObject()
+ if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
+ s3Err := Error{
+ OrigErr: err,
+ Bucket: object.Object.Bucket,
+ Key: object.Object.Key,
+ }
+
+ errs = append(errs, s3Err)
+ }
+
+ if object.After == nil {
+ continue
+ }
+
+ if err := object.After(); err != nil {
+ s3Err := Error{
+ OrigErr: err,
+ Bucket: object.Object.Bucket,
+ Key: object.Object.Key,
+ }
+
+ errs = append(errs, s3Err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
+ }
+ return nil
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+ ctx aws.Context
+ cfg Uploader
+
+ in *UploadInput
+
+ readerPos int64 // current reader position
+ totalSize int64 // set to -1 if the size is not known
+
+ bufferPool sync.Pool
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+ u.init()
+
+ if u.cfg.PartSize < MinUploadPartSize {
+ msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+ return nil, awserr.New("ConfigError", msg, nil)
+ }
+
+ // Do one read to determine if we have more than one part
+ reader, _, part, err := u.nextReader()
+ if err == io.EOF { // single part
+ return u.singlePart(reader)
+ } else if err != nil {
+ return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+ }
+
+ mu := multiuploader{uploader: u}
+ return mu.upload(reader, part)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() {
+ if u.cfg.Concurrency == 0 {
+ u.cfg.Concurrency = DefaultUploadConcurrency
+ }
+ if u.cfg.PartSize == 0 {
+ u.cfg.PartSize = DefaultUploadPartSize
+ }
+ if u.cfg.MaxUploadParts == 0 {
+ u.cfg.MaxUploadParts = MaxUploadParts
+ }
+
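+	// Part buffers are recycled through this pool; send() returns each
+	// buffer once its UploadPart request completes, keeping allocations on
+	// the order of Concurrency * PartSize bytes.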
+ u.bufferPool = sync.Pool{
+ New: func() interface{} { return make([]byte, u.cfg.PartSize) },
+ }
+
+ // Try to get the total size for some optimizations
+ u.initSize()
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() {
+ u.totalSize = -1
+
+ switch r := u.in.Body.(type) {
+ case io.Seeker:
+ n, err := aws.SeekerLen(r)
+ if err != nil {
+ return
+ }
+ u.totalSize = n
+
+ // Try to adjust partSize if it is too small and account for
+ // integer division truncation.
+ if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+ // Add one to the part size to account for remainders
+		// during the size calculation. E.g. an odd number of bytes.
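+		// For example, a 100 GiB stream with the default 10,000 part
+		// limit resolves to a part size of 10,737,419 bytes (~10.2 MiB).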
+ u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+ }
+ }
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
+ type readerAtSeeker interface {
+ io.ReaderAt
+ io.ReadSeeker
+ }
+ switch r := u.in.Body.(type) {
+ case readerAtSeeker:
+ var err error
+
+ n := u.cfg.PartSize
+ if u.totalSize >= 0 {
+ bytesLeft := u.totalSize - u.readerPos
+
+ if bytesLeft <= u.cfg.PartSize {
+ err = io.EOF
+ n = bytesLeft
+ }
+ }
+
+ reader := io.NewSectionReader(r, u.readerPos, n)
+ u.readerPos += n
+
+ return reader, int(n), nil, err
+
+ default:
+ part := u.bufferPool.Get().([]byte)
+ n, err := readFillBuf(r, part)
+ u.readerPos += int64(n)
+
+ return bytes.NewReader(part[0:n]), n, part, err
+ }
+}
+
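+// readFillBuf reads from r until b is full or an error occurs. Unlike
+// io.ReadFull it returns the underlying error unchanged, so a short final
+// part surfaces io.EOF rather than io.ErrUnexpectedEOF.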
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+ for offset < len(b) && err == nil {
+ var n int
+ n, err = r.Read(b[offset:])
+ offset += n
+ }
+
+ return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
+ params := &s3.PutObjectInput{}
+ awsutil.Copy(params, u.in)
+ params.Body = buf
+
+	// Need to use the request form because the URL generated in the request
+	// is used in the returned Location.
+ req, out := u.cfg.S3.PutObjectRequest(params)
+ req.SetContext(u.ctx)
+ req.ApplyOptions(u.cfg.RequestOptions...)
+ if err := req.Send(); err != nil {
+ return nil, err
+ }
+
+ url := req.HTTPRequest.URL.String()
+ return &UploadOutput{
+ Location: url,
+ VersionID: out.VersionId,
+ }, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+ *uploader
+ wg sync.WaitGroup
+ m sync.Mutex
+ err error
+ uploadID string
+ parts completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+ buf io.ReadSeeker
+ part []byte
+ num int64
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
+ params := &s3.CreateMultipartUploadInput{}
+ awsutil.Copy(params, u.in)
+
+ // Create the multipart
+ resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ return nil, err
+ }
+ u.uploadID = *resp.UploadId
+
+ // Create the workers
+ ch := make(chan chunk, u.cfg.Concurrency)
+ for i := 0; i < u.cfg.Concurrency; i++ {
+ u.wg.Add(1)
+ go u.readChunk(ch)
+ }
+
+ // Send part 1 to the workers
+ var num int64 = 1
+ ch <- chunk{buf: firstBuf, part: firstPart, num: num}
+
+ // Read and queue the rest of the parts
+ for u.geterr() == nil && err == nil {
+ num++
+		// This upload exceeded the maximum number of supported parts; error now.
+ if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
+ var msg string
+ if num > int64(u.cfg.MaxUploadParts) {
+ msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ u.cfg.MaxUploadParts)
+ } else {
+ msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ MaxUploadParts)
+ }
+ u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+ break
+ }
+
+ var reader io.ReadSeeker
+ var nextChunkLen int
+ var part []byte
+ reader, nextChunkLen, part, err = u.nextReader()
+
+ if err != nil && err != io.EOF {
+ u.seterr(awserr.New(
+ "ReadRequestBody",
+ "read multipart upload data failed",
+ err))
+ break
+ }
+
+ if nextChunkLen == 0 {
+			// No need to upload an empty part; if the file was empty to
+			// start with, an empty single part would have been created and
+			// a multipart upload never started.
+ break
+ }
+
+ ch <- chunk{buf: reader, part: part, num: num}
+ }
+
+ // Close the channel, wait for workers, and complete upload
+ close(ch)
+ u.wg.Wait()
+ complete := u.complete()
+
+ if err := u.geterr(); err != nil {
+ return nil, &multiUploadError{
+ awsError: awserr.New(
+ "MultipartUpload",
+ "upload multipart failed",
+ err),
+ uploadID: u.uploadID,
+ }
+ }
+
+ // Create a presigned URL of the S3 Get Object in order to have parity with
+ // single part upload.
+ getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ })
+ getReq.Config.Credentials = credentials.AnonymousCredentials
+ uploadLocation, _, _ := getReq.PresignRequest(1)
+
+ return &UploadOutput{
+ Location: uploadLocation,
+ VersionID: complete.VersionId,
+ UploadID: u.uploadID,
+ }, nil
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+ defer u.wg.Done()
+ for {
+ data, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if u.geterr() == nil {
+ if err := u.send(data); err != nil {
+ u.seterr(err)
+ }
+ }
+ }
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+ params := &s3.UploadPartInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ Body: c.buf,
+ UploadId: &u.uploadID,
+ SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+ SSECustomerKey: u.in.SSECustomerKey,
+ PartNumber: &c.num,
+ }
+ resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ // put the byte array back into the pool to conserve memory
+ u.bufferPool.Put(c.part)
+ if err != nil {
+ return err
+ }
+
+ n := c.num
+ completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+ u.m.Lock()
+ u.parts = append(u.parts, completed)
+ u.m.Unlock()
+
+ return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+ if u.cfg.LeavePartsOnError {
+ return
+ }
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ }
+ _, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+ }
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+ if u.geterr() != nil {
+ u.fail()
+ return nil
+ }
+
+ // Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts)
+
+ params := &s3.CompleteMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
+ }
+ resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
+ if err != nil {
+ u.seterr(err)
+ u.fail()
+ }
+
+ return resp
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
new file mode 100755
index 0000000..f8ce34f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
@@ -0,0 +1,123 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3manager
+
+import (
+ "io"
+ "time"
+)
+
+// UploadInput provides the input parameters for uploading a stream or buffer
+// to an object in an Amazon S3 bucket. This type is similar to the s3
+// package's PutObjectInput with the exception that the Body member is an
+// io.Reader instead of an io.ReadSeeker.
+type UploadInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // The readable body payload to send to S3.
+ Body io.Reader
+
+ // Name of the bucket to which the PUT operation was initiated.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // The Legal Hold status that you want to apply to the specified object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode that you want to apply to this object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when you want this object's Object Lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ // (For example, "Key1=Value1")
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100755
index 0000000..d17dcc9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "s3" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID = "S3" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
+ s.DisableURIPathEscaping = true
+ }))
+ svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+ svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100755
index 0000000..8010c4f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,54 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+ if r.HTTPRequest.URL.Scheme == "https" {
+ return
+ }
+
+ if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+ if len(iface.getSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+
+ if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ if len(iface.getCopySourceSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+}
+
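+// computeSSEKeys base64-encodes any raw SSE-C customer key headers on the
+// request and, when absent, derives the matching "-md5" header from the raw
+// key, as S3 requires for customer-provided encryption keys.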
+func computeSSEKeys(r *request.Request) {
+ headers := []string{
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-copy-source-server-side-encryption-customer-key",
+ }
+
+ for _, h := range headers {
+ md5h := h + "-md5"
+ if key := r.HTTPRequest.Header.Get(h); key != "" {
+ // Base64-encode the value
+ b64v := base64.StdEncoding.EncodeToString([]byte(key))
+ r.HTTPRequest.Header.Set(h, b64v)
+
+ // Add MD5 if it wasn't computed
+ if r.HTTPRequest.Header.Get(md5h) == "" {
+ sum := md5.Sum([]byte(key))
+ b64sum := base64.StdEncoding.EncodeToString(sum[:])
+ r.HTTPRequest.Header.Set(md5h, b64sum)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100755
index 0000000..fde3050
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,40 @@
+package s3
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
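+// copyMultipartStatusOKUnmarhsalError handles responses for copy and
+// multipart-complete operations that can return 200 OK while carrying an
+// error document in the body. If such an error is found, the status code is
+// rewritten to 503 Service Unavailable so the request can be retried.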
+func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "unable to read response body", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ body := bytes.NewReader(b)
+ r.HTTPResponse.Body = ioutil.NopCloser(body)
+ defer body.Seek(0, sdkio.SeekStart)
+
+ if body.Len() == 0 {
+ // If there is no body don't attempt to parse the body.
+ return
+ }
+
+ unmarshalError(r)
+ if err, ok := r.Error.(awserr.Error); ok && err != nil {
+ if err.Code() == "SerializationError" {
+ r.Error = nil
+ return
+ }
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100755
index 0000000..1db7e13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,82 @@
+package s3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
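+// xmlErrorResponse models the minimal S3 error document, for example:
+//
+//    <Error>
+//        <Code>NoSuchKey</Code>
+//        <Message>The specified key does not exist.</Message>
+//    </Error>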
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+ // Bucket exists in a different region, and request needs
+ // to be made to the correct region.
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ msg := fmt.Sprintf(
+ "incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+ aws.StringValue(r.Config.Region),
+ aws.StringValue(r.Config.Endpoint),
+ )
+ if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+ msg += fmt.Sprintf(", bucket is in '%s' region", v)
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("BucketRegionError", msg, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ var errCode, errMsg string
+
+ // Attempt to parse error from body if it is known
+ resp := &xmlErrorResponse{}
+ err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+ if err != nil && err != io.EOF {
+ errCode = "SerializationError"
+ errMsg = "failed to decode S3 XML error response"
+ } else {
+ errCode = resp.Code
+ errMsg = resp.Message
+ err = nil
+ }
+
+	// Fall back to the status code converted to a message if there is still
+	// no error code.
+ if len(errCode) == 0 {
+ statusText := http.StatusText(r.HTTPResponse.StatusCode)
+ errCode = strings.Replace(statusText, " ", "", -1)
+ errMsg = statusText
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(errCode, errMsg, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+ awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging and contacting support.
+ HostID() string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100755
index 0000000..2596c69
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
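+//
+// A minimal usage sketch (the bucket name is a placeholder):
+//
+//    err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//    })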
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 301,
+ },
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 403,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
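+
+// Example (an illustrative sketch, not part of the generated file): waiting
+// for a just-created bucket to become visible, widening the attempt window
+// with a waiter option; the bucket name is an assumed placeholder.
+//
+//    err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
+//        &s3.HeadBucketInput{Bucket: aws.String("example-bucket")},
+//        request.WithWaiterMaxAttempts(40),
+//    )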
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+ return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 200,
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
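+
+// Example (an illustrative sketch, not part of the generated file): blocking
+// until an object written by another process appears; bucket and key are
+// assumed placeholders.
+//
+//    err := svc.WaitUntilObjectExists(&s3.HeadObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example/key"),
+//    })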
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+ return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists.
+// With the support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectNotExists",
+ MaxAttempts: 20,
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
+ Expected: 404,
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100755
index 0000000..8113089
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2401 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ output = &AssumeRoleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation. For a comparison of
+// AssumeRole with the other APIs that produce temporary credentials, see Requesting
+// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Important: You cannot call AssumeRole by using AWS root account credentials;
+// access is denied. You must use credentials for an IAM user or an IAM role
+// to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the IAM User Guide.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the IAM User Guide.
+//
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+// In this case, the trust policy acts as the only resource-based policy in
+// IAM, and users in the same account as the role do not need explicit permission
+// to assume the role. For more information about trust policies and resource-based
+// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
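+//
+// For example, a call that includes MFA information might look like the
+// following (an illustrative sketch; the ARNs and token code are assumed
+// placeholder values):
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//        TokenCode:       aws.String("123456"),
+//    })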
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
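+
+// Example (an illustrative sketch, not part of the generated file): bounding
+// an AssumeRole call with a timeout through the WithContext variant; "svc"
+// and "input" are assumed to exist.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.AssumeRoleWithContext(ctx, input)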
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output = &AssumeRoleWithSAMLOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by the intersection of both the access policy
+// of the role that is being assumed, and the policy that you pass. This means
+// that both policies must grant the permission for the action to be allowed.
+// This gives you a way to further restrict the permissions for the resulting
+// temporary security credentials. You cannot use the passed policy to grant
+// permissions that are in excess of those allowed by the access policy of the
+// role that is being assumed. For more information, see Permissions for AssumeRole,
+// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
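+
+// Example (an illustrative sketch, not part of the generated file): trading
+// a base64-encoded SAML assertion for temporary credentials; the ARNs and
+// "samlAssertion" are assumed placeholders.
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        SAMLAssertion: aws.String(samlAssertion),
+//    })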
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ output = &AssumeRoleWithWebIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security
+// credentials, and then using those credentials to make a request to AWS.
+//
+//
+// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+// apps that show how to invoke the identity providers, and then how to use
+// the information from these providers to get and use temporary security
+// credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the non-AWS identity provider
+// (IDP) that was asked to verify the incoming identity token could not be reached.
+// This is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the non-AWS identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
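+
+// Example (an illustrative sketch, not part of the generated file): trading
+// an OIDC token for temporary credentials; the role ARN, session name, and
+// "webToken" are assumed placeholders.
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        RoleSessionName:  aws.String("example-session"),
+//        WebIdentityToken: aws.String(webToken),
+//    })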
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output = &DecodeAuthorizationMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
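+
+// Example (an illustrative sketch, not part of the generated file): decoding
+// the encoded message that accompanies a Client.UnauthorizedOperation error;
+// "encodedMsg" is an assumed placeholder.
+//
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage))
+//    }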
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+ op := &request.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output = &GetCallerIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
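+
+// Example (an illustrative sketch, not part of the generated file): a quick
+// "who am I" check; the input struct has no required fields.
+//
+//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn))
+//    }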
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output = &GetFederationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// * The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken.
+//
+// * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
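+
+// Example (an illustrative sketch, not part of the generated file): issuing
+// scoped-down federated credentials from a proxy application; the user name
+// and "policyJSON" are assumed placeholders.
+//
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("example-federated-user"),
+//        Policy:          aws.String(policyJSON),
+//        DurationSeconds: aws.Int64(3600),
+//    })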
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+ op := &request.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output = &GetSessionTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ return out, req.Send()
+}
+
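+// A brief sketch of an MFA-protected GetSessionToken call; svc is assumed to
+// be an *STS client, and the serial number and one-time code come from the
+// caller's MFA device.
+//
+//    out, err := svc.GetSessionToken(&GetSessionTokenInput{
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"),
+//        TokenCode:    aws.String("123456"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }
+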
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
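+// A sketch of bounding the call with a deadline via the WithContext variant;
+// it assumes the caller imports the standard context and time packages, whose
+// context.Context satisfies aws.Context.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.GetSessionTokenWithContext(ctx, &GetSessionTokenInput{})
+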
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that is used by third parties when assuming roles in
+ // their customers' accounts. For each role that the third party can assume,
+ // they should instruct their customers to ensure the role's trust policy checks
+ // for the external ID that the third party generated. Each time the third party
+ // assumes the role, they should pass the customer's external ID. The external
+// ID helps third parties bind a role to the customer who
+ // created it. For more information about the external ID, see How to Use an
+ // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format.
+ //
+ // This parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both (the intersection of) the access policy of the role that
+ // is being assumed, and the policy that you pass. This gives you a way to further
+ // restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess
+ // of those allowed by the access policy of the role that is being assumed.
+ // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+ // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role
+ // is assumed by different principals or for different reasons. In cross-account
+// scenarios, the role session name is visible to, and can be logged by, the
+ // account that owns the role. The role session name is also used in the ARN
+ // of the assumed role principal. This means that subsequent cross-account API
+ // requests using the temporary security credentials will expose the role session
+ // name to the external account in their CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
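+// A sketch of how Validate reports problems before any request is sent; the
+// RoleArn below is deliberately shorter than the 20-character minimum.
+//
+//    in := &AssumeRoleInput{
+//        RoleArn:         aws.String("short"),
+//        RoleSessionName: aws.String("demo"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the RoleArn minimum-length violation
+//    }
+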
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+ s.ExternalId = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+ s.Policy = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+ s.TokenCode = &v
+ return s
+}
+
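+// Because each setter returns its receiver, an input can be built fluently.
+// A sketch, using a placeholder role ARN:
+//
+//    input := (&AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/demo").
+//        SetRoleSessionName("demo-session").
+//        SetDurationSeconds(3600)
+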
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type AssumeRoleWithSAMLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. Your role session lasts for
+ // the duration that you specify for the DurationSeconds parameter, or until
+ // the time specified in the SAML authentication response's SessionNotOnOrAfter
+ // value, whichever is shorter. You can provide a DurationSeconds value from
+ // 900 seconds (15 minutes) up to the maximum session duration setting for the
+ // role. This setting can have a value from 1 hour to 12 hours. If you specify
+ // a value higher than this setting, the operation fails. For example, if you
+ // specify a session duration of 12 hours, but your administrator set the maximum
+ // session duration to 6 hours, your operation fails. To learn how to view the
+ // maximum value for your role, see View the Maximum Session Duration Setting
+ // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the Using IAM guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PrincipalArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
+
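+// A rough sketch of assembling an AssumeRoleWithSAMLInput; the ARNs are
+// placeholders and samlResponse is the base-64 response produced by the
+// caller's IdP flow.
+//
+//    input := (&AssumeRoleWithSAMLInput{}).
+//        SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/MyIdP").
+//        SetRoleArn("arn:aws:iam::123456789012:role/saml-role").
+//        SetSAMLAssertion(samlResponse)
+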
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element
+ // of the SAML assertion.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string `type:"string"`
+
+ // A hash value based on the concatenation of the Issuer response value, the
+ // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used
+ // to uniquely identify a federated user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+ // ) )
+ NameQualifier *string `type:"string"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string `type:"string"`
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient
+ // or persistent.
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+ // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+ // is returned as transient. If the format includes any other prefix, the format
+ // is returned with no modifications.
+ SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+ s.Issuer = &v
+ return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+ s.NameQualifier = &v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+ s.Subject = &v
+ return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+ s.SubjectType = &v
+ return s
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format.
+ //
+ // The policy parameter is optional. If you pass a policy, the temporary security
+ // credentials that are returned by the operation have the permissions that
+ // are allowed by both the access policy of the role that is being assumed,
+ // and the policy that you pass. This gives you a way to further restrict the
+ // permissions for the resulting temporary security credentials. You cannot
+ // use the passed policy to grant permissions that are in excess of those allowed
+ // by the access policy of the role that is being assumed. For more information,
+ // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+ // in the IAM User Guide.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string `min:"1" type:"string"`
+
+ // The fully qualified host component of the domain name of the identity provider.
+ //
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth
+ // 2.0 access tokens. Do not include URL schemes and port numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string `min:"4" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session. Typically, you pass the name
+ // or identifier that is associated with the user who is using your application.
+ // That way, the temporary security credentials that your application will use
+ // are associated with that user. This session name is included as part of the
+ // ARN and assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+ // the identity provider. Your application must get this token by authenticating
+ // the user who is using your application with a web identity provider before
+ // the application makes an AssumeRoleWithWebIdentity call.
+ //
+ // WebIdentityToken is a required field
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+ s.Policy = &v
+ return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+ s.ProviderId = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+ s.WebIdentityToken = &v
+ return s
+}
+
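+// A sketch of an AssumeRoleWithWebIdentity call for an OpenID Connect ID
+// token; the role ARN is a placeholder and idToken comes from the
+// application's login flow.
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-role"),
+//        RoleSessionName:  aws.String("app-user-42"),
+//        WebIdentityToken: aws.String(idToken),
+//    })
+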
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The intended audience (also known as client ID) of the web identity token.
+ // This is traditionally the client identifier issued to the application that
+ // requested the web identity token.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+// ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed
+ // in the AssumeRoleWithWebIdentity request.
+ Provider *string `type:"string"`
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with
+ // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+ // the user and the application that acquired the WebIdentityToken (pairwise
+ // identifier). For OpenID Connect ID tokens, this field contains the value
+ // returned by the identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Provider = &v
+ return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SubjectFromWebIdentityToken = &v
+ return s
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // A unique identifier that contains the role ID and the role session name of
+ // the role that is being assumed. The role ID is generated by AWS when the
+ // role is created.
+ //
+ // AssumedRoleId is a required field
+ AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+ s.Arn = &v
+ return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+ s.AssumedRoleId = &v
+ return s
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date on which the current credentials expire.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `type:"timestamp" required:"true"`
+
+ // The secret access key that can be used to sign requests.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ //
+ // SessionToken is a required field
+ SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+ s.SessionToken = &v
+ return s
+}
+
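+// A common follow-up, sketched here, is to wrap the values from an API
+// response (out below) in a static provider from the SDK's aws/credentials
+// package, which the caller must import.
+//
+//    creds := credentials.NewStaticCredentials(
+//        *out.Credentials.AccessKeyId,
+//        *out.Credentials.SecretAccessKey,
+//        *out.Credentials.SessionToken)
+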
+type DecodeAuthorizationMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The encoded message that was returned with the response.
+ //
+ // EncodedMessage is a required field
+ EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+ if s.EncodedMessage == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
+ }
+ if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+ s.EncodedMessage = &v
+ return s
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+ s.DecodedMessage = &v
+ return s
+}
+
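+// A sketch of decoding an authorization failure; encodedMsg is assumed to
+// have been captured from an earlier "access denied" response.
+//
+//    out, err := svc.DecodeAuthorizationMessage(&DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.DecodedMessage) // XML document describing the denial
+//    }
+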
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the federated user that is associated with the credentials.
+ // For more information about ARNs and how to use them in policies, see IAM
+ // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // FederatedUserId is a required field
+ FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+ s.Arn = &v
+ return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+ s.FederatedUserId = &v
+ return s
+}
+
+type GetCallerIdentityInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+type GetCallerIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string `type:"string"`
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string `min:"20" type:"string"`
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity making the call. The values returned are those listed in the
+ // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+ s.Account = &v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+ s.Arn = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+ s.UserId = &v
+ return s
+}
+
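+// GetCallerIdentity takes no meaningful input, which makes it a convenient
+// "who am I" probe; a sketch, assuming svc is an *STS client:
+//
+//    out, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(*out.Account, *out.Arn, *out.UserId)
+//    }
+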
+type GetFederationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+ // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+ // using AWS account (root) credentials are restricted to a maximum of 3600
+ // seconds (one hour). If the specified duration is longer than one hour, the
+ // session obtained by using AWS account (root) credentials defaults to one
+ // hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon
+ // S3 bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // Name is a required field
+ Name *string `min:"2" type:"string" required:"true"`
+
+ // An IAM policy in JSON format that is passed with the GetFederationToken call
+ // and evaluated along with the policy or policies that are attached to the
+ // IAM user whose credentials are used to call GetFederationToken. The passed
+ // policy is used to scope down the permissions that are available to the IAM
+ // user, by allowing only a subset of the permissions that are granted to the
+ // IAM user. The passed policy cannot grant more permissions than those granted
+ // to the IAM user. The final permissions for the federated user are the most
+ // restrictive set based on the intersection of the passed policy and the IAM
+ // user policy.
+ //
+ // If you do not pass a policy, the resulting temporary security credentials
+ // have no effective permissions. The only exception is when the temporary security
+ // credentials are used to access a resource that has a resource-based policy
+ // that specifically allows the federated user to access the resource.
+ //
+ // The format for this parameter, as described by its regex pattern, is a string
+ // of characters up to 2048 characters in length. The characters can be any
+ // ASCII character from the space character to the end of the valid character
+ // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ //
+ // For more information about how permissions work, see Permissions for GetFederationToken
+ // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+ Policy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+ s.Name = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+ s.Policy = &v
+ return s
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+
+ // Identifiers for the federated user associated with the credentials (such
+ // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+ // can use the federated user's ARN in your resource-based policies, such as
+ // an Amazon S3 bucket policy.
+ FederatedUser *FederatedUser `type:"structure"`
+
+ // A percentage value indicating the size of the policy in packed form. The
+ // service rejects policies for which the packed size is greater than 100 percent
+ // of the allowed value.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+ s.FederatedUser = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+ // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+ // If the duration is longer than one hour, the session for AWS account owners
+ // defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, and the user does not provide a code when requesting a set of
+ // temporary security credentials, the user will receive an "access denied"
+ // response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100755
index 0000000..ef681ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,72 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at
+// http://aws.amazon.com/documentation/.
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To contact AWS Security Token Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
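+//
+// A minimal sketch (an illustrative assumption, not from the generated docs)
+// of creating a client from a session and requesting temporary credentials:
+//
+//    sess := session.Must(session.NewSession())
+//    svc := sts.New(sess)
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{})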
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100755
index 0000000..e24884e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100755
index 0000000..185c914
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "STS" // ServiceID is a unique identifer of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE
new file mode 100755
index 0000000..004e77f
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile
new file mode 100755
index 0000000..e035e63
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/Makefile
@@ -0,0 +1,18 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+ @go test -v -race -test.run="TestSimulate_(100op|1000op)"
+
+# go get github.com/kisielk/errcheck
+errcheck:
+ @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
+
+test:
+ @go test -v -cover .
+ @go test -v ./cmd/bolt
+
+.PHONY: race errcheck test
diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md
new file mode 100755
index 0000000..fc2a1e0
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/README.md
@@ -0,0 +1,935 @@
+Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
+====
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: http://symas.com/mdb/
+
+## Project Status
+
+Bolt is stable, the API is fixed, and the file format is fixed. Full unit
+test coverage and randomized black box testing are used to ensure database
+consistency and thread safety. Bolt is currently used in high-load production
+environments serving databases as large as 1TB. Many companies such as
+Shopify and Heroku use Bolt-backed services every day.
+
+## A message from the author
+
+> The original goal of Bolt was to provide a simple pure Go key/value store and to
+> not bloat the code with extraneous features. To that end, the project has been
+> a success. However, this limited scope also means that the project is complete.
+>
+> Maintaining an open source database requires an immense amount of time and energy.
+> Changes to the code can have unintended and sometimes catastrophic effects so
+> even simple changes require hours and hours of careful testing and validation.
+>
+> Unfortunately I no longer have the time or energy to continue this work. Bolt is
+> in a stable state and has years of successful production use. As such, I feel that
+> leaving it in its current state is the most prudent course of action.
+>
+> If you are interested in using a more featureful version of Bolt, I suggest that
+> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt).
+
+- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson))
+
+## Table of Contents
+
+- [Getting Started](#getting-started)
+ - [Installing](#installing)
+ - [Opening a database](#opening-a-database)
+ - [Transactions](#transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Batch read-write transactions](#batch-read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ - [Using buckets](#using-buckets)
+ - [Using key/value pairs](#using-keyvalue-pairs)
+ - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+ - [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Range scans](#range-scans)
+ - [ForEach()](#foreach)
+ - [Nested buckets](#nested-buckets)
+ - [Database backups](#database-backups)
+ - [Statistics](#statistics)
+ - [Read-Only Mode](#read-only-mode)
+ - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+- [Resources](#resources)
+- [Comparison with other databases](#comparison-with-other-databases)
+ - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+ - [LevelDB, RocksDB](#leveldb-rocksdb)
+ - [LMDB](#lmdb)
+- [Caveats & Limitations](#caveats--limitations)
+- [Reading the Source](#reading-the-source)
+- [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using Bolt, install Go and run `go get`:
+
+```sh
+$ go get github.com/boltdb/bolt/...
+```
+
+This will retrieve the library and install the `bolt` command line utility into
+your `$GOBIN` path.
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is represented as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/boltdb/bolt"
+)
+
+func main() {
+ // Open the my.db data file in your current directory.
+ // It will be created if it doesn't exist.
+ db, err := bolt.Open("my.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ ...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Read-only transactions and read-write transactions should not depend on one
+another and generally shouldn't be opened simultaneously in the same goroutine.
+This can cause a deadlock as the read-write transaction needs to periodically
+re-map the data file but it cannot do so while a read-only transaction is open.
+
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also roll back the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+You also get a consistent view of the database within this closure; however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function, instead
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+ // Find last key in bucket, decode as bigendian uint64, increment
+ // by one, encode back to []byte, and add new key.
+ ...
+ id = newValue
+ return nil
+})
+if err != nil {
+ return ...
+}
+fmt.Println("Allocated ID %d", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+ return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err = tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+ return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+ return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucket([]byte("MyBucket"))
+ if err != nil {
+ return fmt.Errorf("create bucket: %s", err)
+ }
+ return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
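+
+A minimal sketch of creating and deleting a bucket together (the bucket name
+is illustrative):
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	// Ensure the bucket exists, then remove it again.
+	if _, err := tx.CreateBucketIfNotExists([]byte("MyBucket")); err != nil {
+		return err
+	}
+	return tx.DeleteBucket([]byte("MyBucket"))
+})
+```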
+
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ err := b.Put([]byte("answer"), []byte("42"))
+ return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ v := b.Get([]byte("answer"))
+ fmt.Printf("The answer is: %s\n", v)
+ return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key, which is different from the key not existing.
+
+Use the `Bucket.Delete()` function to delete a key from the bucket.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
+
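+For example, a sketch of copying a value so it outlives the transaction:
+
+```go
+var out []byte
+db.View(func(tx *bolt.Tx) error {
+	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
+	if v != nil {
+		// Copy into a fresh slice; out stays valid after the transaction closes.
+		out = make([]byte, len(v))
+		copy(out, v)
+	}
+	return nil
+})
+```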
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+ return s.db.Update(func(tx *bolt.Tx) error {
+ // Retrieve the users bucket.
+ // This should be created when the DB is first opened.
+ b := tx.Bucket([]byte("users"))
+
+ // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable.
+ // That can't happen in an Update() call so I ignore the error check.
+ id, _ := b.NextSequence()
+ u.ID = int(id)
+
+ // Marshal user data into bytes.
+ buf, err := json.Marshal(u)
+ if err != nil {
+ return err
+ }
+
+ // Persist bytes to users bucket.
+ return b.Put(itob(u.ID), buf)
+ })
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(v))
+ return b
+}
+
+type User struct {
+ ID int
+ ...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ c := b.Cursor()
+
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First() Move to the first key.
+Last() Move to the last key.
+Seek() Move to a specific key.
+Next() Move to the next key.
+Prev() Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+When you have iterated to the end of the cursor then `Next()` will return a
+`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
+before calling `Next()` or `Prev()`. If you do not seek to a position then
+these functions will return a `nil` key.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
+
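+A small sketch of that check during iteration (same bucket as above):
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if v == nil {
+			fmt.Printf("%s is a nested bucket\n", k)
+		} else {
+			fmt.Printf("key=%s, value=%s\n", k, v)
+		}
+	}
+	return nil
+})
+```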
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+ prefix := []byte("1234")
+ for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume our events bucket exists and has RFC3339 encoded time keys.
+ c := tx.Bucket([]byte("Events")).Cursor()
+
+ // Our time range spans the 90's decade.
+ min := []byte("1990-01-01T00:00:00Z")
+ max := []byte("2000-01-01T00:00:00Z")
+
+ // Iterate over the 90's.
+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ fmt.Printf("%s: %s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
+
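+For example, a sortable key for the current time could be built like this (a
+small sketch):
+
+```go
+k := []byte(time.Now().UTC().Format(time.RFC3339))
+```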
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+	return b.ForEach(func(k, v []byte) error {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+		return nil
+	})
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. And inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+ // Start the transaction.
+ tx, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Retrieve the root bucket for the account.
+ // Assume this has already been created when the account was set up.
+	root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
+
+	// Set up the users bucket.
+ bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+ if err != nil {
+ return err
+ }
+
+ // Generate an ID for the new user.
+ userID, err := bkt.NextSequence()
+ if err != nil {
+ return err
+ }
+ u.ID = userID
+
+ // Marshal and save the encoded user.
+ if buf, err := json.Marshal(u); err != nil {
+ return err
+ } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+ return err
+ }
+
+ // Commit the transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+```
+
+
+### Database backups
+
+Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to backup over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+ err := db.View(func(tx *bolt.Tx) error {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+ _, err := tx.WriteTo(w)
+ return err
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
+
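+For example (a sketch; the path and mode are illustrative):
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	return tx.CopyFile("/path/to/backup.db", 0600)
+})
+```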
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+ // Grab the initial stats.
+ prev := db.Stats()
+
+ for {
+ // Wait for 10s.
+ time.Sleep(10 * time.Second)
+
+ // Grab the current stats and diff them.
+ stats := db.Stats()
+ diff := stats.Sub(&prev)
+
+ // Encode stats to JSON and print to STDERR.
+ json.NewEncoder(os.Stderr).Encode(diff)
+
+ // Save stats for the next loop.
+ prev = stats
+ }
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS requires extra permissions or cleanup from using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+ db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return &BoltDB{db}
+}
+
+type BoltDB struct {
+ db *bolt.DB
+ ...
+}
+
+func (b *BoltDB) Path() string {
+ return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+ b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
+
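+For example, a hedged sketch of one such method (the names are illustrative):
+
+```go
+// Put stores a key/value pair, creating the bucket if needed.
+func (b *BoltDB) Put(bucket, key, value string) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bkt, err := tx.CreateBucketIfNotExists([]byte(bucket))
+		if err != nil {
+			return err
+		}
+		return bkt.Put([]byte(key), []byte(value))
+	})
+}
+```
+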
+To initialize this struct from the native language (both platforms now sync
+their local storage to the cloud; these snippets disable that functionality for
+the database file):
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+ NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+ NSUserDomainMask,
+ YES) objectAtIndex:0];
+ GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+ [self addSkipBackupAttributeToItemAtPath:demo.path];
+ //Some DB Logic would go here
+ [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+  NSURL* URL = [NSURL fileURLWithPath: filePathString];
+ assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+ NSError *error = nil;
+ BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+ forKey: NSURLIsExcludedFromBackupKey error: &error];
+  if (!success) {
+ NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+ }
+ return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application, however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/values pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+ also fast but random writes can be slow. You can use `DB.Batch()` or add a
+ write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+ SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+ old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+ transaction has been committed or rolled back then the memory they point to
+ can be reused by a new page or can be unmapped from virtual memory and you'll
+ see an `unexpected fault address` panic when accessing it.
+
+* Bolt uses an exclusive write lock on the database file so it cannot be
+ shared by multiple processes.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+ buckets that have random inserts will cause your database to have very poor
+ page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+ once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+ page will not split until the transaction is committed. Randomly inserting
+ more than 100,000 key/value pairs into a single new bucket in a single
+ transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+ caching of the data. Typically, the OS will cache as much of the file as it
+ can in memory and will release memory as needed to other processes. This means
+ that Bolt can show very high memory usage when working with large databases.
+ However, this is expected and the OS will release memory as needed. Bolt can
+ handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. This may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+ will be endian specific. This means that you cannot copy a Bolt file from a
+ little endian machine to a big endian machine and have it work. For most
+ users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+ and return free pages back to the disk. Instead, Bolt maintains a free list
+ of unused pages within its data file. These free pages can be reused by later
+ transactions. This works well for many use cases as databases generally tend
+ to grow. However, it's important to note that deleting large chunks of data
+ will not allow you to reclaim that space on disk.
+
+ For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+ creating the database if it doesn't exist, obtaining an exclusive lock on the
+ file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+ value of the `writable` argument. This requires briefly obtaining the "meta"
+ lock to keep track of open transactions. Only one read-write transaction can
+ exist at a time so the "rwlock" is acquired during the life of a read-write
+ transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+ arguments, a cursor is used to traverse the B+tree to the page and position
+  where the key & value will be written. Once the position is found, the bucket
+ materializes the underlying page and the page's parent pages into memory as
+ "nodes". These nodes are where mutations occur during read-write transactions.
+ These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+ to move to the page & position of a key/value pair. During a read-only
+ transaction, the key and value data is returned as a direct reference to the
+ underlying mmap file so there's no allocation overhead. For read-write
+ transactions, this data may reference the mmap file or one of the in-memory
+ node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+ or in-memory nodes. It can seek to a specific key, move to the first or last
+ value, or it can move forward or backward. The cursor handles the movement up
+ and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+ into pages to be written to disk. Writing to disk then occurs in two phases.
+ First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+ new meta page with an incremented transaction ID is written and another
+  `fsync()` occurs. This two-phase write ensures that partially written data
+ pages are ignored in the event of a crash since the meta page pointing to them
+ is never written. Partially written meta pages are invalidated because they
+ are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as an optional storage engine and tested it against Basho-tuned LevelDB.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+ backed by boltdb.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+ simple tx and key scans.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
+* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml
new file mode 100755
index 0000000..6e26e94
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/appveyor.yml
@@ -0,0 +1,18 @@
+version: "{build}"
+
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\boltdb\bolt
+
+environment:
+ GOPATH: c:\gopath
+
+install:
+ - echo %PATH%
+ - echo %GOPATH%
+ - go version
+ - go env
+ - go get -v -t ./...
+
+build_script:
+ - go test -v ./...
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go
new file mode 100755
index 0000000..820d533
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_386.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go
new file mode 100755
index 0000000..98fafdb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go
new file mode 100755
index 0000000..7e5cb4b
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_arm.go
@@ -0,0 +1,28 @@
+package bolt
+
+import "unsafe"
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned bool
+
+func init() {
+ // Simple check to see whether this arch handles unaligned load/stores
+ // correctly.
+
+ // ARM9 and older devices require load/stores to be from/to aligned
+ // addresses. If not, the lower 2 bits are cleared and that address is
+ // read in a jumbled up order.
+
+ // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
+
+ raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
+ val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
+
+ brokenUnaligned = val != 0x11222211
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go
new file mode 100755
index 0000000..b26d84f
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go
@@ -0,0 +1,12 @@
+// +build arm64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go
new file mode 100755
index 0000000..2b67666
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_linux.go
@@ -0,0 +1,10 @@
+package bolt
+
+import (
+ "syscall"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return syscall.Fdatasync(int(db.file.Fd()))
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go
new file mode 100755
index 0000000..7058c3d
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go
@@ -0,0 +1,27 @@
+package bolt
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ msAsync = 1 << iota // perform asynchronous writes
+ msSync // perform synchronous writes
+ msInvalidate // invalidate cached data
+)
+
+func msync(db *DB) error {
+ _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
+
+func fdatasync(db *DB) error {
+ if db.data != nil {
+ return msync(db)
+ }
+ return db.file.Sync()
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go
new file mode 100755
index 0000000..645ddc3
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go
@@ -0,0 +1,9 @@
+// +build ppc
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
new file mode 100755
index 0000000..9331d97
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go
@@ -0,0 +1,12 @@
+// +build ppc64
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
new file mode 100755
index 0000000..8c143bc
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
@@ -0,0 +1,12 @@
+// +build ppc64le
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go
new file mode 100755
index 0000000..d7c39af
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go
@@ -0,0 +1,12 @@
+// +build s390x
+
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go
new file mode 100755
index 0000000..cad62dd
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_unix.go
@@ -0,0 +1,89 @@
+// +build !windows,!plan9,!solaris
+
+package bolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ for {
+ // If we're beyond our timeout then return an error.
+ // This can only occur after we've attempted a flock once.
+ if t.IsZero() {
+ t = time.Now()
+ } else if timeout > 0 && time.Since(t) > timeout {
+ return ErrTimeout
+ }
+ flag := syscall.LOCK_SH
+ if exclusive {
+ flag = syscall.LOCK_EX
+ }
+
+ // Attempt to obtain the lock (shared or exclusive, per the flag above).
+ err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+ if err == nil {
+ return nil
+ } else if err != syscall.EWOULDBLOCK {
+ return err
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := syscall.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
+
+// NOTE: This function is copied from stdlib because it is not available on darwin.
+func madvise(b []byte, advice int) (err error) {
+ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
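
The flock loop above (non-blocking attempt, EWOULDBLOCK check, 50ms sleep, caller-supplied timeout) is a reusable pattern. A hedged sketch outside Bolt, assuming a Unix platform; the lock path is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

// tryFlock retries a non-blocking exclusive flock until the timeout
// elapses, mirroring the loop in bolt_unix.go.
func tryFlock(f *os.File, timeout time.Duration) error {
	start := time.Now()
	for {
		err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}
		if timeout > 0 && time.Since(start) > timeout {
			return fmt.Errorf("flock: timed out after %v", timeout)
		}
		time.Sleep(50 * time.Millisecond)
	}
}

func main() {
	f, err := os.Create("/tmp/example.lock") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := tryFlock(f, time.Second); err != nil {
		panic(err)
	}
	fmt.Println("lock acquired")
}
```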
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
new file mode 100755
index 0000000..307bf2b
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
@@ -0,0 +1,90 @@
+package bolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ for {
+ // If we're beyond our timeout then return an error.
+ // This can only occur after we've attempted a flock once.
+ if t.IsZero() {
+ t = time.Now()
+ } else if timeout > 0 && time.Since(t) > timeout {
+ return ErrTimeout
+ }
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Pid = 0
+ lock.Whence = 0
+ if exclusive {
+ lock.Type = syscall.F_WRLCK
+ } else {
+ lock.Type = syscall.F_RDLCK
+ }
+ err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go
new file mode 100755
index 0000000..b00fb07
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_windows.go
@@ -0,0 +1,144 @@
+package bolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+ procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
+)
+
+const (
+ lockExt = ".lock"
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+ flagLockExclusive = 2
+ flagLockFailImmediately = 1
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+ errLockViolation syscall.Errno = 0x21
+)
+
+func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
+ if r == 0 {
+ return err
+ }
+ return nil
+}
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+ // Create a separate lock file on windows because a process
+ // cannot share an exclusive lock on the same file. This is
+ // needed during Tx.WriteTo().
+ f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
+ if err != nil {
+ return err
+ }
+ db.lockfile = f
+
+ var t time.Time
+ for {
+ // If we're beyond our timeout then return an error.
+ // This can only occur after we've attempted a flock once.
+ if t.IsZero() {
+ t = time.Now()
+ } else if timeout > 0 && time.Since(t) > timeout {
+ return ErrTimeout
+ }
+
+ var flag uint32 = flagLockFailImmediately
+ if exclusive {
+ flag |= flagLockExclusive
+ }
+
+ err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
+ if err == nil {
+ return nil
+ } else if err != errLockViolation {
+ return err
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
+ db.lockfile.Close()
+ os.Remove(db.path + lockExt)
+ return err
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+ if !db.readOnly {
+ // Truncate the database to the size of the mmap.
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("truncate: %s", err)
+ }
+ }
+
+ // Open a file mapping handle.
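+ // NOTE: the names below are swapped relative to their contents; sizelo
+ // holds the high 32 bits and sizehi the low 32. CreateFileMapping takes
+ // (maxSizeHigh, maxSizeLow), so passing sizelo first is still correct.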
+ sizelo := uint32(sz >> 32)
+ sizehi := uint32(sz) & 0xffffffff
+ h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
+ if h == 0 {
+ return os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Create the memory map.
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
+ if addr == 0 {
+ return os.NewSyscallError("MapViewOfFile", errno)
+ }
+
+ // Close mapping handle.
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+ return os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Convert to a byte array.
+ db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
+ db.datasz = sz
+
+ return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go
+func munmap(db *DB) error {
+ if db.data == nil {
+ return nil
+ }
+
+ addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+ if err := syscall.UnmapViewOfFile(addr); err != nil {
+ return os.NewSyscallError("UnmapViewOfFile", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go
new file mode 100755
index 0000000..f504425
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go
@@ -0,0 +1,8 @@
+// +build !windows,!plan9,!linux,!openbsd
+
+package bolt
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go
new file mode 100755
index 0000000..0c5bf27
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bucket.go
@@ -0,0 +1,777 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+)
+
+const (
+ // MaxKeySize is the maximum length of a key, in bytes.
+ MaxKeySize = 32768
+
+ // MaxValueSize is the maximum length of a value, in bytes.
+ MaxValueSize = (1 << 31) - 2
+)
+
+const (
+ maxUint = ^uint(0)
+ minUint = 0
+ maxInt = int(^uint(0) >> 1)
+ minInt = -maxInt - 1
+)
+
+const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
+
+const (
+ minFillPercent = 0.1
+ maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
+// Bucket represents a collection of key/value pairs inside the database.
+type Bucket struct {
+ *bucket
+ tx *Tx // the associated transaction
+ buckets map[string]*Bucket // subbucket cache
+ page *page // inline page reference
+ rootNode *node // materialized node for the root page.
+ nodes map[pgid]*node // node cache
+
+ // Sets the threshold for filling nodes when they split. By default,
+ // the bucket will fill to 50% but it can be useful to increase this
+ // amount if you know that your write workloads are mostly append-only.
+ //
+ // This is not persisted across transactions, so it must be set in every Tx.
+ FillPercent float64
+}
+
+// bucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type bucket struct {
+ root pgid // page id of the bucket's root-level page
+ sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+ var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
+ if tx.writable {
+ b.buckets = make(map[string]*Bucket)
+ b.nodes = make(map[pgid]*node)
+ }
+ return b
+}
+
+// Tx returns the tx of the bucket.
+func (b *Bucket) Tx() *Tx {
+ return b.tx
+}
+
+// Root returns the root of the bucket.
+func (b *Bucket) Root() pgid {
+ return b.root
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+ return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+ // Update transaction statistics.
+ b.tx.stats.CursorCount++
+
+ // Allocate and return a cursor.
+ return &Cursor{
+ bucket: b,
+ stack: make([]elemRef, 0),
+ }
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+ if b.buckets != nil {
+ if child := b.buckets[string(name)]; child != nil {
+ return child
+ }
+ }
+
+ // Move cursor to key.
+ c := b.Cursor()
+ k, v, flags := c.seek(name)
+
+ // Return nil if the key doesn't exist or it is not a bucket.
+ if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+ return nil
+ }
+
+ // Otherwise create a bucket and cache it.
+ var child = b.openBucket(v)
+ if b.buckets != nil {
+ b.buckets[string(name)] = child
+ }
+
+ return child
+}
+
+// Helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket
+func (b *Bucket) openBucket(value []byte) *Bucket {
+ var child = newBucket(b.tx)
+
+ // If unaligned load/stores are broken on this arch and value is
+ // unaligned simply clone to an aligned byte array.
+ unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
+
+ if unaligned {
+ value = cloneBytes(value)
+ }
+
+ // If this is a writable transaction then we need to copy the bucket entry.
+ // Read-only transactions can point directly at the mmap entry.
+ if b.tx.writable && !unaligned {
+ child.bucket = &bucket{}
+ *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
+ } else {
+ child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ }
+
+ // Save a reference to the inline page if the bucket is inline.
+ if child.root == 0 {
+ child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ }
+
+ return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ if b.tx.db == nil {
+ return nil, ErrTxClosed
+ } else if !b.tx.writable {
+ return nil, ErrTxNotWritable
+ } else if len(key) == 0 {
+ return nil, ErrBucketNameRequired
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key.
+ if bytes.Equal(key, k) {
+ if (flags & bucketLeafFlag) != 0 {
+ return nil, ErrBucketExists
+ }
+ return nil, ErrIncompatibleValue
+ }
+
+ // Create empty, inline bucket.
+ var bucket = Bucket{
+ bucket: &bucket{},
+ rootNode: &node{isLeaf: true},
+ FillPercent: DefaultFillPercent,
+ }
+ var value = bucket.write()
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, bucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(key), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ child, err := b.CreateBucket(key)
+ if err == ErrBucketExists {
+ return b.Bucket(key), nil
+ } else if err != nil {
+ return nil, err
+ }
+ return child, nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(key, k) {
+ return ErrBucketNotFound
+ } else if (flags & bucketLeafFlag) == 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(key)
+ err := child.ForEach(func(k, v []byte) error {
+ if v == nil {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(key))
+
+ // Release all bucket pages to freelist.
+ child.nodes = nil
+ child.rootNode = nil
+ child.free()
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return nil
+ }
+
+ // If our target node isn't the same key as what's passed in then return nil.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+ return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
+ return ErrKeyRequired
+ } else if len(key) > MaxKeySize {
+ return ErrKeyTooLarge
+ } else if int64(len(value)) > MaxValueSize {
+ return ErrValueTooLarge
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Insert into node.
+ key = cloneBytes(key)
+ c.node().put(key, key, value, 0, 0)
+
+ return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ _, _, flags := c.seek(key)
+
+ // Return an error if there is already existing bucket value.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Set the sequence.
+ b.bucket.sequence = v
+ return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+ if b.tx.db == nil {
+ return 0, ErrTxClosed
+ } else if !b.Writable() {
+ return 0, ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.root, nil)
+ }
+
+ // Increment and return the sequence.
+ b.bucket.sequence++
+ return b.bucket.sequence, nil
+}
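
NextSequence is how callers get auto-incrementing keys without tracking a counter themselves. A hedged usage sketch; the database path, bucket name, and key format are illustrative, not taken from this repo.

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("seq.db", 0600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("items"))
		if err != nil {
			return err
		}
		id, err := b.NextSequence() // only fails on a closed or read-only tx
		if err != nil {
			return err
		}
		// Zero-padded ids keep byte-wise ordering equal to numeric ordering.
		return b.Put([]byte(fmt.Sprintf("%08d", id)), []byte("payload"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```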
+
+// ForEach executes a function for each key/value pair in a bucket.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ if err := fn(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+ var s, subStats BucketStats
+ pageSize := b.tx.db.pageSize
+ s.BucketN += 1
+ if b.root == 0 {
+ s.InlineBucketN += 1
+ }
+ b.forEachPage(func(p *page, depth int) {
+ if (p.flags & leafPageFlag) != 0 {
+ s.KeyN += int(p.count)
+
+ // used totals the used bytes for the page
+ used := pageHeaderSize
+
+ if p.count != 0 {
+ // If page has any elements, add all element headers.
+ used += leafPageElementSize * int(p.count-1)
+
+ // Add all element key, value sizes.
+ // The computation takes advantage of the fact that the position
+ // of the last element's key/value equals to the total of the sizes
+ // of all previous elements' keys and values.
+ // It also includes the last element's header.
+ lastElement := p.leafPageElement(p.count - 1)
+ used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+ }
+
+ if b.root == 0 {
+ // For inlined bucket just update the inline stats
+ s.InlineBucketInuse += used
+ } else {
+ // For non-inlined bucket update all the leaf stats
+ s.LeafPageN++
+ s.LeafInuse += used
+ s.LeafOverflowN += int(p.overflow)
+
+ // Collect stats from sub-buckets.
+ // Do that by iterating over all element headers
+ // looking for the ones with the bucketLeafFlag.
+ for i := uint16(0); i < p.count; i++ {
+ e := p.leafPageElement(i)
+ if (e.flags & bucketLeafFlag) != 0 {
+ // For any bucket element, open the element value
+ // and recursively call Stats on the contained bucket.
+ subStats.Add(b.openBucket(e.value()).Stats())
+ }
+ }
+ }
+ } else if (p.flags & branchPageFlag) != 0 {
+ s.BranchPageN++
+ lastElement := p.branchPageElement(p.count - 1)
+
+ // used totals the used bytes for the page
+ // Add header and all element headers.
+ used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+
+ // Add size of all keys and values.
+ // Again, use the fact that last element's position equals to
+ // the total of key, value sizes of all previous elements.
+ used += int(lastElement.pos + lastElement.ksize)
+ s.BranchInuse += used
+ s.BranchOverflowN += int(p.overflow)
+ }
+
+ // Keep track of maximum page depth.
+ if depth+1 > s.Depth {
+ s.Depth = (depth + 1)
+ }
+ })
+
+ // Alloc stats can be computed from page counts and pageSize.
+ s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+ s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+ // Add the max depth of sub-buckets to get total nested depth.
+ s.Depth += subStats.Depth
+ // Add the stats for all sub-buckets
+ s.Add(subStats)
+ return s
+}
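
Stats walks every page in the bucket, so its cost grows with bucket size; it is meant for diagnostics, not hot paths. A quick sketch of surfacing a few counters, assuming an open *bolt.DB named db, an existing bucket (the name is illustrative), and the fmt import.

```go
_ = db.View(func(tx *bolt.Tx) error {
	s := tx.Bucket([]byte("feeds")).Stats()
	fmt.Printf("keys=%d depth=%d leaf inuse/alloc=%d/%d\n",
		s.KeyN, s.Depth, s.LeafInuse, s.LeafAlloc)
	return nil
})
```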
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*page, int)) {
+ // If we have an inline page then just use that.
+ if b.page != nil {
+ fn(b.page, 0)
+ return
+ }
+
+ // Otherwise traverse the page hierarchy.
+ b.tx.forEachPage(b.root, 0, fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
+ // If we have an inline page or root node then just use that.
+ if b.page != nil {
+ fn(b.page, nil, 0)
+ return
+ }
+ b._forEachPageNode(b.root, 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
+ var p, n = b.pageNode(pgid)
+
+ // Execute function.
+ fn(p, n, depth)
+
+ // Recursively loop over children.
+ if p != nil {
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ b._forEachPageNode(elem.pgid, depth+1, fn)
+ }
+ }
+ } else {
+ if !n.isLeaf {
+ for _, inode := range n.inodes {
+ b._forEachPageNode(inode.pgid, depth+1, fn)
+ }
+ }
+ }
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+ // Spill all child buckets first.
+ for name, child := range b.buckets {
+ // If the child bucket is small enough and it has no child buckets then
+ // write it inline into the parent bucket's page. Otherwise spill it
+ // like a normal bucket and make the parent value a pointer to the page.
+ var value []byte
+ if child.inlineable() {
+ child.free()
+ value = child.write()
+ } else {
+ if err := child.spill(); err != nil {
+ return err
+ }
+
+ // Update the child bucket header in this bucket.
+ value = make([]byte, unsafe.Sizeof(bucket{}))
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *child.bucket
+ }
+
+ // Skip writing the bucket if there are no materialized nodes.
+ if child.rootNode == nil {
+ continue
+ }
+
+ // Update parent node.
+ var c = b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+ if !bytes.Equal([]byte(name), k) {
+ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+ }
+ if flags&bucketLeafFlag == 0 {
+ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+ }
+ c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ }
+
+ // Ignore if there's not a materialized root node.
+ if b.rootNode == nil {
+ return nil
+ }
+
+ // Spill nodes.
+ if err := b.rootNode.spill(); err != nil {
+ return err
+ }
+ b.rootNode = b.rootNode.root()
+
+ // Update the root node for this bucket.
+ if b.rootNode.pgid >= b.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+ }
+ b.root = b.rootNode.pgid
+
+ return nil
+}
+
+// inlineable returns true if a bucket is small enough to be written inline
+// and if it contains no subbuckets. Otherwise returns false.
+func (b *Bucket) inlineable() bool {
+ var n = b.rootNode
+
+ // Bucket must only contain a single leaf node.
+ if n == nil || !n.isLeaf {
+ return false
+ }
+
+ // Bucket is not inlineable if it contains subbuckets or if it goes beyond
+ // our threshold for inline bucket size.
+ var size = pageHeaderSize
+ for _, inode := range n.inodes {
+ size += leafPageElementSize + len(inode.key) + len(inode.value)
+
+ if inode.flags&bucketLeafFlag != 0 {
+ return false
+ } else if size > b.maxInlineBucketSize() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the maximum total size of a bucket to make it a candidate for inlining.
+func (b *Bucket) maxInlineBucketSize() int {
+ return b.tx.db.pageSize / 4
+}
+
+// write allocates and writes a bucket to a byte slice.
+func (b *Bucket) write() []byte {
+ // Allocate the appropriate size.
+ var n = b.rootNode
+ var value = make([]byte, bucketHeaderSize+n.size())
+
+ // Write a bucket header.
+ var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *b.bucket
+
+ // Convert byte slice to a fake page and write the root node.
+ var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ n.write(p)
+
+ return value
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+ for _, n := range b.nodes {
+ n.rebalance()
+ }
+ for _, child := range b.buckets {
+ child.rebalance()
+ }
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+ _assert(b.nodes != nil, "nodes map expected")
+
+ // Retrieve node if it's already been created.
+ if n := b.nodes[pgid]; n != nil {
+ return n
+ }
+
+ // Otherwise create a node and cache it.
+ n := &node{bucket: b, parent: parent}
+ if parent == nil {
+ b.rootNode = n
+ } else {
+ parent.children = append(parent.children, n)
+ }
+
+ // Use the inline page if this is an inline bucket.
+ var p = b.page
+ if p == nil {
+ p = b.tx.page(pgid)
+ }
+
+ // Read the page into the node and cache it.
+ n.read(p)
+ b.nodes[pgid] = n
+
+ // Update statistics.
+ b.tx.stats.NodeCount++
+
+ return n
+}
+
+// free recursively frees all pages in the bucket.
+func (b *Bucket) free() {
+ if b.root == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.free(tx.meta.txid, p)
+ } else {
+ n.free()
+ }
+ })
+ b.root = 0
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of keys/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
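
Taken together, the bucket API above supports the usual open/write/read flow. A self-contained sketch (file name and keys are illustrative); db.Update and db.View are the transaction wrappers defined in db.go.

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Put/Get enforce MaxKeySize and MaxValueSize from the top of bucket.go.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("feeds"))
		if err != nil {
			return err
		}
		return b.Put([]byte("hello"), []byte("world"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads take a read-only transaction; the returned slice is only valid
	// until the transaction ends, so copy it if it must outlive the View.
	_ = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("feeds")).Get([]byte("hello"))
		fmt.Printf("hello = %s\n", v)
		return nil
	})
}
```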
diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go
new file mode 100755
index 0000000..1be9f35
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/cursor.go
@@ -0,0 +1,400 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+//
+// Changing data while traversing with a cursor may cause it to be invalidated
+// and return unexpected keys and/or values. You must reposition your cursor
+// after mutating data.
+type Cursor struct {
+ bucket *Bucket
+ stack []elemRef
+}
+
+// Bucket returns the bucket that this cursor was created from.
+func (c *Cursor) Bucket() *Bucket {
+ return c.bucket
+}
+
+// First moves the cursor to the first item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) First() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ c.first()
+
+ // If we land on an empty page then move to the next value.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ c.next()
+ }
+
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Last moves the cursor to the last item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Last() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.root)
+ ref := elemRef{page: p, node: n}
+ ref.index = ref.count() - 1
+ c.stack = append(c.stack, ref)
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Next moves the cursor to the next item in the bucket and returns its key and value.
+// If the cursor is at the end of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Next() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.next()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Prev() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Attempt to move back one element until we're successful.
+ // Move up the stack as we hit the beginning of each page in our stack.
+ for i := len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index > 0 {
+ elem.index--
+ break
+ }
+ c.stack = c.stack[:i]
+ }
+
+ // If we've hit the end then return nil.
+ if len(c.stack) == 0 {
+ return nil, nil
+ }
+
+ // Move down the stack to find the last element of the last leaf under this branch.
+ c.last()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil key is returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ k, v, flags := c.seek(seek)
+
+ // If we ended up after the last element of a page then move to the next one.
+ if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+ k, v, flags = c.next()
+ }
+
+ if k == nil {
+ return nil, nil
+ } else if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Delete removes the current key/value under the cursor from the bucket.
+// Delete fails if current key/value is a bucket or if the transaction is not writable.
+func (c *Cursor) Delete() error {
+ if c.bucket.tx.db == nil {
+ return ErrTxClosed
+ } else if !c.bucket.Writable() {
+ return ErrTxNotWritable
+ }
+
+ key, _, flags := c.keyValue()
+ // Return an error if current value is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
+ c.node().del(key)
+
+ return nil
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+ // Start from root page/node and traverse to correct page.
+ c.stack = c.stack[:0]
+ c.search(seek, c.bucket.root)
+ ref := &c.stack[len(c.stack)-1]
+
+ // If the cursor is pointing to the end of page/node then return nil.
+ if ref.index >= ref.count() {
+ return nil, nil, 0
+ }
+
+ // If this is a bucket then return a nil value.
+ return c.keyValue()
+}
+
+// first moves the cursor to the first leaf element under the last page in the stack.
+func (c *Cursor) first() {
+ for {
+ // Exit when we hit a leaf page.
+ var ref = &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the first element to the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ }
+}
+
+// last moves the cursor to the last leaf element under the last page in the stack.
+func (c *Cursor) last() {
+ for {
+ // Exit when we hit a leaf page.
+ ref := &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the last element in the stack.
+ var pgid pgid
+ if ref.node != nil {
+ pgid = ref.node.inodes[ref.index].pgid
+ } else {
+ pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ }
+ p, n := c.bucket.pageNode(pgid)
+
+ var nextRef = elemRef{page: p, node: n}
+ nextRef.index = nextRef.count() - 1
+ c.stack = append(c.stack, nextRef)
+ }
+}
+
+// next moves to the next leaf element and returns the key and value.
+// If the cursor is at the last leaf element then it stays there and returns nil.
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ for {
+ // Attempt to move over one element until we're successful.
+ // Move up the stack as we hit the end of each page in our stack.
+ var i int
+ for i = len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index < elem.count()-1 {
+ elem.index++
+ break
+ }
+ }
+
+ // If we've hit the root page then stop and return. This will leave the
+ // cursor on the last element of the last page.
+ if i == -1 {
+ return nil, nil, 0
+ }
+
+ // Otherwise start from where we left off in the stack and find the
+ // first element of the first leaf page.
+ c.stack = c.stack[:i+1]
+ c.first()
+
+ // If this is an empty page then restart and move back up the stack.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ continue
+ }
+
+ return c.keyValue()
+ }
+}
+
+// search recursively performs a binary search against a given page/node until it finds a given key.
+func (c *Cursor) search(key []byte, pgid pgid) {
+ p, n := c.bucket.pageNode(pgid)
+ if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
+ panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+ }
+ e := elemRef{page: p, node: n}
+ c.stack = append(c.stack, e)
+
+ // If we're on a leaf page/node then find the specific node.
+ if e.isLeaf() {
+ c.nsearch(key)
+ return
+ }
+
+ if n != nil {
+ c.searchNode(key, n)
+ return
+ }
+ c.searchPage(key, p)
+}
+
+func (c *Cursor) searchNode(key []byte, n *node) {
+ var exact bool
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(n.inodes[i].key, key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, n.inodes[index].pgid)
+}
+
+func (c *Cursor) searchPage(key []byte, p *page) {
+ // Binary search for the correct range.
+ inodes := p.branchPageElements()
+
+ var exact bool
+ index := sort.Search(int(p.count), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(inodes[i].key(), key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, inodes[index].pgid)
+}
+
+// nsearch searches the leaf node on the top of the stack for a key.
+func (c *Cursor) nsearch(key []byte) {
+ e := &c.stack[len(c.stack)-1]
+ p, n := e.page, e.node
+
+ // If we have a node then search its inodes.
+ if n != nil {
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, key) != -1
+ })
+ e.index = index
+ return
+ }
+
+ // If we have a page then search its leaf elements.
+ inodes := p.leafPageElements()
+ index := sort.Search(int(p.count), func(i int) bool {
+ return bytes.Compare(inodes[i].key(), key) != -1
+ })
+ e.index = index
+}
+
+// keyValue returns the key and value of the current leaf element.
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
+ ref := &c.stack[len(c.stack)-1]
+ if ref.count() == 0 || ref.index >= ref.count() {
+ return nil, nil, 0
+ }
+
+ // Retrieve value from node.
+ if ref.node != nil {
+ inode := &ref.node.inodes[ref.index]
+ return inode.key, inode.value, inode.flags
+ }
+
+ // Or retrieve value from page.
+ elem := ref.page.leafPageElement(uint16(ref.index))
+ return elem.key(), elem.value(), elem.flags
+}
+
+// node returns the node that the cursor is currently positioned on.
+func (c *Cursor) node() *node {
+ _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+
+ // If the top of the stack is a leaf node then just return it.
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ return ref.node
+ }
+
+ // Start from root and traverse down the hierarchy.
+ var n = c.stack[0].node
+ if n == nil {
+ n = c.bucket.node(c.stack[0].page.id, nil)
+ }
+ for _, ref := range c.stack[:len(c.stack)-1] {
+ _assert(!n.isLeaf, "expected branch node")
+ n = n.childAt(int(ref.index))
+ }
+ _assert(n.isLeaf, "expected leaf node")
+ return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+ page *page
+ node *node
+ index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+ if r.node != nil {
+ return r.node.isLeaf
+ }
+ return (r.page.flags & leafPageFlag) != 0
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+ if r.node != nil {
+ return len(r.node.inodes)
+ }
+ return int(r.page.count)
+}
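
Since cursors iterate in byte-sorted order, Seek plus a prefix check gives cheap range scans. A sketch of the pattern (bucket and key names are illustrative).

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("cursor.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a few keys so the scan below has something to walk.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("feeds"))
		if err != nil {
			return err
		}
		for _, k := range []string{"podcast/a", "podcast/b", "torrent/c"} {
			if err := b.Put([]byte(k), []byte("v")); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Prefix scan: Seek positions the cursor at the first key >= prefix,
	// and Next walks forward in sorted order until the prefix stops matching.
	_ = db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("feeds")).Cursor()
		prefix := []byte("podcast/")
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s => %s\n", k, v)
		}
		return nil
	})
}
```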
diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go
new file mode 100755
index 0000000..cc3596f
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/db.go
@@ -0,0 +1,1037 @@
+package bolt
+
+import (
+ "errors"
+ "fmt"
+ "hash/fnv"
+ "log"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+// The largest step that can be taken when remapping the mmap.
+const maxMmapStep = 1 << 30 // 1GB
+
+// The data file format version.
+const version = 2
+
+// Represents a marker value to indicate that a file is a Bolt DB.
+const magic uint32 = 0xED0CDAED
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+ DefaultMaxBatchSize int = 1000
+ DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
+)
+
+// default page size for db is set to the OS page size.
+var defaultPageSize = os.Getpagesize()
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+ // When enabled, the database will perform a Check() after every commit.
+ // A panic is issued if the database is in an inconsistent state. This
+ // flag has a large performance impact so it should only be used for
+ // debugging purposes.
+ StrictMode bool
+
+ // Setting the NoSync flag will cause the database to skip fsync()
+ // calls after each commit. This can be useful when bulk loading data
+ // into a database and you can restart the bulk load in the event of
+ // a system failure or database corruption. Do not set this flag for
+ // normal use.
+ //
+ // If the package global IgnoreNoSync constant is true, this value is
+ // ignored. See the comment on that constant for more details.
+ //
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
+ // When true, skips the truncate call when growing the database.
+ // Setting this to true is only safe on non-ext3/ext4 systems.
+ // Skipping truncation avoids preallocation of hard drive space and
+ // bypasses a truncate() and fsync() syscall on remapping.
+ //
+ // https://github.com/boltdb/bolt/issues/284
+ NoGrowSync bool
+
+ // If you want to read the entire database fast, you can set MmapFlags to
+ // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+ MmapFlags int
+
+ // MaxBatchSize is the maximum size of a batch. Default value is
+ // copied from DefaultMaxBatchSize in Open.
+ //
+ // If <=0, disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchSize int
+
+ // MaxBatchDelay is the maximum delay before a batch starts.
+ // Default value is copied from DefaultMaxBatchDelay in Open.
+ //
+ // If <=0, effectively disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchDelay time.Duration
+
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
+ path string
+ file *os.File
+ lockfile *os.File // windows only
+ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[maxMapSize]byte
+ datasz int
+ filesz int // current on disk file size
+ meta0 *meta
+ meta1 *meta
+ pageSize int
+ opened bool
+ rwtx *Tx
+ txs []*Tx
+ freelist *freelist
+ stats Stats
+
+ pagePool sync.Pool
+
+ batchMu sync.Mutex
+ batch *batch
+
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
+
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
+
+ // Read only mode.
+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+ readOnly bool
+}
+
+// Path returns the path to currently open database file.
+func (db *DB) Path() string {
+ return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+ return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+ return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path.
+// If the file does not exist then it will be created automatically.
+// Passing in nil options will cause Bolt to open the database with the default options.
+func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ var db = &DB{opened: true}
+
+ // Set default options if no options are provided.
+ if options == nil {
+ options = DefaultOptions
+ }
+ db.NoGrowSync = options.NoGrowSync
+ db.MmapFlags = options.MmapFlags
+
+ // Set default values for later DB operations.
+ db.MaxBatchSize = DefaultMaxBatchSize
+ db.MaxBatchDelay = DefaultMaxBatchDelay
+ db.AllocSize = DefaultAllocSize
+
+ flag := os.O_RDWR
+ if options.ReadOnly {
+ flag = os.O_RDONLY
+ db.readOnly = true
+ }
+
+ // Open data file and separate sync handler for metadata writes.
+ db.path = path
+ var err error
+ if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Lock file so that other processes using Bolt in read-write mode cannot
+ // use the database at the same time. This would cause corruption since
+ // the two processes would write meta pages and free pages separately.
+ // The database file is locked exclusively (only one process can grab the lock)
+ // if !options.ReadOnly.
+ // The database file is locked using the shared lock (more than one process may
+ // hold a lock at the same time) otherwise (options.ReadOnly is set).
+ if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Default values for test hooks
+ db.ops.writeAt = db.file.WriteAt
+
+ // Initialize the database if it doesn't exist.
+ if info, err := db.file.Stat(); err != nil {
+ return nil, err
+ } else if info.Size() == 0 {
+ // Initialize new files with meta pages.
+ if err := db.init(); err != nil {
+ return nil, err
+ }
+ } else {
+ // Read the first meta page to determine the page size.
+ var buf [0x1000]byte
+ if _, err := db.file.ReadAt(buf[:], 0); err == nil {
+ m := db.pageInBuffer(buf[:], 0).meta()
+ if err := m.validate(); err != nil {
+ // If we can't read the page size, we can assume it's the same
+ // as the OS -- since that's how the page size was chosen in the
+ // first place.
+ //
+ // If the first page is invalid and this OS uses a different
+ // page size than what the database was created with then we
+ // are out of luck and cannot access the database.
+ db.pageSize = os.Getpagesize()
+ } else {
+ db.pageSize = int(m.pageSize)
+ }
+ }
+ }
+
+ // Initialize page pool.
+ db.pagePool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, db.pageSize)
+ },
+ }
+
+ // Memory map the data file.
+ if err := db.mmap(options.InitialMmapSize); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+ // Read in the freelist.
+ db.freelist = newFreelist()
+ db.freelist.read(db.page(db.meta().freelist))
+
+ // Mark the database as opened and return.
+ return db, nil
+}
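
Open's options matter in practice: Timeout bounds the flock retry loop described above, and ReadOnly switches to a shared lock. A hedged fragment (the file name is illustrative; assumes the bolt, log, and time imports).

```go
db, err := bolt.Open("rssmon.db", 0600, &bolt.Options{
	Timeout:  time.Second, // give up on the file lock after one second
	ReadOnly: true,        // shared flock; Update/Begin(true) will error
})
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```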
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) error {
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ info, err := db.file.Stat()
+ if err != nil {
+ return fmt.Errorf("mmap stat error: %s", err)
+ } else if int(info.Size()) < db.pageSize*2 {
+ return fmt.Errorf("file size too small")
+ }
+
+ // Ensure the size is at least the minimum size.
+ var size = int(info.Size())
+ if size < minsz {
+ size = minsz
+ }
+ size, err = db.mmapSize(size)
+ if err != nil {
+ return err
+ }
+
+ // Dereference all mmap references before unmapping.
+ if db.rwtx != nil {
+ db.rwtx.root.dereference()
+ }
+
+ // Unmap existing data before continuing.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Memory-map the data file as a byte slice.
+ if err := mmap(db, size); err != nil {
+ return err
+ }
+
+ // Save references to the meta pages.
+ db.meta0 = db.page(0).meta()
+ db.meta1 = db.page(1).meta()
+
+ // Validate the meta pages. We only return an error if both meta pages fail
+ // validation, since meta0 failing validation means that it wasn't saved
+ // properly -- but we can recover using meta1. And vice-versa.
+ err0 := db.meta0.validate()
+ err1 := db.meta1.validate()
+ if err0 != nil && err1 != nil {
+ return err0
+ }
+
+ return nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+ if err := munmap(db); err != nil {
+ return fmt.Errorf("unmap error: " + err.Error())
+ }
+ return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > maxMapSize {
+ return 0, fmt.Errorf("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+ // This should always be true since we're incrementing in MBs.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > maxMapSize {
+ sz = maxMapSize
+ }
+
+ return int(sz), nil
+}
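
To make the growth policy concrete, here is a mirror of mmapSize as a pure function, for illustration only (the maxMapSize overflow check and final clamp in the real method are elided for brevity).

```go
// nextMmapSize mirrors db.mmapSize: double within [32KB, 1GB], then grow
// in 1GB steps rounded up to a multiple of the page size.
func nextMmapSize(size, pageSize int) int {
	for i := uint(15); i <= 30; i++ { // 1<<15 = 32KB ... 1<<30 = 1GB
		if size <= 1<<i {
			return 1 << i
		}
	}
	sz := int64(size)
	if r := sz % (1 << 30); r > 0 {
		sz += (1 << 30) - r
	}
	if p := int64(pageSize); sz%p != 0 {
		sz = (sz/p + 1) * p
	}
	return int(sz) // e.g. 30_000 -> 32768; 1.5GB -> 2GB
}
```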
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Set the page size to the OS page size.
+ db.pageSize = os.Getpagesize()
+
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf[:], pgid(i))
+ p.id = pgid(i)
+ p.flags = metaPageFlag
+
+ // Initialize the meta page.
+ m := p.meta()
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+ m.txid = txid(i)
+ m.checksum = m.sum64()
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf[:], pgid(2))
+ p.id = pgid(2)
+ p.flags = freelistPageFlag
+ p.count = 0
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf[:], pgid(3))
+ p.id = pgid(3)
+ p.flags = leafPageFlag
+ p.count = 0
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.RLock()
+ defer db.mmaplock.RUnlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ return err
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ log.Printf("bolt.Close(): funlock error: %s", err)
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ return fmt.Errorf("db file close: %s", err)
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potentially blocking the write transaction.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
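+//
+// For example, a manual read-only transaction might look like this (a
+// minimal sketch with error handling elided):
+//
+//	tx, err := db.Begin(false)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	// ... read data through tx ...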
+func (db *DB) Begin(writable bool) (*Tx, error) {
+ if writable {
+ return db.beginRWTx()
+ }
+ return db.beginTx()
+}
+
+func (db *DB) beginTx() (*Tx, error) {
+ // Lock the meta pages while we initialize the transaction. We obtain
+ // the meta lock before the mmap lock because that's the order that the
+ // write transaction will obtain them.
+ db.metalock.Lock()
+
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it will
+ // obtain a write lock so all transactions must finish before it can be
+ // remapped.
+ db.mmaplock.RLock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{}
+ t.init(db)
+
+ // Keep track of transaction until it closes.
+ db.txs = append(db.txs, t)
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Update the transaction stats.
+ db.statlock.Lock()
+ db.stats.TxN++
+ db.stats.OpenTxN = n
+ db.statlock.Unlock()
+
+ return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+ // If the database was opened with Options.ReadOnly, return an error.
+ if db.readOnly {
+ return nil, ErrDatabaseReadOnly
+ }
+
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.rwlock.Unlock()
+ return nil, ErrDatabaseNotOpen
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{writable: true}
+ t.init(db)
+ db.rwtx = t
+
+ // Free any pages associated with closed read-only transactions.
+ var minid txid = 0xFFFFFFFFFFFFFFFF
+ for _, t := range db.txs {
+ if t.meta.txid < minid {
+ minid = t.meta.txid
+ }
+ }
+ if minid > 0 {
+ db.freelist.release(minid - 1)
+ }
+
+ return t, nil
+}
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+ // Release the read lock on the mmap.
+ db.mmaplock.RUnlock()
+
+ // Use the meta lock to restrict access to the DB object.
+ db.metalock.Lock()
+
+ // Remove the transaction.
+ for i, t := range db.txs {
+ if t == tx {
+ last := len(db.txs) - 1
+ db.txs[i] = db.txs[last]
+ db.txs[last] = nil
+ db.txs = db.txs[:last]
+ break
+ }
+ }
+ n := len(db.txs)
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Merge statistics.
+ db.statlock.Lock()
+ db.stats.OpenTxN = n
+ db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
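+//
+// For example (a sketch; the bucket and key names are illustrative):
+//
+//	err := db.Update(func(tx *bolt.Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err
+//		}
+//		return b.Put([]byte("foo"), []byte("bar"))
+//	})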
+func (db *DB) Update(fn func(*Tx) error) error {
+ t, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually commit.
+ t.managed = true
+
+ // If an error is returned from the function then rollback and return error.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Commit()
+}
+
+// View executes a function within the context of a managed read-only transaction.
+// Any error that is returned from the function is returned from the View() method.
+//
+// Attempting to manually rollback within the function will cause a panic.
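+//
+// For example (a sketch; assumes the "widgets" bucket already exists):
+//
+//	err := db.View(func(tx *bolt.Tx) error {
+//		v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+//		fmt.Printf("foo=%s\n", v)
+//		return nil
+//	})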
+func (db *DB) View(fn func(*Tx) error) error {
+ t, err := db.Begin(false)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually rollback.
+ t.managed = true
+
+ // If an error is returned from the function then pass it through.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ if err := t.Rollback(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Batch calls fn as part of a batch. It behaves similarly to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen by the
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
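+//
+// For example, several goroutines can funnel writes through one batch (a
+// sketch; the key is arbitrary and fn must tolerate being re-run):
+//
+//	for i := 0; i < 4; i++ {
+//		go func() {
+//			_ = db.Batch(func(tx *bolt.Tx) error {
+//				return tx.Bucket([]byte("widgets")).Put([]byte("k"), []byte("v"))
+//			})
+//		}()
+//	}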
+func (db *DB) Batch(fn func(*Tx) error) error {
+ errCh := make(chan error, 1)
+
+ db.batchMu.Lock()
+ if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
+ // There is no existing batch, or the existing batch is full; start a new one.
+ db.batch = &batch{
+ db: db,
+ }
+ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+ }
+ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+ if len(db.batch.calls) >= db.MaxBatchSize {
+ // wake up the batch; it's ready to run
+ go db.batch.trigger()
+ }
+ db.batchMu.Unlock()
+
+ err := <-errCh
+ if err == trySolo {
+ err = db.Update(fn)
+ }
+ return err
+}
+
+type call struct {
+ fn func(*Tx) error
+ err chan<- error
+}
+
+type batch struct {
+ db *DB
+ timer *time.Timer
+ start sync.Once
+ calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+ b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+ b.db.batchMu.Lock()
+ b.timer.Stop()
+ // Make sure no new work is added to this batch, but don't break
+ // other batches.
+ if b.db.batch == b {
+ b.db.batch = nil
+ }
+ b.db.batchMu.Unlock()
+
+retry:
+ for len(b.calls) > 0 {
+ var failIdx = -1
+ err := b.db.Update(func(tx *Tx) error {
+ for i, c := range b.calls {
+ if err := safelyCall(c.fn, tx); err != nil {
+ failIdx = i
+ return err
+ }
+ }
+ return nil
+ })
+
+ if failIdx >= 0 {
+ // take the failing transaction out of the batch. it's
+ // safe to shorten b.calls here because db.batch no longer
+ // points to us, and we hold the mutex anyway.
+ c := b.calls[failIdx]
+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+ // tell the submitter to re-run it solo; continue with the rest of the batch
+ c.err <- trySolo
+ continue retry
+ }
+
+ // pass success, or bolt internal errors, to all callers
+ for _, c := range b.calls {
+ c.err <- err
+ }
+ break retry
+ }
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+ reason interface{}
+}
+
+func (p panicked) Error() string {
+ if err, ok := p.reason.(error); ok {
+ return err.Error()
+ }
+ return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = panicked{p}
+ }
+ }()
+ return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation, however, if you use NoSync
+// then it allows you to force the database file to sync against the disk.
+func (db *DB) Sync() error { return fdatasync(db) }
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
+ return db.stats
+}
+
+// Info provides internal access to the raw data bytes from the C cursor;
+// use carefully, or not at all.
+func (db *DB) Info() *Info {
+ return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id pgid) *page {
+ pos := id * pgid(db.pageSize)
+ return (*page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id pgid) *page {
+ return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *meta {
+ // We have to return the meta with the highest txid which doesn't fail
+ // validation. Otherwise, we can cause errors when in fact the database is
+ // in a consistent state. metaA is the one with the higher txid.
+ metaA := db.meta0
+ metaB := db.meta1
+ if db.meta1.txid > db.meta0.txid {
+ metaA = db.meta1
+ metaB = db.meta0
+ }
+
+ // Use the higher meta page if valid. Otherwise fall back to the previous one, if valid.
+ if err := metaA.validate(); err == nil {
+ return metaA
+ } else if err := metaB.validate(); err == nil {
+ return metaB
+ }
+
+ // This should never be reached, because both meta1 and meta0 were validated
+ // on mmap() and we do fsync() on every write.
+ panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(count int) (*page, error) {
+ // Allocate a temporary buffer for the page.
+ var buf []byte
+ if count == 1 {
+ buf = db.pagePool.Get().([]byte)
+ } else {
+ buf = make([]byte, count*db.pageSize)
+ }
+ p := (*page)(unsafe.Pointer(&buf[0]))
+ p.overflow = uint32(count - 1)
+
+ // Use pages from the freelist if they are available.
+ if p.id = db.freelist.allocate(count); p.id != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.id = db.rwtx.meta.pgid
+ var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ db.rwtx.meta.pgid += pgid(count)
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
+ // grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transaction
+ // if the InitialMmapSize is large enough to hold database mmap
+ // size. (See DB.Begin for more information)
+ //
+ // If <=0, the initial map size is 0.
+ // If InitialMmapSize is smaller than the previous database size,
+ // it has no effect.
+ InitialMmapSize int
+}
+
+// DefaultOptions represent the options used if nil options are passed into Open().
+// No timeout is used which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+}
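+
+// For example, a caller might open a database with a one-second lock timeout
+// (an illustrative sketch; the path and timeout are arbitrary):
+//
+//	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: time.Second})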
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+
+ TxStats TxStats // global, ongoing stats.
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
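+//
+// For example (a sketch; the sampling interval is arbitrary):
+//
+//	prev := db.Stats()
+//	time.Sleep(10 * time.Second)
+//	diff := db.Stats().Sub(&prev)
+//	// diff.TxN counts read transactions started during the interval.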
+func (s *Stats) Sub(other *Stats) Stats {
+ if other == nil {
+ return *s
+ }
+ var diff Stats
+ diff.FreePageN = s.FreePageN
+ diff.PendingPageN = s.PendingPageN
+ diff.FreeAlloc = s.FreeAlloc
+ diff.FreelistInuse = s.FreelistInuse
+ diff.TxN = s.TxN - other.TxN
+ diff.TxStats = s.TxStats.Sub(&other.TxStats)
+ return diff
+}
+
+func (s *Stats) add(other *Stats) {
+ s.TxStats.add(&other.TxStats)
+}
+
+type Info struct {
+ Data uintptr
+ PageSize int
+}
+
+type meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root bucket
+ freelist pgid
+ pgid pgid
+ txid txid
+ checksum uint64
+}
+
+// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *meta) validate() error {
+ if m.magic != magic {
+ return ErrInvalid
+ } else if m.version != version {
+ return ErrVersionMismatch
+ } else if m.checksum != 0 && m.checksum != m.sum64() {
+ return ErrChecksum
+ }
+ return nil
+}
+
+// copy copies one meta object to another.
+func (m *meta) copy(dest *meta) {
+ *dest = *m
+}
+
+// write writes the meta onto a page.
+func (m *meta) write(p *page) {
+ if m.root.root >= m.pgid {
+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ } else if m.freelist >= m.pgid {
+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ }
+
+ // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ p.id = pgid(m.txid % 2)
+ p.flags |= metaPageFlag
+
+ // Calculate the checksum.
+ m.checksum = m.sum64()
+
+ m.copy(p.meta())
+}
+
+// generates the checksum for the meta.
+func (m *meta) sum64() uint64 {
+ var h = fnv.New64a()
+ _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ return h.Sum64()
+}
+
+// _assert will panic with a given formatted message if the given condition is false.
+func _assert(condition bool, msg string, v ...interface{}) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
+
+func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
+func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+
+func printstack() {
+ stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+ fmt.Fprintln(os.Stderr, stack)
+}
diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go
new file mode 100755
index 0000000..cc93784
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/doc.go
@@ -0,0 +1,44 @@
+/*
+Package bolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+
+Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
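+
+For example, a typical session opens a database file and then reads and
+writes inside managed transactions (a minimal sketch; the file name is
+illustrative):
+
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err
+		}
+		return b.Put([]byte("answer"), []byte("42"))
+	})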
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database; however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
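+
+For example, to retain a value after a read transaction closes, copy it
+first (a sketch; assumes the bucket and key exist):
+
+	var out []byte
+	err := db.View(func(tx *bolt.Tx) error {
+		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
+		out = append(out, v...) // copy; v is invalid once the tx closes
+		return nil
+	})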
+
+
+*/
+package bolt
diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go
new file mode 100755
index 0000000..a3620a3
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/errors.go
@@ -0,0 +1,71 @@
+package bolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+ // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+ // is opened or after it is closed.
+ ErrDatabaseNotOpen = errors.New("database not open")
+
+ // ErrDatabaseOpen is returned when opening a database that is
+ // already open.
+ ErrDatabaseOpen = errors.New("database already open")
+
+ // ErrInvalid is returned when both meta pages on a database are invalid.
+ // This typically occurs when a file is not a bolt database.
+ ErrInvalid = errors.New("invalid database")
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ ErrVersionMismatch = errors.New("version mismatch")
+
+ // ErrChecksum is returned when either meta page checksum does not match.
+ ErrChecksum = errors.New("checksum error")
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ ErrTxNotWritable = errors.New("tx not writable")
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ ErrBucketNotFound = errors.New("bucket not found")
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ ErrBucketExists = errors.New("bucket already exists")
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ ErrBucketNameRequired = errors.New("bucket name required")
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ ErrKeyRequired = errors.New("key required")
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100755
index 0000000..aba48f5
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,252 @@
+package bolt
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ ids []pgid // all free and available free page ids.
+ pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+ return &freelist{
+ pending: make(map[txid][]pgid),
+ cache: make(map[pgid]bool),
+ }
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+ n := f.count()
+ if n >= 0xFFFF {
+ // The first element will be used to store the count. See freelist.write.
+ n++
+ }
+ return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+ return f.free_count() + f.pending_count()
+}
+
+// free_count returns count of free pages
+func (f *freelist) free_count() int {
+ return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+ var count int
+ for _, list := range f.pending {
+ count += len(list)
+ }
+ return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+ m := make(pgids, 0, f.pending_count())
+ for _, list := range f.pending {
+ m = append(m, list...)
+ }
+ sort.Sort(m)
+ mergepgids(dst, f.ids, m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+ if p.id <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+ }
+
+ // Free page and all its overflow pages.
+ var ids = f.pending[txid]
+ for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ // Verify that page is not already free.
+ if f.cache[id] {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+
+ // Add to the freelist and cache.
+ ids = append(ids, id)
+ f.cache[id] = true
+ }
+ f.pending[txid] = ids
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+ m := make(pgids, 0)
+ for tid, ids := range f.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+ m = append(m, ids...)
+ delete(f.pending, tid)
+ }
+ }
+ sort.Sort(m)
+ f.ids = pgids(f.ids).merge(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+ // Remove page ids from cache.
+ for _, id := range f.pending[txid] {
+ delete(f.cache, id)
+ }
+
+ // Remove pages from pending list.
+ delete(f.pending, txid)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+ return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+ // If the page.count is at the max uint16 value (64k) then it's considered
+ // an overflow and the size of the freelist is stored as the first element.
+ idx, count := 0, int(p.count)
+ if count == 0xFFFF {
+ idx = 1
+ count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+ }
+
+ // Copy the list of page ids from the freelist.
+ if count == 0 {
+ f.ids = nil
+ } else {
+ ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
+ f.ids = make([]pgid, len(ids))
+ copy(f.ids, ids)
+
+ // Make sure they're sorted.
+ sort.Sort(pgids(f.ids))
+ }
+
+ // Rebuild the page cache.
+ f.reindex()
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.flags |= freelistPageFlag
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ lenids := f.count()
+ if lenids == 0 {
+ p.count = uint16(lenids)
+ } else if lenids < 0xFFFF {
+ p.count = uint16(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+ } else {
+ p.count = 0xFFFF
+ ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+ }
+
+ return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+ f.read(p)
+
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, pendingIDs := range f.pending {
+ for _, pendingID := range pendingIDs {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range f.ids {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+ f.ids = a
+
+ // Once the available list is rebuilt then rebuild the free cache so that
+ // it includes the available and pending free pages.
+ f.reindex()
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+ f.cache = make(map[pgid]bool, len(f.ids))
+ for _, id := range f.ids {
+ f.cache[id] = true
+ }
+ for _, pendingIDs := range f.pending {
+ for _, pendingID := range pendingIDs {
+ f.cache[pendingID] = true
+ }
+ }
+}
diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go
new file mode 100755
index 0000000..159318b
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/node.go
@@ -0,0 +1,604 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+ bucket *Bucket
+ isLeaf bool
+ unbalanced bool
+ spilled bool
+ key []byte
+ pgid pgid
+ parent *node
+ children nodes
+ inodes inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+ if n.parent == nil {
+ return n
+ }
+ return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+ if n.isLeaf {
+ return 1
+ }
+ return 2
+}
+
+// size returns the size of the node after serialization.
+func (n *node) size() int {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + len(item.key) + len(item.value)
+ }
+ return sz
+}
+
+// sizeLessThan returns true if the node is less than a given size.
+// This is an optimization to avoid calculating a large node when we only need
+// to know if it fits inside a certain page size.
+func (n *node) sizeLessThan(v int) bool {
+ sz, elsz := pageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + len(item.key) + len(item.value)
+ if sz >= v {
+ return false
+ }
+ }
+ return true
+}
+
+// pageElementSize returns the size of each page element based on the type of node.
+func (n *node) pageElementSize() int {
+ if n.isLeaf {
+ return leafPageElementSize
+ }
+ return branchPageElementSize
+}
+
+// childAt returns the child node at a given index.
+func (n *node) childAt(index int) *node {
+ if n.isLeaf {
+ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+ }
+ return n.bucket.node(n.inodes[index].pgid, n)
+}
+
+// childIndex returns the index of a given child node.
+func (n *node) childIndex(child *node) int {
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
+ return index
+}
+
+// numChildren returns the number of children.
+func (n *node) numChildren() int {
+ return len(n.inodes)
+}
+
+// nextSibling returns the next node with the same parent.
+func (n *node) nextSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index >= n.parent.numChildren()-1 {
+ return nil
+ }
+ return n.parent.childAt(index + 1)
+}
+
+// prevSibling returns the previous node with the same parent.
+func (n *node) prevSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index == 0 {
+ return nil
+ }
+ return n.parent.childAt(index - 1)
+}
+
+// put inserts a key/value.
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+ if pgid >= n.bucket.tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+ } else if len(oldKey) <= 0 {
+ panic("put: zero-length old key")
+ } else if len(newKey) <= 0 {
+ panic("put: zero-length new key")
+ }
+
+ // Find insertion index.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+
+ // Add capacity and shift nodes if we don't have an exact match and need to insert.
+ exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
+ if !exact {
+ n.inodes = append(n.inodes, inode{})
+ copy(n.inodes[index+1:], n.inodes[index:])
+ }
+
+ inode := &n.inodes[index]
+ inode.flags = flags
+ inode.key = newKey
+ inode.value = value
+ inode.pgid = pgid
+ _assert(len(inode.key) > 0, "put: zero-length inode key")
+}
+
+// del removes a key from the node.
+func (n *node) del(key []byte) {
+ // Find index of key.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
+
+ // Exit if the key isn't found.
+ if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
+ return
+ }
+
+ // Delete inode from the node.
+ n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
+
+ // Mark the node as needing rebalancing.
+ n.unbalanced = true
+}
+
+// read initializes the node from a page.
+func (n *node) read(p *page) {
+ n.pgid = p.id
+ n.isLeaf = ((p.flags & leafPageFlag) != 0)
+ n.inodes = make(inodes, int(p.count))
+
+ for i := 0; i < int(p.count); i++ {
+ inode := &n.inodes[i]
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ inode.flags = elem.flags
+ inode.key = elem.key()
+ inode.value = elem.value()
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ inode.pgid = elem.pgid
+ inode.key = elem.key()
+ }
+ _assert(len(inode.key) > 0, "read: zero-length inode key")
+ }
+
+ // Save first key so we can find the node in the parent when we spill.
+ if len(n.inodes) > 0 {
+ n.key = n.inodes[0].key
+ _assert(len(n.key) > 0, "read: zero-length node key")
+ } else {
+ n.key = nil
+ }
+}
+
+// write writes the items onto one or more pages.
+func (n *node) write(p *page) {
+ // Initialize page.
+ if n.isLeaf {
+ p.flags |= leafPageFlag
+ } else {
+ p.flags |= branchPageFlag
+ }
+
+ if len(n.inodes) >= 0xFFFF {
+ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+ }
+ p.count = uint16(len(n.inodes))
+
+ // Stop here if there are no items to write.
+ if p.count == 0 {
+ return
+ }
+
+ // Loop over each item and write it to the page.
+ b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+ for i, item := range n.inodes {
+ _assert(len(item.key) > 0, "write: zero-length inode key")
+
+ // Write the page element.
+ if n.isLeaf {
+ elem := p.leafPageElement(uint16(i))
+ elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.flags = item.flags
+ elem.ksize = uint32(len(item.key))
+ elem.vsize = uint32(len(item.value))
+ } else {
+ elem := p.branchPageElement(uint16(i))
+ elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.ksize = uint32(len(item.key))
+ elem.pgid = item.pgid
+ _assert(elem.pgid != p.id, "write: circular dependency occurred")
+ }
+
+ // If the length of key+value is larger than the max allocation size
+ // then we need to reallocate the byte array pointer.
+ //
+ // See: https://github.com/boltdb/bolt/pull/335
+ klen, vlen := len(item.key), len(item.value)
+ if len(b) < klen+vlen {
+ b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+ }
+
+ // Write data for the element to the end of the page.
+ copy(b[0:], item.key)
+ b = b[klen:]
+ copy(b[0:], item.value)
+ b = b[vlen:]
+ }
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.Split++
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+ sz = pageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+ index = i
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if i >= minKeysPerPage && sz+elsize > threshold {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
+ var nodes = n.split(tx.db.pageSize)
+ for _, node := range nodes {
+ // Add node's page to the freelist if it's not new.
+ if node.pgid > 0 {
+ tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
+ node.pgid = 0
+ }
+
+ // Allocate contiguous space for the node.
+ p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ return err
+ }
+
+ // Write the node.
+ if p.id >= tx.meta.pgid {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ }
+ node.pgid = p.id
+ node.write(p)
+ node.spilled = true
+
+ // Insert into parent inodes.
+ if node.parent != nil {
+ var key = node.key
+ if key == nil {
+ key = node.inodes[0].key
+ }
+
+ node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+ node.key = node.inodes[0].key
+ _assert(len(node.key) > 0, "spill: zero-length node key")
+ }
+
+ // Update the statistics.
+ tx.stats.Spill++
+ }
+
+ // If the root node split and created a new root then we need to spill that
+ // as well. We'll clear out the children to make sure it doesn't try to respill.
+ if n.parent != nil && n.parent.pgid == 0 {
+ n.children = nil
+ return n.parent.spill()
+ }
+
+ return nil
+}
+
+// rebalance attempts to combine the node with sibling nodes if the node fill
+// size is below a threshold or if there are not enough keys.
+func (n *node) rebalance() {
+ if !n.unbalanced {
+ return
+ }
+ n.unbalanced = false
+
+ // Update statistics.
+ n.bucket.tx.stats.Rebalance++
+
+ // Ignore if node is above threshold (25%) and has enough keys.
+ var threshold = n.bucket.tx.db.pageSize / 4
+ if n.size() > threshold && len(n.inodes) > n.minKeys() {
+ return
+ }
+
+ // Root node has special handling.
+ if n.parent == nil {
+ // If root node is a branch and only has one node then collapse it.
+ if !n.isLeaf && len(n.inodes) == 1 {
+ // Move root's child up.
+ child := n.bucket.node(n.inodes[0].pgid, n)
+ n.isLeaf = child.isLeaf
+ n.inodes = child.inodes[:]
+ n.children = child.children
+
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent = n
+ }
+ }
+
+ // Remove old child.
+ child.parent = nil
+ delete(n.bucket.nodes, child.pgid)
+ child.free()
+ }
+
+ return
+ }
+
+ // If node has no keys then just remove it.
+ if n.numChildren() == 0 {
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ n.parent.rebalance()
+ return
+ }
+
+ _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+
+ // Destination node is right sibling if idx == 0, otherwise left sibling.
+ var target *node
+ var useNextSibling = (n.parent.childIndex(n) == 0)
+ if useNextSibling {
+ target = n.nextSibling()
+ } else {
+ target = n.prevSibling()
+ }
+
+ // If both this node and the target node are too small then merge them.
+ if useNextSibling {
+ // Reparent all child nodes being moved.
+ for _, inode := range target.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = n
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes from target and remove target.
+ n.inodes = append(n.inodes, target.inodes...)
+ n.parent.del(target.key)
+ n.parent.removeChild(target)
+ delete(n.bucket.nodes, target.pgid)
+ target.free()
+ } else {
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child.parent.removeChild(child)
+ child.parent = target
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes to target and remove node.
+ target.inodes = append(target.inodes, n.inodes...)
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ }
+
+ // Either this node or the target node was deleted from the parent so rebalance it.
+ n.parent.rebalance()
+}
+
+// removes a node from the list of in-memory children.
+// This does not affect the inodes.
+func (n *node) removeChild(target *node) {
+ for i, child := range n.children {
+ if child == target {
+ n.children = append(n.children[:i], n.children[i+1:]...)
+ return
+ }
+ }
+}
+
+// dereference causes the node to copy all its inode key/value references to heap memory.
+// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+func (n *node) dereference() {
+ if n.key != nil {
+ key := make([]byte, len(n.key))
+ copy(key, n.key)
+ n.key = key
+ _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+ }
+
+ for i := range n.inodes {
+ inode := &n.inodes[i]
+
+ key := make([]byte, len(inode.key))
+ copy(key, inode.key)
+ inode.key = key
+ _assert(len(inode.key) > 0, "dereference: zero-length inode key")
+
+ value := make([]byte, len(inode.value))
+ copy(value, inode.value)
+ inode.value = value
+ }
+
+ // Recursively dereference children.
+ for _, child := range n.children {
+ child.dereference()
+ }
+
+ // Update statistics.
+ n.bucket.tx.stats.NodeDeref++
+}
+
+// free adds the node's underlying page to the freelist.
+func (n *node) free() {
+ if n.pgid != 0 {
+ n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
+ n.pgid = 0
+ }
+}
+
+// dump writes the contents of the node to STDERR for debugging purposes.
+/*
+func (n *node) dump() {
+ // Write node header.
+ var typ = "branch"
+ if n.isLeaf {
+ typ = "leaf"
+ }
+ warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
+
+ // Write out abbreviated version of each item.
+ for _, item := range n.inodes {
+ if n.isLeaf {
+ if item.flags&bucketLeafFlag != 0 {
+ bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
+ warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
+ } else {
+ warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
+ }
+ } else {
+ warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
+ }
+ }
+ warn("")
+}
+*/
+
+type nodes []*node
+
+func (s nodes) Len() int { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+
+// inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type inode struct {
+ flags uint32
+ pgid pgid
+ key []byte
+ value []byte
+}
+
+type inodes []inode
diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go
new file mode 100755
index 0000000..cde403a
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/page.go
@@ -0,0 +1,197 @@
+package bolt
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "unsafe"
+)
+
+const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+
+const minKeysPerPage = 2
+
+const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
+const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+
+const (
+ branchPageFlag = 0x01
+ leafPageFlag = 0x02
+ metaPageFlag = 0x04
+ freelistPageFlag = 0x10
+)
+
+const (
+ bucketLeafFlag = 0x01
+)
+
+type pgid uint64
+
+type page struct {
+ id pgid
+ flags uint16
+ count uint16
+ overflow uint32
+ ptr uintptr
+}
+
+// typ returns a human readable page type string used for debugging.
+func (p *page) typ() string {
+ if (p.flags & branchPageFlag) != 0 {
+ return "branch"
+ } else if (p.flags & leafPageFlag) != 0 {
+ return "leaf"
+ } else if (p.flags & metaPageFlag) != 0 {
+ return "meta"
+ } else if (p.flags & freelistPageFlag) != 0 {
+ return "freelist"
+ }
+ return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+// meta returns a pointer to the metadata section of the page.
+func (p *page) meta() *meta {
+ return (*meta)(unsafe.Pointer(&p.ptr))
+}
+
+// leafPageElement retrieves the leaf node by index
+func (p *page) leafPageElement(index uint16) *leafPageElement {
+ n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
+ return n
+}
+
+// leafPageElements retrieves a list of leaf nodes.
+func (p *page) leafPageElements() []leafPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// branchPageElement retrieves the branch node by index
+func (p *page) branchPageElement(index uint16) *branchPageElement {
+ return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+}
+
+// branchPageElements retrieves a list of branch nodes.
+func (p *page) branchPageElements() []branchPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+}
+
+// dump writes n bytes of the page to STDERR as hex output.
+func (p *page) hexdump(n int) {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+ fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+type pages []*page
+
+func (s pages) Len() int { return len(s) }
+func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+ pos uint32
+ ksize uint32
+ pgid pgid
+}
+
+// key returns a byte slice of the node key.
+func (n *branchPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+ flags uint32
+ pos uint32
+ ksize uint32
+ vsize uint32
+}
+
+// key returns a byte slice of the node key.
+func (n *leafPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+}
+
+// value returns a byte slice of the node value.
+func (n *leafPageElement) value() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+}
+
+// PageInfo represents human readable information about a page.
+type PageInfo struct {
+ ID int
+ Type string
+ Count int
+ OverflowCount int
+}
+
+type pgids []pgid
+
+func (s pgids) Len() int { return len(s) }
+func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// merge returns the sorted union of a and b.
+func (a pgids) merge(b pgids) pgids {
+ // Return the opposite slice if one is nil.
+ if len(a) == 0 {
+ return b
+ }
+ if len(b) == 0 {
+ return a
+ }
+ merged := make(pgids, len(a)+len(b))
+ mergepgids(merged, a, b)
+ return merged
+}
+
+// mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func mergepgids(dst, a, b pgids) {
+ if len(dst) < len(a)+len(b) {
+ panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+ }
+ // Copy in the opposite slice if one is nil.
+ if len(a) == 0 {
+ copy(dst, b)
+ return
+ }
+ if len(b) == 0 {
+ copy(dst, a)
+ return
+ }
+
+ // Merged will hold all elements from both lists.
+ merged := dst[:0]
+
+ // Assign lead to the slice with a lower starting value, follow to the higher value.
+ lead, follow := a, b
+ if b[0] < a[0] {
+ lead, follow = b, a
+ }
+
+ // Continue while there are elements in the lead.
+ for len(lead) > 0 {
+ // Merge largest prefix of lead that is ahead of follow[0].
+ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+ merged = append(merged, lead[:n]...)
+ if n >= len(lead) {
+ break
+ }
+
+ // Swap lead and follow.
+ lead, follow = follow, lead[n:]
+ }
+
+ // Append what's left in follow.
+ _ = append(merged, follow...)
+}
diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go
new file mode 100755
index 0000000..bbb60ac
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/tx.go
@@ -0,0 +1,686 @@
+package bolt
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+// txid represents the internal transaction identifier.
+type txid uint64
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or rollback transactions when you are done with
+// them. Pages can not be reclaimed by the writer until no more transactions
+// are using them. A long running read transaction can cause the database to
+// quickly grow.
+type Tx struct {
+ writable bool
+ managed bool
+ db *DB
+ meta *meta
+ root Bucket
+ pages map[pgid]*page
+ stats TxStats
+ commitHandlers []func()
+
+ // WriteFlag specifies the flag for write-related methods like WriteTo().
+ // Tx opens the database file with the specified flag to copy the data.
+ //
+ // By default, the flag is unset, which works well for mostly in-memory
+ // workloads. For databases that are much larger than available RAM,
+ // set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+ WriteFlag int
+}
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+ tx.db = db
+ tx.pages = nil
+
+ // Copy the meta page since it can be changed by the writer.
+ tx.meta = &meta{}
+ db.meta().copy(tx.meta)
+
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.bucket = &bucket{}
+ *tx.root.bucket = tx.meta.root
+
+ // Increment the transaction id and add a page cache for writable transactions.
+ if tx.writable {
+ tx.pages = make(map[pgid]*page)
+ tx.meta.txid += txid(1)
+ }
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+ return int(tx.meta.txid)
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+ return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+ return int64(tx.meta.pgid) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+ return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+ return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+ return tx.stats
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+ return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+ return tx.root.ForEach(func(k, v []byte) error {
+ if err := fn(k, tx.root.Bucket(k)); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+func (tx *Tx) OnCommit(fn func()) {
+ tx.commitHandlers = append(tx.commitHandlers, fn)
+}
+
+// Commit writes all changes to disk and updates the meta page.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() error {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ } else if !tx.writable {
+ return ErrTxNotWritable
+ }
+
+ // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+ // Rebalance nodes which have had deletions.
+ var startTime = time.Now()
+ tx.root.rebalance()
+ if tx.stats.Rebalance > 0 {
+ tx.stats.RebalanceTime += time.Since(startTime)
+ }
+
+ // spill data onto dirty pages.
+ startTime = time.Now()
+ if err := tx.root.spill(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.SpillTime += time.Since(startTime)
+
+ // Free the old root bucket.
+ tx.meta.root.root = tx.root.root
+
+ opgid := tx.meta.pgid
+
+ // Free the freelist and allocate new pages for it. This will overestimate
+ // the size of the freelist but not underestimate the size (which would be bad).
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+ if err := tx.db.freelist.write(p); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.meta.freelist = p.id
+
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.pgid > opgid {
+ if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ tx.rollback()
+ return err
+ }
+ }
+
+ // Write dirty pages to disk.
+ startTime = time.Now()
+ if err := tx.write(); err != nil {
+ tx.rollback()
+ return err
+ }
+
+ // If strict mode is enabled then perform a consistency check.
+ // Only the first consistency error is reported in the panic.
+ if tx.db.StrictMode {
+ ch := tx.Check()
+ var errs []string
+ for {
+ err, ok := <-ch
+ if !ok {
+ break
+ }
+ errs = append(errs, err.Error())
+ }
+ if len(errs) > 0 {
+ panic("check fail: " + strings.Join(errs, "\n"))
+ }
+ }
+
+ // Write meta to disk.
+ if err := tx.writeMeta(); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.stats.WriteTime += time.Since(startTime)
+
+ // Finalize the transaction.
+ tx.close()
+
+ // Execute commit handlers now that the locks have been removed.
+ for _, fn := range tx.commitHandlers {
+ fn()
+ }
+
+ return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+ return ErrTxClosed
+ }
+ tx.rollback()
+ return nil
+}
+
+func (tx *Tx) rollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.rollback(tx.meta.txid)
+ tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+ }
+ tx.close()
+}
+
+func (tx *Tx) close() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ // Grab freelist stats.
+ var freelistFreeN = tx.db.freelist.free_count()
+ var freelistPendingN = tx.db.freelist.pending_count()
+ var freelistAlloc = tx.db.freelist.size()
+
+ // Remove transaction ref & writer lock.
+ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.FreePageN = freelistFreeN
+ tx.db.stats.PendingPageN = freelistPendingN
+ tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+ tx.db.stats.FreelistInuse = freelistAlloc
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
+ } else {
+ tx.db.removeTx(tx)
+ }
+
+ // Clear all references.
+ tx.db = nil
+ tx.meta = nil
+ tx.root = Bucket{tx: tx}
+ tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+ _, err := tx.WriteTo(w)
+ return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ // Attempt to open reader with WriteFlag
+ f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ if err != nil {
+ return 0, err
+ }
+ defer func() { _ = f.Close() }()
+
+ // Generate a meta page. We use the same page data for both meta pages.
+ buf := make([]byte, tx.db.pageSize)
+ page := (*page)(unsafe.Pointer(&buf[0]))
+ page.flags = metaPageFlag
+ *page.meta() = *tx.meta
+
+ // Write meta 0.
+ page.id = 0
+ page.meta().checksum = page.meta().sum64()
+ nn, err := w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 0 copy: %s", err)
+ }
+
+ // Write meta 1 with a lower transaction id.
+ page.id = 1
+ page.meta().txid -= 1
+ page.meta().checksum = page.meta().sum64()
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 1 copy: %s", err)
+ }
+
+ // Move past the meta pages in the file.
+ if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
+ return n, fmt.Errorf("seek: %s", err)
+ }
+
+ // Copy data pages.
+ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
+ n += wn
+ if err != nil {
+ return n, err
+ }
+
+ return n, f.Close()
+}
+
+// CopyFile copies the entire database to file at the given path.
+// A reader transaction is maintained during the copy so it is safe to continue
+// using the database while a copy is in progress.
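+//
+// A minimal online-backup sketch (illustrative; db is an open *DB):
+//
+//	err := db.View(func(tx *Tx) error {
+//		return tx.CopyFile("/path/to/backup.db", 0600)
+//	})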
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
+ f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+
+ err = tx.Copy(f)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. This overhead can be removed if running on a read-only
+// transaction; however, it is not safe to execute other writer transactions at
+// the same time.
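+//
+// Typical consumption of the results (sketch): drain the channel until
+// it is closed.
+//
+//	for err := range tx.Check() {
+//		log.Println(err)
+//	}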
+func (tx *Tx) Check() <-chan error {
+ ch := make(chan error)
+ go tx.check(ch)
+ return ch
+}
+
+func (tx *Tx) check(ch chan error) {
+ // Check if any pages are double freed.
+ freed := make(map[pgid]bool)
+ all := make([]pgid, tx.db.freelist.count())
+ tx.db.freelist.copyall(all)
+ for _, id := range all {
+ if freed[id] {
+ ch <- fmt.Errorf("page %d: already freed", id)
+ }
+ freed[id] = true
+ }
+
+ // Track every reachable page.
+ reachable := make(map[pgid]*page)
+ reachable[0] = tx.page(0) // meta0
+ reachable[1] = tx.page(1) // meta1
+ for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+ reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ }
+
+ // Recursively check buckets.
+ tx.checkBucket(&tx.root, reachable, freed, ch)
+
+ // Ensure all pages below high water mark are either reachable or freed.
+ for i := pgid(0); i < tx.meta.pgid; i++ {
+ _, isReachable := reachable[i]
+ if !isReachable && !freed[i] {
+ ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+ }
+ }
+
+ // Close the channel to signal completion.
+ close(ch)
+}
+
+func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
+ // Ignore inline buckets.
+ if b.root == 0 {
+ return
+ }
+
+ // Check every page used by this bucket.
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+ if p.id > tx.meta.pgid {
+ ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
+ }
+
+ // Ensure each page is only referenced once.
+ for i := pgid(0); i <= pgid(p.overflow); i++ {
+ var id = p.id + i
+ if _, ok := reachable[id]; ok {
+ ch <- fmt.Errorf("page %d: multiple references", int(id))
+ }
+ reachable[id] = p
+ }
+
+ // We should only encounter un-freed leaf and branch pages.
+ if freed[p.id] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
+ } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
+ ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+ }
+ })
+
+ // Check each bucket within this bucket.
+ _ = b.ForEach(func(k, v []byte) error {
+ if child := b.Bucket(k); child != nil {
+ tx.checkBucket(child, reachable, freed, ch)
+ }
+ return nil
+ })
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*page, error) {
+ p, err := tx.db.allocate(count)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save to our page cache.
+ tx.pages[p.id] = p
+
+ // Update statistics.
+ tx.stats.PageCount++
+ tx.stats.PageAlloc += count * tx.db.pageSize
+
+ return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+ // Sort pages by id.
+ pages := make(pages, 0, len(tx.pages))
+ for _, p := range tx.pages {
+ pages = append(pages, p)
+ }
+ // Clear out page cache early.
+ tx.pages = make(map[pgid]*page)
+ sort.Sort(pages)
+
+ // Write pages to disk in order.
+ for _, p := range pages {
+ size := (int(p.overflow) + 1) * tx.db.pageSize
+ offset := int64(p.id) * int64(tx.db.pageSize)
+
+ // Write out page in "max allocation" sized chunks.
+ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
+ for {
+ // Limit our write to our max allocation size.
+ sz := size
+ if sz > maxAllocSize-1 {
+ sz = maxAllocSize - 1
+ }
+
+ // Write chunk to disk.
+ buf := ptr[:sz]
+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+ return err
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ // Exit inner for loop if we've written all the chunks.
+ size -= sz
+ if size == 0 {
+ break
+ }
+
+ // Otherwise move offset forward and move pointer to next chunk.
+ offset += int64(sz)
+ ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+ }
+ }
+
+ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Put small pages back to page pool.
+ for _, p := range pages {
+ // Ignore page sizes over 1 page.
+ // These are allocated using make() instead of the page pool.
+ if int(p.overflow) != 0 {
+ continue
+ }
+
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+
+ // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+ for i := range buf {
+ buf[i] = 0
+ }
+ tx.db.pagePool.Put(buf)
+ }
+
+ return nil
+}
+
+// writeMeta writes the meta to the disk.
+func (tx *Tx) writeMeta() error {
+ // Create a temporary buffer for the meta page.
+ buf := make([]byte, tx.db.pageSize)
+ p := tx.db.pageInBuffer(buf, 0)
+ tx.meta.write(p)
+
+ // Write the meta page to file.
+ if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+ return err
+ }
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+ }
+ }
+
+ // Update statistics.
+ tx.stats.Write++
+
+ return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+ // Check the dirty pages first.
+ if tx.pages != nil {
+ if p, ok := tx.pages[id]; ok {
+ return p
+ }
+ }
+
+ // Otherwise return directly from the mmap.
+ return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+ p := tx.page(pgid)
+
+ // Execute function.
+ fn(p, depth)
+
+ // Recursively loop over children.
+ if (p.flags & branchPageFlag) != 0 {
+ for i := 0; i < int(p.count); i++ {
+ elem := p.branchPageElement(uint16(i))
+ tx.forEachPage(elem.pgid, depth+1, fn)
+ }
+ }
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+ if tx.db == nil {
+ return nil, ErrTxClosed
+ } else if pgid(id) >= tx.meta.pgid {
+ return nil, nil
+ }
+
+ // Build the page info.
+ p := tx.db.page(pgid(id))
+ info := &PageInfo{
+ ID: id,
+ Count: int(p.count),
+ OverflowCount: int(p.overflow),
+ }
+
+ // Determine the type (or if it's free).
+ if tx.db.freelist.freed(pgid(id)) {
+ info.Type = "free"
+ } else {
+ info.Type = p.typ()
+ }
+
+ return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+ // Page statistics.
+ PageCount int // number of page allocations
+ PageAlloc int // total bytes allocated
+
+ // Cursor statistics.
+ CursorCount int // number of cursors created
+
+ // Node statistics
+ NodeCount int // number of node allocations
+ NodeDeref int // number of node dereferences
+
+ // Rebalance statistics.
+ Rebalance int // number of node rebalances
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ Split int // number of nodes split
+ Spill int // number of nodes spilled
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ Write int // number of writes performed
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.PageCount += other.PageCount
+ s.PageAlloc += other.PageAlloc
+ s.CursorCount += other.CursorCount
+ s.NodeCount += other.NodeCount
+ s.NodeDeref += other.NodeDeref
+ s.Rebalance += other.Rebalance
+ s.RebalanceTime += other.RebalanceTime
+ s.Split += other.Split
+ s.Spill += other.Spill
+ s.SpillTime += other.SpillTime
+ s.Write += other.Write
+ s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
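+//
+// Usage sketch (illustrative):
+//
+//	prev := db.Stats().TxStats
+//	// ... run some transactions ...
+//	delta := db.Stats().TxStats.Sub(&prev)
+//	fmt.Printf("%d writes in %v\n", delta.Write, delta.WriteTime)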
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.PageCount - other.PageCount
+ diff.PageAlloc = s.PageAlloc - other.PageAlloc
+ diff.CursorCount = s.CursorCount - other.CursorCount
+ diff.NodeCount = s.NodeCount - other.NodeCount
+ diff.NodeDeref = s.NodeDeref - other.NodeDeref
+ diff.Rebalance = s.Rebalance - other.Rebalance
+ diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
+ diff.Split = s.Split - other.Split
+ diff.Spill = s.Spill - other.Spill
+ diff.SpillTime = s.SpillTime - other.SpillTime
+ diff.Write = s.Write - other.Write
+ diff.WriteTime = s.WriteTime - other.WriteTime
+ return diff
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/LICENSE b/vendor/github.com/bradfitz/gomemcache/LICENSE
new file mode 100755
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
new file mode 100755
index 0000000..25e88ca
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -0,0 +1,687 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package memcache provides a client for the memcached cache server.
+package memcache
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Similar to:
+// https://godoc.org/google.golang.org/appengine/memcache
+
+var (
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+
+ // ErrMalformedKey is returned when an invalid key is used.
+ // Keys must be at maximum 250 bytes long and not
+ // contain whitespace or control characters.
+ ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
+
+ // ErrNoServers is returned when no servers are configured or available.
+ ErrNoServers = errors.New("memcache: no servers configured or available")
+)
+
+const (
+ // DefaultTimeout is the default socket read/write timeout.
+ DefaultTimeout = 100 * time.Millisecond
+
+ // DefaultMaxIdleConns is the default maximum number of idle connections
+ // kept for any single address.
+ DefaultMaxIdleConns = 2
+)
+
+const buffered = 8 // arbitrary buffered channel size, for readability
+
+// resumableError returns true if err is only a protocol-level cache error.
+// This is used to determine whether a server connection should be
+// reused. If an error occurs, by default we don't reuse the
+// connection, unless it was just a cache error.
+func resumableError(err error) bool {
+ switch err {
+ case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
+ return true
+ }
+ return false
+}
+
+func legalKey(key string) bool {
+ if len(key) > 250 {
+ return false
+ }
+ for i := 0; i < len(key); i++ {
+ if key[i] <= ' ' || key[i] == 0x7f {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ crlf = []byte("\r\n")
+ space = []byte(" ")
+ resultOK = []byte("OK\r\n")
+ resultStored = []byte("STORED\r\n")
+ resultNotStored = []byte("NOT_STORED\r\n")
+ resultExists = []byte("EXISTS\r\n")
+ resultNotFound = []byte("NOT_FOUND\r\n")
+ resultDeleted = []byte("DELETED\r\n")
+ resultEnd = []byte("END\r\n")
+ resultOk = []byte("OK\r\n")
+ resultTouched = []byte("TOUCHED\r\n")
+
+ resultClientErrorPrefix = []byte("CLIENT_ERROR ")
+)
+
+// New returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
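+//
+// Usage sketch (addresses are illustrative):
+//
+//	mc := New("10.0.0.1:11211", "10.0.0.2:11211", "10.0.0.3:11212")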
+func New(server ...string) *Client {
+ ss := new(ServerList)
+ ss.SetServers(server...)
+ return NewFromSelector(ss)
+}
+
+// NewFromSelector returns a new Client using the provided ServerSelector.
+func NewFromSelector(ss ServerSelector) *Client {
+ return &Client{selector: ss}
+}
+
+// Client is a memcache client.
+// It is safe for unlocked use by multiple concurrent goroutines.
+type Client struct {
+ // Timeout specifies the socket read/write timeout.
+ // If zero, DefaultTimeout is used.
+ Timeout time.Duration
+
+ // MaxIdleConns specifies the maximum number of idle connections that will
+ // be maintained per address. If less than one, DefaultMaxIdleConns will be
+ // used.
+ //
+ // Consider your expected traffic rates and latency carefully. This should
+ // be set to a number higher than your peak parallel requests.
+ MaxIdleConns int
+
+ selector ServerSelector
+
+ lk sync.Mutex
+ freeconn map[string][]*conn
+}
+
+// Item is an item to be got or stored in a memcached server.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+
+ // Value is the Item's value.
+ Value []byte
+
+ // Flags are server-opaque flags whose semantics are entirely
+ // up to the app.
+ Flags uint32
+
+ // Expiration is the cache expiration time, in seconds: either a relative
+ // time from now (up to 1 month), or an absolute Unix epoch time.
+ // Zero means the Item has no expiration time.
+ Expiration int32
+
+ // Compare and swap ID.
+ casid uint64
+}
+
+// conn is a connection to a server.
+type conn struct {
+ nc net.Conn
+ rw *bufio.ReadWriter
+ addr net.Addr
+ c *Client
+}
+
+// release returns this connection back to the client's free pool
+func (cn *conn) release() {
+ cn.c.putFreeConn(cn.addr, cn)
+}
+
+func (cn *conn) extendDeadline() {
+ cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
+}
+
+// condRelease releases this connection if the error pointed to by err
+// is nil (not an error) or is only a protocol level error (e.g. a
+// cache miss). The purpose is to not recycle TCP connections that
+// are bad.
+func (cn *conn) condRelease(err *error) {
+ if *err == nil || resumableError(*err) {
+ cn.release()
+ } else {
+ cn.nc.Close()
+ }
+}
+
+func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+ if c.freeconn == nil {
+ c.freeconn = make(map[string][]*conn)
+ }
+ freelist := c.freeconn[addr.String()]
+ if len(freelist) >= c.maxIdleConns() {
+ cn.nc.Close()
+ return
+ }
+ c.freeconn[addr.String()] = append(freelist, cn)
+}
+
+func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
+ c.lk.Lock()
+ defer c.lk.Unlock()
+ if c.freeconn == nil {
+ return nil, false
+ }
+ freelist, ok := c.freeconn[addr.String()]
+ if !ok || len(freelist) == 0 {
+ return nil, false
+ }
+ cn = freelist[len(freelist)-1]
+ c.freeconn[addr.String()] = freelist[:len(freelist)-1]
+ return cn, true
+}
+
+func (c *Client) netTimeout() time.Duration {
+ if c.Timeout != 0 {
+ return c.Timeout
+ }
+ return DefaultTimeout
+}
+
+func (c *Client) maxIdleConns() int {
+ if c.MaxIdleConns > 0 {
+ return c.MaxIdleConns
+ }
+ return DefaultMaxIdleConns
+}
+
+// ConnectTimeoutError is the error type used when it takes
+// too long to connect to the desired host. This level of
+// detail can generally be ignored.
+type ConnectTimeoutError struct {
+ Addr net.Addr
+}
+
+func (cte *ConnectTimeoutError) Error() string {
+ return "memcache: connect timeout to " + cte.Addr.String()
+}
+
+func (c *Client) dial(addr net.Addr) (net.Conn, error) {
+ nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
+ if err == nil {
+ return nc, nil
+ }
+
+ if ne, ok := err.(net.Error); ok && ne.Timeout() {
+ return nil, &ConnectTimeoutError{addr}
+ }
+
+ return nil, err
+}
+
+func (c *Client) getConn(addr net.Addr) (*conn, error) {
+ cn, ok := c.getFreeConn(addr)
+ if ok {
+ cn.extendDeadline()
+ return cn, nil
+ }
+ nc, err := c.dial(addr)
+ if err != nil {
+ return nil, err
+ }
+ cn = &conn{
+ nc: nc,
+ addr: addr,
+ rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
+ c: c,
+ }
+ cn.extendDeadline()
+ return cn, nil
+}
+
+func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
+ addr, err := c.selector.PickServer(item.Key)
+ if err != nil {
+ return err
+ }
+ cn, err := c.getConn(addr)
+ if err != nil {
+ return err
+ }
+ defer cn.condRelease(&err)
+ if err = fn(c, cn.rw, item); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *Client) FlushAll() error {
+ return c.selector.Each(c.flushAllFromAddr)
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
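+//
+// Usage sketch (mc is a *Client):
+//
+//	it, err := mc.Get("foo")
+//	switch err {
+//	case nil:
+//		fmt.Printf("foo = %q\n", it.Value)
+//	case ErrCacheMiss:
+//		// key was not present
+//	}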
+func (c *Client) Get(key string) (item *Item, err error) {
+ err = c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
+ })
+ if err == nil && item == nil {
+ err = ErrCacheMiss
+ }
+ return
+}
+
+// Touch updates the expiry for the given key. The seconds parameter is either
+// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+// into the future at which time the item will expire. Zero means the item has
+// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
+// The key must be at most 250 bytes in length.
+func (c *Client) Touch(key string, seconds int32) (err error) {
+ return c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.touchFromAddr(addr, []string{key}, seconds)
+ })
+}
+
+func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
+ if !legalKey(key) {
+ return ErrMalformedKey
+ }
+ addr, err := c.selector.PickServer(key)
+ if err != nil {
+ return err
+ }
+ return fn(addr)
+}
+
+func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+ cn, err := c.getConn(addr)
+ if err != nil {
+ return err
+ }
+ defer cn.condRelease(&err)
+ return fn(cn.rw)
+}
+
+func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+ return c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.withAddrRw(addr, fn)
+ })
+}
+
+func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ if err := parseGetResponse(rw.Reader, cb); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+// flushAllFromAddr sends the flush_all command to the given addr.
+func (c *Client) flushAllFromAddr(addr net.Addr) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultOk):
+ break
+ default:
+ return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
+ }
+ return nil
+ })
+}
+
+func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
+ return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ for _, key := range keys {
+ if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultTouched):
+ break
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ default:
+ return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
+ }
+ }
+ return nil
+ })
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to
+// items may have fewer elements than the input slice, due to memcache
+// cache misses. Each key must be at most 250 bytes in length.
+// If no error is returned, the returned map will also be non-nil.
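+//
+// Usage sketch:
+//
+//	items, err := mc.GetMulti([]string{"foo", "bar", "baz"})
+//	if err == nil {
+//		for key, it := range items {
+//			fmt.Printf("%s = %q\n", key, it.Value)
+//		}
+//	}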
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
+ var lk sync.Mutex
+ m := make(map[string]*Item)
+ addItemToMap := func(it *Item) {
+ lk.Lock()
+ defer lk.Unlock()
+ m[it.Key] = it
+ }
+
+ keyMap := make(map[net.Addr][]string)
+ for _, key := range keys {
+ if !legalKey(key) {
+ return nil, ErrMalformedKey
+ }
+ addr, err := c.selector.PickServer(key)
+ if err != nil {
+ return nil, err
+ }
+ keyMap[addr] = append(keyMap[addr], key)
+ }
+
+ ch := make(chan error, buffered)
+ for addr, keys := range keyMap {
+ go func(addr net.Addr, keys []string) {
+ ch <- c.getFromAddr(addr, keys, addItemToMap)
+ }(addr, keys)
+ }
+
+ var err error
+ for range keyMap {
+ if ge := <-ch; ge != nil {
+ err = ge
+ }
+ }
+ return m, err
+}
+
+// parseGetResponse reads a GET response from r and calls cb for each
+// read and allocated Item.
+func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+ for {
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ if bytes.Equal(line, resultEnd) {
+ return nil
+ }
+ it := new(Item)
+ size, err := scanGetResponseLine(line, it)
+ if err != nil {
+ return err
+ }
+ it.Value = make([]byte, size+2)
+ _, err = io.ReadFull(r, it.Value)
+ if err != nil {
+ it.Value = nil
+ return err
+ }
+ if !bytes.HasSuffix(it.Value, crlf) {
+ it.Value = nil
+ return fmt.Errorf("memcache: corrupt get result read")
+ }
+ it.Value = it.Value[:size]
+ cb(it)
+ }
+}
+
+// scanGetResponseLine populates it and returns the declared size of the item.
+// It does not read the bytes of the item.
+func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
+ pattern := "VALUE %s %d %d %d\r\n"
+ dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
+ if bytes.Count(line, space) == 3 {
+ pattern = "VALUE %s %d %d\r\n"
+ dest = dest[:3]
+ }
+ n, err := fmt.Sscanf(string(line), pattern, dest...)
+ if err != nil || n != len(dest) {
+ return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
+ }
+ return size, nil
+}
+
+// Set writes the given item, unconditionally.
+func (c *Client) Set(item *Item) error {
+ return c.onItem(item, (*Client).set)
+}
+
+func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "set", item)
+}
+
+// Add writes the given item, if no value already exists for its
+// key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Add(item *Item) error {
+ return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key.
+func (c *Client) Replace(item *Item) error {
+ return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified nor evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
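+//
+// A typical optimistic-update loop (sketch):
+//
+//	it, err := mc.Get("greeting")
+//	if err == nil {
+//		it.Value = []byte("hello, again")
+//		err = mc.CompareAndSwap(it)
+//		// ErrCASConflict here means another client changed the item
+//		// between the Get and the CompareAndSwap.
+//	}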
+func (c *Client) CompareAndSwap(item *Item) error {
+ return c.onItem(item, (*Client).cas)
+}
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+ if !legalKey(item.Key) {
+ return ErrMalformedKey
+ }
+ var err error
+ if verb == "cas" {
+ _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+ verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+ } else {
+ _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+ verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+ }
+ if err != nil {
+ return err
+ }
+ if _, err = rw.Write(item.Value); err != nil {
+ return err
+ }
+ if _, err := rw.Write(crlf); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultStored):
+ return nil
+ case bytes.Equal(line, resultNotStored):
+ return ErrNotStored
+ case bytes.Equal(line, resultExists):
+ return ErrCASConflict
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ }
+ return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+ _, err := fmt.Fprintf(rw, format, args...)
+ if err != nil {
+ return nil, err
+ }
+ if err := rw.Flush(); err != nil {
+ return nil, err
+ }
+ line, err := rw.ReadSlice('\n')
+ return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+ line, err := writeReadLine(rw, format, args...)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultOK):
+ return nil
+ case bytes.Equal(line, expect):
+ return nil
+ case bytes.Equal(line, resultNotStored):
+ return ErrNotStored
+ case bytes.Equal(line, resultExists):
+ return ErrCASConflict
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ }
+ return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+ return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+ return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+ })
+}
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+ return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+ return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+ })
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
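+//
+// Usage sketch (the stored value must already be a decimal number,
+// e.g. "41"):
+//
+//	n, err := mc.Increment("hits", 1)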
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+ return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+ return c.incrDecr("decr", key, delta)
+}
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+ var val uint64
+ err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+ line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line, resultNotFound):
+ return ErrCacheMiss
+ case bytes.HasPrefix(line, resultClientErrorPrefix):
+ errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+ return errors.New("memcache: client error: " + string(errMsg))
+ }
+ val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+ return val, err
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
new file mode 100755
index 0000000..89ad81e
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package memcache
+
+import (
+ "hash/crc32"
+ "net"
+ "strings"
+ "sync"
+)
+
+// ServerSelector is the interface that selects a memcache server
+// as a function of the item's key.
+//
+// All ServerSelector implementations must be safe for concurrent use
+// by multiple goroutines.
+type ServerSelector interface {
+ // PickServer returns the server address that a given item
+ // should be shared onto.
+ PickServer(key string) (net.Addr, error)
+ Each(func(net.Addr) error) error
+}
+
+// ServerList is a simple ServerSelector. Its zero value is usable.
+type ServerList struct {
+ mu sync.RWMutex
+ addrs []net.Addr
+}
+
+// staticAddr caches the Network() and String() values from any net.Addr.
+type staticAddr struct {
+ ntw, str string
+}
+
+func newStaticAddr(a net.Addr) net.Addr {
+ return &staticAddr{
+ ntw: a.Network(),
+ str: a.String(),
+ }
+}
+
+func (s *staticAddr) Network() string { return s.ntw }
+func (s *staticAddr) String() string { return s.str }
+
+// SetServers changes a ServerList's set of servers at runtime and is
+// safe for concurrent use by multiple goroutines.
+//
+// Each server is given equal weight. A server is given more weight
+// if it's listed multiple times.
+//
+// SetServers returns an error if any of the server names fail to
+// resolve. No attempt is made to connect to the server. If any error
+// is returned, no changes are made to the ServerList.
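+//
+// Usage sketch (addresses are illustrative; a value containing a slash
+// is treated as a unix socket path):
+//
+//	var ss ServerList
+//	if err := ss.SetServers("10.0.0.1:11211", "/var/run/memcached.sock"); err != nil {
+//		// a name failed to resolve; ss is unchanged
+//	}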
+func (ss *ServerList) SetServers(servers ...string) error {
+ naddr := make([]net.Addr, len(servers))
+ for i, server := range servers {
+ if strings.Contains(server, "/") {
+ addr, err := net.ResolveUnixAddr("unix", server)
+ if err != nil {
+ return err
+ }
+ naddr[i] = newStaticAddr(addr)
+ } else {
+ tcpaddr, err := net.ResolveTCPAddr("tcp", server)
+ if err != nil {
+ return err
+ }
+ naddr[i] = newStaticAddr(tcpaddr)
+ }
+ }
+
+ ss.mu.Lock()
+ defer ss.mu.Unlock()
+ ss.addrs = naddr
+ return nil
+}
+
+// Each iterates over each server, calling the given function.
+func (ss *ServerList) Each(f func(net.Addr) error) error {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ for _, a := range ss.addrs {
+ if err := f(a); nil != err {
+ return err
+ }
+ }
+ return nil
+}
+
+// keyBufPool returns []byte buffers for use by PickServer's call to
+// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
+// copies, which at least are bounded in size and small)
+var keyBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 256)
+ return &b
+ },
+}
+
+func (ss *ServerList) PickServer(key string) (net.Addr, error) {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ if len(ss.addrs) == 0 {
+ return nil, ErrNoServers
+ }
+ if len(ss.addrs) == 1 {
+ return ss.addrs[0], nil
+ }
+ bufp := keyBufPool.Get().(*[]byte)
+ n := copy(*bufp, key)
+ cs := crc32.ChecksumIEEE((*bufp)[:n])
+ keyBufPool.Put(bufp)
+
+ return ss.addrs[cs%uint32(len(ss.addrs))], nil
+}
diff --git a/vendor/github.com/buraksezer/consistent/LICENSE b/vendor/github.com/buraksezer/consistent/LICENSE
new file mode 100755
index 0000000..e470334
--- /dev/null
+++ b/vendor/github.com/buraksezer/consistent/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Burak Sezer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/buraksezer/consistent/README.md b/vendor/github.com/buraksezer/consistent/README.md
new file mode 100755
index 0000000..267afa7
--- /dev/null
+++ b/vendor/github.com/buraksezer/consistent/README.md
@@ -0,0 +1,235 @@
+consistent
+==========
+[GoDoc](https://godoc.org/github.com/buraksezer/consistent) | [Build Status](https://travis-ci.org/buraksezer/consistent) | [Coverage](http://gocover.io/github.com/buraksezer/consistent) | [Go Report Card](https://goreportcard.com/report/github.com/buraksezer/consistent) | [MIT License](https://opensource.org/licenses/MIT) | [Awesome Go](https://github.com/avelino/awesome-go)
+
+
+This library provides a consistent hashing function which simultaneously achieves both uniformity and consistency.
+
+For detailed information about the concept, you should take a look at the following resources:
+
+* [Consistent Hashing with Bounded Loads on Google Research Blog](https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html)
+* [Improving load balancing with a new consistent-hashing algorithm on Vimeo Engineering Blog](https://medium.com/vimeo-engineering-blog/improving-load-balancing-with-a-new-consistent-hashing-algorithm-9f1bd75709ed)
+* [Consistent Hashing with Bounded Loads paper on arXiv](https://arxiv.org/abs/1608.01350)
+
+Table of Contents
+----------------
+
+- [Overview](#overview)
+- [Install](#install)
+- [Configuration](#configuration)
+- [Usage](#usage)
+- [Benchmarks](#benchmarks)
+- [Examples](#examples)
+
+Overview
+--------
+
+In this package's context, keys are distributed among partitions, and partitions are in turn distributed among members.
+
+When you create a new consistent instance or call `Add/Remove`:
+
+* The member's name is hashed and inserted into the hash ring,
+* Average load is calculated by the algorithm defined in the paper,
+* Partitions are distributed among members by hashing partition IDs, and no member's load may exceed the average load.
+
+The average load cannot be exceeded, so if every member is already at maximum load when you try to add a new member, the call panics.
+
+When you want to locate a key by calling `LocateKey`:
+
+* The key (a byte slice) is hashed,
+* The hash is taken modulo the number of partitions,
+* The result of this modulo - `MOD(hash result, partition count)` - is the partition in which the key will be located,
+* The partition's owner was already determined when the member set last changed, so `LocateKey` returns the owner immediately.
+
+Apart from the hashing itself, `consistent` allocates no memory when locating a key.
+
+Note that the number of partitions cannot be changed after creation.
+
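+A minimal sketch of the lookup path described above, assuming the package's `FindPartitionID` and `GetPartitionOwner` helpers and an initialized instance `c` (see the godoc for the full API):
+
+```go
+key := []byte("my-key")
+partID := c.FindPartitionID(key)     // hash(key) mod PartitionCount
+owner := c.GetPartitionOwner(partID) // the same member LocateKey would return
+```
+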
+Install
+-------
+
+With a correctly configured Go environment:
+
+```
+go get github.com/buraksezer/consistent
+```
+
+You will find some useful usage samples in the [examples](https://github.com/buraksezer/consistent/tree/master/_examples) folder.
+
+Configuration
+-------------
+
+```go
+type Config struct {
+ // Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
+ Hasher Hasher
+
+ // Keys are distributed among partitions. Prime numbers are good to
+ // distribute keys uniformly. Select a big PartitionCount if you have
+ // too many keys.
+ PartitionCount int
+
+ // Members are replicated on the consistent hash ring. This number
+ // controls how many times a member is replicated on the ring.
+ ReplicationFactor int
+
+ // Load is used to calculate average load. See the code, the paper and Google's
+ // blog post to learn about it.
+ Load float64
+}
+```
+
+Any hash algorithm that implements the `Hasher` interface can be used. Please take a look at the *Examples* section for a sample implementation.
+
+Usage
+-----
+
+`LocateKey` function finds a member in the cluster for your key:
+```go
+// With a properly configured and initialized consistent instance
+key := []byte("my-key")
+member := c.LocateKey(key)
+```
+It returns a thread-safe copy of the member you added before.
+
+The second most commonly used function is `GetClosestN`.
+
+```go
+// With a properly configured and initialized consistent instance
+
+key := []byte("my-key")
+members, err := c.GetClosestN(key, 2)
+```
+
+This may be useful to find backup nodes to store your key.
+
+Benchmarks
+----------
+On an early 2015 Macbook:
+
+```
+BenchmarkAddRemove-4 100000 22006 ns/op
+BenchmarkLocateKey-4 5000000 252 ns/op
+BenchmarkGetClosestN-4 500000 2974 ns/op
+```
+
+Examples
+--------
+
+The most basic use of the consistent package looks like this. For a detailed list of functions, [visit godoc.org](https://godoc.org/github.com/buraksezer/consistent).
+More sample code can be found under [_examples](https://github.com/buraksezer/consistent/tree/master/_examples).
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/buraksezer/consistent"
+ "github.com/cespare/xxhash"
+)
+
+// In your code, you probably have a custom data type
+// for your cluster members. Just add a String function to implement
+// the consistent.Member interface.
+type myMember string
+
+func (m myMember) String() string {
+ return string(m)
+}
+
+// consistent package doesn't provide a default hashing function.
+// You should provide a proper one to distribute keys/members uniformly.
+type hasher struct{}
+
+func (h hasher) Sum64(data []byte) uint64 {
+ // you should use a proper hash function for uniformity.
+ return xxhash.Sum64(data)
+}
+
+func main() {
+ // Create a new consistent instance
+ cfg := consistent.Config{
+ PartitionCount: 7,
+ ReplicationFactor: 20,
+ Load: 1.25,
+ Hasher: hasher{},
+ }
+ c := consistent.New(nil, cfg)
+
+ // Add some members to the consistent hash table.
+ // Add function calculates average load and distributes partitions over members
+ node1 := myMember("node1.olricmq.com")
+ c.Add(node1)
+
+ node2 := myMember("node2.olricmq.com")
+ c.Add(node2)
+
+ key := []byte("my-key")
+ // calculates partition id for the given key
+ // partID := hash(key) % partitionCount
+ // the partitions are already distributed among members by the Add function.
+ owner := c.LocateKey(key)
+ fmt.Println(owner.String())
+ // Prints node2.olricmq.com
+}
+```
+
+Another useful example is `_examples/relocation_percentage.go`. It creates a `consistent` object with 8 members and distributes partitions among them. It then adds a 9th member;
+here is the result with a proper configuration and hash function:
+
+```
+bloom:consistent burak$ go run _examples/relocation_percentage.go
+partID: 218 moved to node2.olricmq from node0.olricmq
+partID: 173 moved to node9.olricmq from node3.olricmq
+partID: 225 moved to node7.olricmq from node0.olricmq
+partID: 85 moved to node9.olricmq from node7.olricmq
+partID: 220 moved to node5.olricmq from node0.olricmq
+partID: 33 moved to node9.olricmq from node5.olricmq
+partID: 254 moved to node9.olricmq from node4.olricmq
+partID: 71 moved to node9.olricmq from node3.olricmq
+partID: 236 moved to node9.olricmq from node2.olricmq
+partID: 118 moved to node9.olricmq from node3.olricmq
+partID: 233 moved to node3.olricmq from node0.olricmq
+partID: 50 moved to node9.olricmq from node4.olricmq
+partID: 252 moved to node9.olricmq from node2.olricmq
+partID: 121 moved to node9.olricmq from node2.olricmq
+partID: 259 moved to node9.olricmq from node4.olricmq
+partID: 92 moved to node9.olricmq from node7.olricmq
+partID: 152 moved to node9.olricmq from node3.olricmq
+partID: 105 moved to node9.olricmq from node2.olricmq
+
+6% of the partitions are relocated
+```
+
+The number of moved partitions depends heavily on your configuration and the quality of the hash function. You should tune the configuration to find an optimal set of parameters
+for your system.
+
+`_examples/load_distribution.go` is also useful for understanding load distribution. It creates a `consistent` object with 8 members and locates 1M keys. It also calculates the average
+load, which cannot be exceeded by any member. Here is the result:
+
+```
+Maximum key count for a member should be around this: 147602
+member: node2.olricmq, key count: 100362
+member: node5.olricmq, key count: 99448
+member: node0.olricmq, key count: 147735
+member: node3.olricmq, key count: 103455
+member: node6.olricmq, key count: 147069
+member: node1.olricmq, key count: 121566
+member: node4.olricmq, key count: 147932
+member: node7.olricmq, key count: 132433
+```
+
+The average load can be calculated using the following formula:
+
+```
+load := (consistent.AverageLoad() * float64(keyCount)) / float64(config.PartitionCount)
+```
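+
+For example, here is a hedged sketch using the `cfg` and `c` values from the example above; `keyCount` is hypothetical:
+
+```go
+// keyCount is a hypothetical workload size, for illustration only.
+keyCount := 1000000
+maxKeys := (c.AverageLoad() * float64(keyCount)) / float64(cfg.PartitionCount)
+fmt.Printf("no member should own more than ~%.0f keys\n", maxKeys)
+```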
+
+Contributions
+-------------
+Please don't hesitate to fork the project and send a pull request or just e-mail me to ask questions and share ideas.
+
+License
+-------
+MIT License - see the LICENSE file for more details.
diff --git a/vendor/github.com/buraksezer/consistent/consistent.go b/vendor/github.com/buraksezer/consistent/consistent.go
new file mode 100755
index 0000000..34a5108
--- /dev/null
+++ b/vendor/github.com/buraksezer/consistent/consistent.go
@@ -0,0 +1,359 @@
+// Copyright (c) 2018 Burak Sezer
+// All rights reserved.
+//
+// This code is licensed under the MIT License.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package consistent provides a consistent hashing function with bounded loads.
+// For more information about the underlying algorithm, please take a look at
+// https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
+//
+// Example Use:
+// cfg := consistent.Config{
+// PartitionCount: 71,
+// ReplicationFactor: 20,
+// Load: 1.25,
+// Hasher: hasher{},
+// }
+//
+// // Create a new consistent object
+// // You may call this with a list of members
+// // instead of adding them one by one.
+// c := consistent.New(members, cfg)
+//
+// // myMember struct just needs to implement a String method.
+// // New/Add/Remove distributes partitions among members using the algorithm
+// // defined on Google Research Blog.
+// c.Add(myMember)
+//
+// key := []byte("my-key")
+// // LocateKey hashes the key and calculates partition ID with
+// // this modulo operation: MOD(hash result, partition count)
+// // The owner of the partition is already calculated by New/Add/Remove.
+// LocateKey just returns the member that is responsible for the key.
+// member := c.LocateKey(key)
+//
+package consistent
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+)
+
+var (
+	// ErrInsufficientMemberCount represents an error which means there are not enough members to complete the task.
+ ErrInsufficientMemberCount = errors.New("insufficient member count")
+
+ // ErrMemberNotFound represents an error which means requested member could not be found in consistent hash ring.
+ ErrMemberNotFound = errors.New("member could not be found in ring")
+)
+
+// Hasher is responsible for generating an unsigned, 64-bit hash of the provided byte slice.
+// Hasher should minimize collisions (generating the same hash for different byte slices),
+// and while collision resistance matters, fast functions are preferable (e.g. the FarmHash family).
+type Hasher interface {
+ Sum64([]byte) uint64
+}
+
+// Member represents a member in the consistent hash ring.
+type Member interface {
+ String() string
+}
+
+// Config represents a structure to control consistent package.
+type Config struct {
+	// Hasher is responsible for generating an unsigned, 64-bit hash of the provided byte slice.
+ Hasher Hasher
+
+	// Keys are distributed among partitions. Prime numbers are good for
+	// distributing keys uniformly. Select a big PartitionCount if you have
+	// a large number of keys.
+ PartitionCount int
+
+	// Members are replicated on the consistent hash ring. This number controls
+	// how many times a member is replicated on the ring.
+ ReplicationFactor int
+
+ // Load is used to calculate average load. See the code, the paper and Google's blog post to learn about it.
+ Load float64
+}
+
+// Consistent holds the information about the members of the consistent hash circle.
+type Consistent struct {
+ mu sync.RWMutex
+
+ config Config
+ hasher Hasher
+ sortedSet []uint64
+ partitionCount uint64
+ loads map[string]float64
+ members map[string]*Member
+ partitions map[int]*Member
+ ring map[uint64]*Member
+}
+
+// New creates and returns a new Consistent object.
+func New(members []Member, config Config) *Consistent {
+ c := &Consistent{
+ config: config,
+ members: make(map[string]*Member),
+ partitionCount: uint64(config.PartitionCount),
+ ring: make(map[uint64]*Member),
+ }
+ if config.Hasher == nil {
+ panic("Hasher cannot be nil")
+ }
+ // TODO: Check configuration here
+ c.hasher = config.Hasher
+ for _, member := range members {
+ c.add(member)
+ }
+ if members != nil {
+ c.distributePartitions()
+ }
+ return c
+}
+
+// GetMembers returns a thread-safe copy of members.
+func (c *Consistent) GetMembers() []Member {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ // Create a thread-safe copy of member list.
+ members := []Member{}
+ for _, member := range c.members {
+ members = append(members, *member)
+ }
+ return members
+}
+
+// AverageLoad exposes the current average load.
+func (c *Consistent) AverageLoad() float64 {
+ avgLoad := float64(c.partitionCount/uint64(len(c.members))) * c.config.Load
+ return math.Ceil(avgLoad)
+}
+
+func (c *Consistent) distributeWithLoad(partID, idx int, partitions map[int]*Member, loads map[string]float64) {
+ avgLoad := c.AverageLoad()
+ var count int
+ for {
+ count++
+ if count >= len(c.sortedSet) {
+ // User needs to decrease partition count, increase member count or increase load factor.
+ panic("not enough room to distribute partitions")
+ }
+ i := c.sortedSet[idx]
+ tmp := c.ring[i]
+ member := *tmp
+ load := loads[member.String()]
+ if load+1 <= avgLoad {
+ partitions[partID] = &member
+ loads[member.String()]++
+ return
+ }
+ idx++
+ if idx >= len(c.sortedSet) {
+ idx = 0
+ }
+ }
+}
+
+func (c *Consistent) distributePartitions() {
+ loads := make(map[string]float64)
+ partitions := make(map[int]*Member)
+
+ bs := make([]byte, 8)
+ for partID := uint64(0); partID < c.partitionCount; partID++ {
+ binary.LittleEndian.PutUint64(bs, partID)
+ key := c.hasher.Sum64(bs)
+ idx := sort.Search(len(c.sortedSet), func(i int) bool {
+ return c.sortedSet[i] >= key
+ })
+ if idx >= len(c.sortedSet) {
+ idx = 0
+ }
+ c.distributeWithLoad(int(partID), idx, partitions, loads)
+ }
+ c.partitions = partitions
+ c.loads = loads
+}
+
+func (c *Consistent) add(member Member) {
+ for i := 0; i < c.config.ReplicationFactor; i++ {
+ key := []byte(fmt.Sprintf("%s%d", member.String(), i))
+ h := c.hasher.Sum64(key)
+ c.ring[h] = &member
+ c.sortedSet = append(c.sortedSet, h)
+ }
+	// sort hashes in ascending order
+ sort.Slice(c.sortedSet, func(i int, j int) bool {
+ return c.sortedSet[i] < c.sortedSet[j]
+ })
+	// Storing members in this map is useful for finding backup members of a partition.
+ c.members[member.String()] = &member
+}
+
+// Add adds a new member to the consistent hash circle.
+func (c *Consistent) Add(member Member) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if _, ok := c.members[member.String()]; ok {
+		// We already have this member. Quit immediately.
+ return
+ }
+ c.add(member)
+ c.distributePartitions()
+}
+
+func (c *Consistent) delSlice(val uint64) {
+ for i := 0; i < len(c.sortedSet); i++ {
+ if c.sortedSet[i] == val {
+ c.sortedSet = append(c.sortedSet[:i], c.sortedSet[i+1:]...)
+ break
+ }
+ }
+}
+
+// Remove removes a member from the consistent hash circle.
+func (c *Consistent) Remove(name string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if _, ok := c.members[name]; !ok {
+ // There is no member with that name. Quit immediately.
+ return
+ }
+
+ for i := 0; i < c.config.ReplicationFactor; i++ {
+ key := []byte(fmt.Sprintf("%s%d", name, i))
+ h := c.hasher.Sum64(key)
+ delete(c.ring, h)
+ c.delSlice(h)
+ }
+ delete(c.members, name)
+ if len(c.members) == 0 {
+ // consistent hash ring is empty now. Reset the partition table.
+ c.partitions = make(map[int]*Member)
+ return
+ }
+ c.distributePartitions()
+}
+
+// LoadDistribution exposes load distribution of members.
+func (c *Consistent) LoadDistribution() map[string]float64 {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ // Create a thread-safe copy
+ res := make(map[string]float64)
+ for member, load := range c.loads {
+ res[member] = load
+ }
+ return res
+}
+
+// FindPartitionID returns the partition ID for the given key.
+func (c *Consistent) FindPartitionID(key []byte) int {
+ hkey := c.hasher.Sum64(key)
+ return int(hkey % c.partitionCount)
+}
+
+// GetPartitionOwner returns the owner of the given partition.
+func (c *Consistent) GetPartitionOwner(partID int) Member {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ member, ok := c.partitions[partID]
+ if !ok {
+ return nil
+ }
+ // Create a thread-safe copy of member and return it.
+ return *member
+}
+
+// LocateKey finds a home for the given key.
+func (c *Consistent) LocateKey(key []byte) Member {
+ partID := c.FindPartitionID(key)
+ return c.GetPartitionOwner(partID)
+}
+
+func (c *Consistent) getClosestN(partID, count int) ([]Member, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ res := []Member{}
+ if count > len(c.members)-1 {
+ return res, ErrInsufficientMemberCount
+ }
+
+ var ownerKey uint64
+ owner := c.GetPartitionOwner(partID)
+ // Hash and sort all the names.
+ keys := []uint64{}
+ kmems := make(map[uint64]*Member)
+ for name, member := range c.members {
+ key := c.hasher.Sum64([]byte(name))
+ if name == owner.String() {
+ ownerKey = key
+ }
+ keys = append(keys, key)
+ kmems[key] = member
+ }
+ sort.Slice(keys, func(i, j int) bool {
+ return keys[i] < keys[j]
+ })
+
+ // Find the member
+ idx := 0
+ for idx < len(keys) {
+ if keys[idx] == ownerKey {
+ break
+ }
+ idx++
+ }
+
+ // Find the closest members.
+ for len(res) < count {
+ idx++
+ if idx >= len(keys) {
+ idx = 0
+ }
+ key := keys[idx]
+ res = append(res, *kmems[key])
+ }
+ return res, nil
+}
+
+// GetClosestN returns the closest N members to a key in the hash ring. This may be useful for finding members for replication.
+func (c *Consistent) GetClosestN(key []byte, count int) ([]Member, error) {
+ partID := c.FindPartitionID(key)
+ return c.getClosestN(partID, count)
+}
+
+// GetClosestNForPartition returns the closest N members for the given partition. This may be useful for finding members for replication.
+func (c *Consistent) GetClosestNForPartition(partID, count int) ([]Member, error) {
+ return c.getClosestN(partID, count)
+}
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt
new file mode 100755
index 0000000..24b5306
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
new file mode 100755
index 0000000..cbcc6ea
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/README.md
@@ -0,0 +1,55 @@
+# xxhash
+
+[GoDoc](https://godoc.org/github.com/cespare/xxhash)
+[Build Status](https://travis-ci.org/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
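+
+As a minimal usage sketch (assuming only the API listed above):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cespare/xxhash"
+)
+
+func main() {
+	// One-shot hashing of a byte slice.
+	fmt.Println(xxhash.Sum64([]byte("hello")))
+
+	// Streaming: Digest implements hash.Hash64.
+	d := xxhash.New()
+	d.Write([]byte("hello "))
+	d.Write([]byte("world"))
+	fmt.Println(d.Sum64())
+}
+```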
+
+The package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [FreeCache](https://github.com/coocood/freecache)
diff --git a/vendor/github.com/cespare/xxhash/go.mod b/vendor/github.com/cespare/xxhash/go.mod
new file mode 100755
index 0000000..b83ab6a
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/go.mod
@@ -0,0 +1,7 @@
+module github.com/cespare/xxhash/v2
+
+require (
+ github.com/OneOfOne/xxhash v1.2.2
+ github.com/cespare/xxhash v1.1.0 // indirect
+ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72
+)
diff --git a/vendor/github.com/cespare/xxhash/go.sum b/vendor/github.com/cespare/xxhash/go.sum
new file mode 100755
index 0000000..a7f1832
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/go.sum
@@ -0,0 +1,6 @@
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go
new file mode 100755
index 0000000..db0b35f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash.go
@@ -0,0 +1,236 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+ prime1v = prime1
+ prime2v = prime2
+ prime3v = prime3
+ prime4v = prime4
+ prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = prime1v + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -prime1v
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(d.mem[d.n:], b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ copy(d.mem[d.n:], b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[32-d.n:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ i, end := 0, d.n
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(d.mem[i:i+8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for i < end {
+ h ^= uint64(d.mem[i]) * prime5
+ h = rol11(h) * prime1
+ i++
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ b = b[len(d.mem):]
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go
new file mode 100755
index 0000000..35318d7
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(*Digest, []byte) int
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s
new file mode 100755
index 0000000..d580e32
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX h
+// CX pointer to advance through b
+// DX n
+// BX loop end
+// R8 v1, k1
+// R9 v2
+// R10 v3
+// R11 v4
+// R12 tmp
+// R13 prime1v
+// R14 prime2v
+// R15 prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+ MOVQ (CX), R12 \
+ ADDQ $8, CX \
+ IMULQ R14, R12 \
+ ADDQ R12, r \
+ ROLQ $31, r \
+ IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+#define mergeRound(acc, val) \
+ IMULQ R14, val \
+ ROLQ $31, val \
+ IMULQ R13, val \
+ XORQ val, acc \
+ IMULQ R13, acc \
+ ADDQ R15, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+ // Load fixed primes.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+ MOVQ ·prime4v(SB), R15
+
+ // Load slice.
+ MOVQ b_base+0(FP), CX
+ MOVQ b_len+8(FP), DX
+ LEAQ (CX)(DX*1), BX
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, BX
+
+ // Check whether we have at least one block.
+ CMPQ DX, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ R13, R8
+ ADDQ R14, R8
+ MOVQ R14, R9
+ XORQ R10, R10
+ XORQ R11, R11
+ SUBQ R13, R11
+
+ // Loop until CX > BX.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ MOVQ R8, AX
+ ROLQ $1, AX
+ MOVQ R9, R12
+ ROLQ $7, R12
+ ADDQ R12, AX
+ MOVQ R10, R12
+ ROLQ $12, R12
+ ADDQ R12, AX
+ MOVQ R11, R12
+ ROLQ $18, R12
+ ADDQ R12, AX
+
+ mergeRound(AX, R8)
+ mergeRound(AX, R9)
+ mergeRound(AX, R10)
+ mergeRound(AX, R11)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+ ADDQ DX, AX
+
+ // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+ ADDQ $24, BX
+
+ CMPQ CX, BX
+ JG fourByte
+
+wordLoop:
+ // Calculate k1.
+ MOVQ (CX), R8
+ ADDQ $8, CX
+ IMULQ R14, R8
+ ROLQ $31, R8
+ IMULQ R13, R8
+
+ XORQ R8, AX
+ ROLQ $27, AX
+ IMULQ R13, AX
+ ADDQ R15, AX
+
+ CMPQ CX, BX
+ JLE wordLoop
+
+fourByte:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JG singles
+
+ MOVL (CX), R8
+ ADDQ $4, CX
+ IMULQ R13, R8
+ XORQ R8, AX
+
+ ROLQ $23, AX
+ IMULQ R14, AX
+ ADDQ ·prime3v(SB), AX
+
+singles:
+ ADDQ $4, BX
+ CMPQ CX, BX
+ JGE finalize
+
+singlesLoop:
+ MOVBQZX (CX), R12
+ ADDQ $1, CX
+ IMULQ ·prime5v(SB), R12
+ XORQ R12, AX
+
+ ROLQ $11, AX
+ IMULQ R13, AX
+
+ CMPQ CX, BX
+ JL singlesLoop
+
+finalize:
+ MOVQ AX, R12
+ SHRQ $33, R12
+ XORQ R12, AX
+ IMULQ R14, AX
+ MOVQ AX, R12
+ SHRQ $29, R12
+ XORQ R12, AX
+ IMULQ ·prime3v(SB), AX
+ MOVQ AX, R12
+ SHRQ $32, R12
+ XORQ R12, AX
+
+ MOVQ AX, ret+24(FP)
+ RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·prime1v(SB), R13
+ MOVQ ·prime2v(SB), R14
+
+ // Load slice.
+ MOVQ b_base+8(FP), CX
+ MOVQ b_len+16(FP), DX
+ LEAQ (CX)(DX*1), BX
+ SUBQ $32, BX
+
+ // Load vN from d.
+ MOVQ d+0(FP), AX
+ MOVQ 0(AX), R8 // v1
+ MOVQ 8(AX), R9 // v2
+ MOVQ 16(AX), R10 // v3
+ MOVQ 24(AX), R11 // v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+blockLoop:
+ round(R8)
+ round(R9)
+ round(R10)
+ round(R11)
+
+ CMPQ CX, BX
+ JLE blockLoop
+
+ // Copy vN back to d.
+ MOVQ R8, 0(AX)
+ MOVQ R9, 8(AX)
+ MOVQ R10, 16(AX)
+ MOVQ R11, 24(AX)
+
+ // The number of bytes written is CX minus the old base pointer.
+ SUBQ b_base+8(FP), CX
+ MOVQ CX, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go
new file mode 100755
index 0000000..4a5a821
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := prime1v + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -prime1v
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ i, end := 0, len(b)
+ for ; i+8 <= end; i += 8 {
+ k1 := round(0, u64(b[i:i+8:len(b)]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if i+4 <= end {
+ h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ h = rol23(h)*prime2 + prime3
+ i += 4
+ }
+ for ; i < end; i++ {
+ h ^= uint64(b[i]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go
new file mode 100755
index 0000000..fc9bea7
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash_safe.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
new file mode 100755
index 0000000..53bf76e
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
@@ -0,0 +1,46 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Notes:
+//
+// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
+// for some discussion about these unsafe conversions.
+//
+// In the future it's possible that compiler optimizations will make these
+// unsafe operations unnecessary: https://golang.org/issue/2205.
+//
+// Both of these wrapper functions still incur function call overhead since they
+// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
+// for strings to squeeze out a bit more speed. Mid-stack inlining should
+// eventually fix this.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ var b []byte
+ bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+ bh.Len = len(s)
+ bh.Cap = len(s)
+ return d.Write(b)
+}
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100755
index 0000000..d361bbc
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100755
index 0000000..af27ff0
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100755
index 0000000..988dcea
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,44 @@
+INI [Build Status](https://travis-ci.org/go-ini/ini) [Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini)
+===
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+To use the latest changes:
+
+```sh
+$ go get github.com/go-ini/ini
+```
+
+Add the `-u` flag to update in the future.
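+
+As a minimal, hedged sketch of typical usage (the file name, section, and key below are hypothetical):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/go-ini/ini"
+)
+
+func main() {
+	// my.ini is a hypothetical file containing, e.g.:
+	//   [server]
+	//   host = localhost
+	cfg, err := ini.Load("my.ini")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(cfg.Section("server").Key("host").String())
+}
+```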
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100755
index 0000000..80afe74
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
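+// ErrDelimiterNotFound is returned when a line contains no key-value delimiter.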
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
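+// IsErrDelimiterNotFound reports whether err is an ErrDelimiterNotFound.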
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
new file mode 100755
index 0000000..d7982c3
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -0,0 +1,407 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+	// BlockMode should make things safe, but sometimes it doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore the error here; we are sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns the section with the given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+ if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes the named section exists and returns it, creating an empty section when it does not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns the list of sections.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := "="
+ if PrettyFormat || PrettyEqual {
+ equalSign = " = "
+ }
+
+	// Use a buffer to make sure the target is safe until encoding finishes.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ } else {
+ sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:])
+ }
+ if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters, so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.ContainsAny(kname, "\"=:") {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KEY_LIST:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ } else {
+ key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
+ }
+
+ // Support multiline comments
+ key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1)
+
+ if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with the given indentation.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because we are truncating with os.Create,
+	// it's safer to save to a temporary file location and rename it once done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100755
index 0000000..2f0776b
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,211 @@
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "runtime"
+)
+
+const (
+ // Name for default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+ DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+ _VERSION = "1.38.1"
+)
+
+// Version returns current package version literal.
+func Version() string {
+ return _VERSION
+}
+
+var (
+ // Delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows
+ // at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Indicate whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+
+ // Place spaces around "=" sign even when PrettyFormat is false
+ PrettyEqual = false
+
+ // Explicitly write DEFAULT section header
+ DefaultHeader = false
+
+ // Indicate whether to put a line between sections
+ PrettySection = true
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+ }
+}
+
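+// LoadOptions contains options that customize how data sources are loaded and parsed.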
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+ IgnoreInlineComment bool
+ // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+ SkipUnrecognizableLines bool
+	// AllowBooleanKeys indicates whether to allow boolean-type keys or treat them as keys with missing values.
+	// These keys are mostly used in my.cnf.
+ AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with the same name under the same section.
+ AllowShadows bool
+ // AllowNestedValues indicates whether to allow AWS-like nested values.
+ // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+ AllowNestedValues bool
+ // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+ // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+ // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+ // than the first line of the value.
+ AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether an inline comment symbol (# or ;) must be preceded by whitespace; otherwise it is treated as part of the value.
+ // Docs: https://docs.python.org/2/library/configparser.html
+ // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+ SpaceBeforeInlineComment bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+ // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
+ // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+ // when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes, but that may not be necessary at all.
+ UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks whose raw content does not conform to key/value pairs
+	// and is kept as-is. Specify the names of those blocks here.
+ UnparseableSections []string
+}
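+
+// A sketch of loading with customized options (hypothetical file name):
+//
+//	f, err := ini.LoadSources(ini.LoadOptions{
+//		Insensitive:         true,
+//		AllowBooleanKeys:    true,
+//		UnparseableSections: []string{"COMMENTS"},
+//	}, "my.cnf")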
+
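+// LoadSources loads and parses INI data sources with the given options.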
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
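+
+// For example (hypothetical file name; arguments may mix file names and raw bytes):
+//
+//	f, err := ini.Load("base.ini", []byte("extra = value"))
+//	if err != nil {
+//		// handle the I/O or parse error
+//	}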
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except it forces all section and key names to lowercase.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function,
+// except it allows shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
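+
+// With shadows enabled, repeated keys are preserved instead of overwritten.
+// A sketch (ValueWithShadows is defined in key.go):
+//
+//	f, _ := ini.ShadowLoad([]byte("[ssh]\nIdentityFile = a\nIdentityFile = b"))
+//	vals := f.Section("ssh").Key("IdentityFile").ValueWithShadows() // ["a", "b"]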
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100755
index 0000000..7c8566a
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,751 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ nestedValues []string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+ if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add nested value to auto-increment or boolean key")
+ }
+
+ k.nestedValues = append(k.nestedValues, val)
+ return nil
+}
+
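+// AddNestedValue adds a new nested value to the key;
+// it returns an error when nested values are not allowed.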
+func (k *Key) AddNestedValue(val string) error {
+ if !k.s.f.options.AllowNestedValues {
+ return errors.New("nested value is not allowed")
+ }
+ return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns the raw value of the key for performance purposes.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ return []string{k.value}
+ }
+ vals := make([]string, len(k.shadows)+1)
+ vals[0] = k.value
+ for i := range k.shadows {
+ vals[i+1] = k.shadows[i].value
+ }
+ return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// The returned slice may be nil if no nested values are stored in the key.
+func (k *Key) NestedValues() []string {
+ return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms it into its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+	// Fail fast if no indicator char is found for a recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil || k == nk {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+		// Substitute the variable reference (including leading '%(' and trailing ')s') with the new value.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
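+
+// For reference, this implements %(name)s substitution. Parsing
+//
+//	[paths]
+//	home = /home/git
+//	repo = %(home)s/repositories
+//
+// yields "/home/git/repositories" for key "repo".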
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns a value without error;
+// it returns false if an error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns a value without error;
+// it returns 0.0 if an error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns a value without error;
+// it returns the zero value if an error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with the given format and returns a value without error;
+// it returns the zero value if an error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns a value without error;
+// it returns the zero value if an error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
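+
+// The Must* helpers are convenient for optional settings. A sketch
+// (hypothetical section and key names; cfg comes from Load):
+//
+//	port := cfg.Section("server").Key("port").MustInt(8080)
+//	tls := cfg.Section("server").Key("tls").MustBool(false)
+//
+// Note that when the default is used, it is also written back to the key.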
+
+// In always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with the given format and returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
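+
+// For example, falling back to a default for an out-of-range value (a sketch):
+//
+//	// Returns 10 when the stored value parses outside [0, 100].
+//	level := cfg.Section("").Key("level").RangeInt(10, 0, 100)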
+
+// Strings returns a list of strings divided by the given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ runes := []rune(str)
+ vals := make([]string, 0, 2)
+ var buf bytes.Buffer
+ escape := false
+ idx := 0
+ for {
+ if escape {
+ escape = false
+ if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+ buf.WriteRune('\\')
+ }
+ buf.WriteRune(runes[idx])
+ } else {
+ if runes[idx] == '\\' {
+ escape = true
+ } else if strings.HasPrefix(string(runes[idx:]), delim) {
+ idx += len(delim) - 1
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ buf.Reset()
+ } else {
+ buf.WriteRune(runes[idx])
+ }
+ }
+ idx += 1
+ if idx == len(runes) {
+ break
+ }
+ }
+
+ if buf.Len() > 0 {
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ }
+
+ return vals
+}
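+
+// Note the backslash handling above: a delimiter can be escaped inside an
+// element. A sketch:
+//
+//	// names = alpha, beta\, inc, gamma
+//	cfg.Section("").Key("names").Strings(",") // ["alpha", "beta, inc", "gamma"]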
+
+// StringsWithShadows returns a list of strings divided by the given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+	for i := range vals {
+		if len(vals[i]) == 0 {
+			continue
+		}
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns a list of float64 divided by the given delimiter. If some value is not a float, then
+// it will not be included in the result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns a list of int divided by the given delimiter. If some value is not an integer, then it will
+// not be included in the result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns a list of int64 divided by the given delimiter. If some value is not a 64-bit integer,
+// then it will not be included in the result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns a list of uint divided by the given delimiter. If some value is not an unsigned integer,
+// then it will not be included in the result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns a list of uint64 divided by the given delimiter. If some value is not a 64-bit unsigned
+// integer, then it will not be included in the result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
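+
+// The three list families differ only in how invalid input is handled.
+// A sketch, for a key holding "1,x,3":
+//
+//	k.Ints(",")       // [1, 0, 3]: invalid input becomes the zero value
+//	k.ValidInts(",")  // [1, 3]:    invalid input is dropped
+//	k.StrictInts(",") // error on the first invalid input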
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.Atoi(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 0)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, uint(val))
+ }
+ }
+ return vals, nil
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ for _, str := range strs {
+ val, err := time.Parse(format, str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100755
index 0000000..3daf54c
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -0,0 +1,494 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)")
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles the byte order mark header of UTF-8, UTF-16 LE and UTF-16 BE content.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ p.buf.Read(mask)
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+ line := string(in)
+
+	// Check if the key name is surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+	// Extract the key name
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], "=:")
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, "=:")
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
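+
+// For reference, a continuation value spans lines that end with a backslash
+// (handled unless IgnoreContinuation is set):
+//
+//	key = first part \
+//	      second part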
+
+// hasSurroundedQuote checks if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other part also contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte,
+ parserBufferSize int,
+ ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if unescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if unescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !ignoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+	// Check whether to ignore inline comments
+ if !ignoreInlineComment {
+ var i int
+ if spaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"') {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && unescapeValueCommentSymbols {
+ if strings.Contains(line, `\;`) {
+ line = strings.Replace(line, `\;`, ";", -1)
+ }
+ if strings.Contains(line, `\#`) {
+ line = strings.Replace(line, `\#`, "#", -1)
+ }
+ } else if allowPythonMultilines && lastChar == '\n' {
+ parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ identSize := -1
+ val := line
+
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil {
+ if peekErr == io.EOF {
+ return val, nil
+ }
+ return "", peekErr
+ }
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ if len(peekMatches) != 3 {
+ return val, nil
+ }
+
+ currentIdentSize := len(peekMatches[1])
+			// NOTE: Return if not a Python-style multi-line value.
+			if currentIdentSize <= 0 {
+ return val, nil
+ }
+ identSize = currentIdentSize
+
+ // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+
+ val += fmt.Sprintf("\n%s", peekMatches[2])
+ }
+
+ // NOTE: If it was a Python multi-line value,
+ // return the appended value.
+ if identSize > 0 {
+ return val, nil
+ }
+ }
+
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DEFAULT_SECTION
+ if f.options.Insensitive {
+ name = strings.ToLower(DEFAULT_SECTION)
+ }
+ section, _ := f.NewSection(name)
+
+	// This "last" is not strictly the "previous one" when the current key is not the first nested key.
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 1kb at a time.
+ currentPeekSize := 1024
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+			// Note: we keep the trailing line break here,
+			// since it is needed when appending a second comment line;
+			// it is cleaned once at the end when assigned to a value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) {
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line,
+ parserBufferSize,
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes,
+ f.options.UnescapeValueCommentSymbols,
+ f.options.AllowPythonMultilineValues,
+ f.options.SpaceBeforeInlineComment)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:],
+ parserBufferSize,
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes,
+ f.options.UnescapeValueCommentSymbols,
+ f.options.AllowPythonMultilineValues,
+ f.options.SpaceBeforeInlineComment)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100755
index 0000000..340a1ef
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,258 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the INI format's other rules regarding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+ if !s.isRawSection {
+ return
+ }
+ s.rawBody = body
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ s.keysHash[name] = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean-type key in the given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+// TODO: delete me in v2
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + "."
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name])
+ }
+ }
+ return children
+}
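+
+// A sketch of nested sections (hypothetical names):
+//
+//	[parent]
+//	[parent.child]
+//
+//	f.Section("parent").ChildSections() // contains section "parent.child"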
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100755
index 0000000..9719dc6
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,512 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if err != nil && isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets a proper value to the field based on its type,
+// but it does not return an error for a failed parse,
+// because we want to use the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int64(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+ opts := strings.SplitN(tag, ",", 3)
+ rawName = opts[0]
+ if len(opts) > 1 {
+ omitEmpty = opts[1] == "omitempty"
+ }
+ if len(opts) > 2 {
+ allowShadow = opts[2] == "allowshadow"
+ }
+ return rawName, omitEmpty, allowShadow
+}
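+
+// The tag format is "name[,omitempty[,allowshadow]]". A sketch:
+//
+//	type Config struct {
+//		Name  string   `ini:"name,omitempty"`
+//		Hosts []string `ini:"host,,allowshadow"`
+//	}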
+
+func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field, isStrict); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val, false)
+}
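+
+// A minimal mapping sketch (hypothetical struct, section, and key names):
+//
+//	type Server struct {
+//		Addr    string        `ini:"addr"`
+//		Timeout time.Duration `ini:"timeout"`
+//	}
+//
+//	var srv Server
+//	err := cfg.Section("server").MapTo(&srv)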
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ sliceOf := field.Type().Elem().Kind()
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ opts := strings.SplitN(tag, ",", 2)
+ if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, opts[0])
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only possible error here is that the section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
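
A minimal sketch of how the mapping halves above fit together, assuming the vendored package is imported as gopkg.in/ini.v1 (the Config struct and INI content here are illustrative, not part of this repository):

```go
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1" // assumed import path for the vendored go-ini
)

// Config is a hypothetical struct. The `ini` tag picks the key name,
// `delim` sets the slice separator, and `omitempty` makes ReflectFrom
// skip the key when the field holds its zero value.
type Config struct {
	Name string   `ini:"name"`
	Tags []string `ini:"tags" delim:","`
	Note string   `ini:"note,omitempty"`
}

func main() {
	cfg, err := ini.Load([]byte("name = rss\ntags = a,b\n"))
	if err != nil {
		panic(err)
	}

	var c Config
	// MapTo requires a pointer, per the reflect.Ptr checks above;
	// StrictMapTo would additionally surface value-parsing errors.
	if err := cfg.MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:rss Tags:[a b] Note:}

	// Reflect the struct back out; Note is omitted as empty.
	out := ini.Empty()
	if err := ini.ReflectFrom(out, &c); err != nil {
		panic(err)
	}
}
```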
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md
new file mode 100755
index 0000000..2abf98e
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Chris Hines
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md
new file mode 100755
index 0000000..f11cccc
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/README.md
@@ -0,0 +1,38 @@
+[GoDoc](https://godoc.org/github.com/go-stack/stack)
+[Go Report Card](https://goreportcard.com/report/go-stack/stack)
+[Travis CI](https://travis-ci.org/go-stack/stack)
+[Coveralls](https://coveralls.io/github/go-stack/stack?branch=master)
+
+# stack
+
+Package stack implements utilities to capture, manipulate, and format call
+stacks. It provides a simpler API than package runtime.
+
+The implementation takes care of the minutiae and special cases of interpreting
+the program counter (pc) values returned by runtime.Callers.
+
+## Versioning
+
+Package stack publishes releases via [semver](http://semver.org/) compatible Git
+tags prefixed with a single 'v'. The master branch always contains the latest
+release. The develop branch contains unreleased commits.
+
+## Formatting
+
+Package stack's types implement fmt.Formatter, which provides a simple and
+flexible way to declaratively configure formatting when used with logging or
+error tracking packages.
+
+```go
+func DoTheThing() {
+ c := stack.Caller(0)
+ log.Print(c) // "source.go:10"
+ log.Printf("%+v", c) // "pkg/path/source.go:10"
+ log.Printf("%n", c) // "DoTheThing"
+
+ s := stack.Trace().TrimRuntime()
+ log.Print(s) // "[source.go:15 caller.go:42 main.go:14]"
+}
+```
+
+See the docs for all of the supported formatting options.
diff --git a/vendor/github.com/go-stack/stack/go.mod b/vendor/github.com/go-stack/stack/go.mod
new file mode 100755
index 0000000..96a53a1
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/go.mod
@@ -0,0 +1 @@
+module github.com/go-stack/stack
diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go
new file mode 100755
index 0000000..ac3b93b
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/stack.go
@@ -0,0 +1,400 @@
+// +build go1.7
+
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutiae and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+ frame runtime.Frame
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+ // As of Go 1.9 we need room for up to three PC entries.
+ //
+ // 0. An entry for the stack frame prior to the target to check for
+ // special handling needed if that prior entry is runtime.sigpanic.
+ // 1. A possible second entry to hold metadata about skipped inlined
+ // functions. If inline functions were not skipped the target frame
+ // PC will be here.
+ // 2. A third entry for the target frame PC when the second entry
+ // is used for skipped inline functions.
+ var pcs [3]uintptr
+ n := runtime.Callers(skip+1, pcs[:])
+ frames := runtime.CallersFrames(pcs[:n])
+ frame, _ := frames.Next()
+ frame, _ = frames.Next()
+
+ return Call{
+ frame: frame,
+ }
+}
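
The skip semantics mirror runtime.Caller: a hedged sketch of a logging helper that reports its caller's location rather than its own (names and line numbers are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

// logHere prefixes a message with the location of logHere's caller.
// skip=1 ascends one frame past logHere itself; skip=0 would report
// the stack.Caller line inside logHere.
func logHere(msg string) {
	fmt.Printf("%v: %s\n", stack.Caller(1), msg)
}

func main() {
	logHere("started") // prints something like "main.go:19: started"
}
```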
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+ return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+ if c.frame == (runtime.Frame{}) {
+ return nil, ErrNoFunc
+ }
+
+ buf := bytes.Buffer{}
+ fmt.Fprint(&buf, c)
+ return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
+// cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+// %s source file
+// %d line number
+// %n function name
+// %k last segment of the package path
+// %v equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+// %+s path of source file relative to the compile time GOPATH,
+// or the module path joined to the path of source file relative
+// to module root
+// %#s full path of source file
+// %+n import path qualified function name
+// %+k full package path
+// %+v equivalent to %+s:%d
+// %#v equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+ if c.frame == (runtime.Frame{}) {
+ fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+ return
+ }
+
+ switch verb {
+ case 's', 'v':
+ file := c.frame.File
+ switch {
+ case s.Flag('#'):
+ // done
+ case s.Flag('+'):
+ file = pkgFilePath(&c.frame)
+ default:
+ const sep = "/"
+ if i := strings.LastIndex(file, sep); i != -1 {
+ file = file[i+len(sep):]
+ }
+ }
+ io.WriteString(s, file)
+ if verb == 'v' {
+ buf := [7]byte{':'}
+ s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10))
+ }
+
+ case 'd':
+ buf := [6]byte{}
+ s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10))
+
+ case 'k':
+ name := c.frame.Function
+ const pathSep = "/"
+ start, end := 0, len(name)
+ if i := strings.LastIndex(name, pathSep); i != -1 {
+ start = i + len(pathSep)
+ }
+ const pkgSep = "."
+ if i := strings.Index(name[start:], pkgSep); i != -1 {
+ end = start + i
+ }
+ if s.Flag('+') {
+ start = 0
+ }
+ io.WriteString(s, name[start:end])
+
+ case 'n':
+ name := c.frame.Function
+ if !s.Flag('+') {
+ const pathSep = "/"
+ if i := strings.LastIndex(name, pathSep); i != -1 {
+ name = name[i+len(pathSep):]
+ }
+ const pkgSep = "."
+ if i := strings.Index(name, pkgSep); i != -1 {
+ name = name[i+len(pkgSep):]
+ }
+ }
+ io.WriteString(s, name)
+ }
+}
+
+// Frame returns the call frame information for the Call.
+func (c Call) Frame() runtime.Frame {
+ return c.frame
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+//
+// Deprecated: Use Call.Frame instead.
+func (c Call) PC() uintptr {
+ return c.frame.PC
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+ return fmt.Sprint(cs)
+}
+
+var (
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ spaceBytes = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+ buf := bytes.Buffer{}
+ buf.Write(openBracketBytes)
+ for i, pc := range cs {
+ if i > 0 {
+ buf.Write(spaceBytes)
+ }
+ fmt.Fprint(&buf, pc)
+ }
+ buf.Write(closeBracketBytes)
+ return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+ s.Write(openBracketBytes)
+ for i, pc := range cs {
+ if i > 0 {
+ s.Write(spaceBytes)
+ }
+ pc.Format(s, verb)
+ }
+ s.Write(closeBracketBytes)
+}
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+ var pcs [512]uintptr
+ n := runtime.Callers(1, pcs[:])
+
+ frames := runtime.CallersFrames(pcs[:n])
+ cs := make(CallStack, 0, n)
+
+ // Skip extra frame retrieved just to make sure the runtime.sigpanic
+ // special case is handled.
+ frame, more := frames.Next()
+
+ for more {
+ frame, more = frames.Next()
+ cs = append(cs, Call{frame: frame})
+ }
+
+ return cs
+}
+
+// TrimBelow returns a slice of the CallStack with all entries below c
+// removed.
+func (cs CallStack) TrimBelow(c Call) CallStack {
+ for len(cs) > 0 && cs[0] != c {
+ cs = cs[1:]
+ }
+ return cs
+}
+
+// TrimAbove returns a slice of the CallStack with all entries above c
+// removed.
+func (cs CallStack) TrimAbove(c Call) CallStack {
+ for len(cs) > 0 && cs[len(cs)-1] != c {
+ cs = cs[:len(cs)-1]
+ }
+ return cs
+}
+
+// pkgIndex returns the index that results in file[index:] being the path of
+// file relative to the compile time GOPATH, and file[:index] being the
+// $GOPATH/src/ portion of file. funcName must be the name of a function in
+// file as returned by runtime.Func.Name.
+func pkgIndex(file, funcName string) int {
+ // As of Go 1.6.2 there is no direct way to know the compile time GOPATH
+ // at runtime, but we can infer the number of path segments in the GOPATH.
+ // We note that runtime.Func.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // file[:idx] == /home/user/src/
+ // file[idx:] == pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired result for file[idx:]. We count separators from the
+ // end of the file path until we find two more than in the function name,
+ // then move one character forward to preserve the initial path
+ // segment without a leading separator.
+ const sep = "/"
+ i := len(file)
+ for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ return i + len(sep)
+}
+
+// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH,
+// or its module path joined to its path relative to the module root.
+//
+// As of Go 1.11 there is no direct way to know the compile time GOPATH or
+// module paths at runtime, but we can piece together the desired information
+// from available information. We note that runtime.Frame.Function contains the
+// function name qualified by the package path, which includes the module path
+// but not the GOPATH. We can extract the package path from that and append the
+// last segments of the file path to arrive at the desired package qualified
+// file path. For example, given:
+//
+// GOPATH /home/user
+// import path pkg/sub
+// frame.File /home/user/src/pkg/sub/file.go
+// frame.Function pkg/sub.Type.Method
+// Desired return pkg/sub/file.go
+//
+// It appears that we simply need to trim ".Type.Method" from frame.Function and
+// append "/" + path.Base(file).
+//
+// But there are other wrinkles. Although it is idiomatic to do so, the internal
+// name of a package is not required to match the last segment of its import
+// path. In addition, the introduction of modules in Go 1.11 allows working
+// without a GOPATH. So we also must make these work right:
+//
+// GOPATH /home/user
+// import path pkg/go-sub
+// package name sub
+// frame.File /home/user/src/pkg/go-sub/file.go
+// frame.Function pkg/sub.Type.Method
+// Desired return pkg/go-sub/file.go
+//
+// Module path pkg/v2
+// import path pkg/v2/go-sub
+// package name sub
+// frame.File /home/user/cloned-pkg/go-sub/file.go
+// frame.Function pkg/v2/sub.Type.Method
+// Desired return pkg/v2/go-sub/file.go
+//
+// We can handle all of these situations by using the package path extracted
+// from frame.Function up to, but not including, the last segment as the prefix
+// and the last two segments of frame.File as the suffix of the returned path.
+// This preserves the existing behavior when working in a GOPATH without modules
+// and a semantically equivalent behavior when used in a module-aware project.
+func pkgFilePath(frame *runtime.Frame) string {
+ pre := pkgPrefix(frame.Function)
+ post := pathSuffix(frame.File)
+ if pre == "" {
+ return post
+ }
+ return pre + "/" + post
+}
+
+// pkgPrefix returns the import path of the function's package with the final
+// segment removed.
+func pkgPrefix(funcName string) string {
+ const pathSep = "/"
+ end := strings.LastIndex(funcName, pathSep)
+ if end == -1 {
+ return ""
+ }
+ return funcName[:end]
+}
+
+// pathSuffix returns the last two segments of path.
+func pathSuffix(path string) string {
+ const pathSep = "/"
+ lastSep := strings.LastIndex(path, pathSep)
+ if lastSep == -1 {
+ return path
+ }
+ return path[strings.LastIndex(path[:lastSep], pathSep)+1:]
+}
+
+var runtimePath string
+
+func init() {
+ var pcs [3]uintptr
+ runtime.Callers(0, pcs[:])
+ frames := runtime.CallersFrames(pcs[:])
+ frame, _ := frames.Next()
+ file := frame.File
+
+ idx := pkgIndex(frame.File, frame.Function)
+
+ runtimePath = file[:idx]
+ if runtime.GOOS == "windows" {
+ runtimePath = strings.ToLower(runtimePath)
+ }
+}
+
+func inGoroot(c Call) bool {
+ file := c.frame.File
+ if len(file) == 0 || file[0] == '?' {
+ return true
+ }
+ if runtime.GOOS == "windows" {
+ file = strings.ToLower(file)
+ }
+ return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
+}
+
+// TrimRuntime returns a slice of the CallStack with the topmost entries from
+// the go runtime removed. It considers any calls originating from unknown
+// files, files under GOROOT, or _testmain.go as part of the runtime.
+func (cs CallStack) TrimRuntime() CallStack {
+ for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
+ cs = cs[:len(cs)-1]
+ }
+ return cs
+}
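
Putting the pieces above together — a sketch of capturing a trace, dropping runtime frames, and formatting with the package's verbs (the printed output shown is illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

func work() {
	c := stack.Caller(0)
	fmt.Printf("%n at %+v\n", c, c) // e.g. "work at pkg/path/main.go:12"

	// Trace captures the whole goroutine stack; TrimRuntime removes
	// the topmost frames that belong to the Go runtime itself.
	cs := stack.Trace().TrimRuntime()
	fmt.Printf("%v\n", cs) // e.g. "[main.go:17 main.go:22]"
}

func main() {
	work()
}
```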
diff --git a/vendor/github.com/golang-collections/go-datastructures/LICENSE b/vendor/github.com/golang-collections/go-datastructures/LICENSE
old mode 100644
new mode 100755
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/error.go b/vendor/github.com/golang-collections/go-datastructures/queue/error.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/priority_queue.go b/vendor/github.com/golang-collections/go-datastructures/queue/priority_queue.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/queue.go b/vendor/github.com/golang-collections/go-datastructures/queue/queue.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/ring.go b/vendor/github.com/golang-collections/go-datastructures/queue/ring.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100755
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100755
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100755
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100755
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100755
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
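
The length header is a standard unsigned varint, so decodedLen is a thin wrapper over encoding/binary — a quick illustration, not part of the vendored source:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A snappy block begins with the uncompressed length as a uvarint.
	// 300 encodes as two bytes: 0xac 0x02.
	head := []byte{0xac, 0x02}
	v, n := binary.Uvarint(head)
	fmt.Println(v, n) // 300 2 — blockLen 300, headerLen 2
}
```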
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
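
To make the framing-format Reader concrete, here is a minimal round trip, assuming the companion Writer from encode.go further below (a sketch, not part of the vendored source):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	var stream bytes.Buffer

	// NewBufferedWriter emits the stream-identifier chunk followed by
	// compressed (or, for incompressible data, uncompressed) chunks.
	w := snappy.NewBufferedWriter(&stream)
	if _, err := w.Write([]byte("hello, snappy framing format")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // Close flushes the final chunk
		panic(err)
	}

	// The Reader validates the stream identifier and per-chunk CRCs.
	r := snappy.NewReader(&stream)
	decoded, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded))
}
```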
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100755
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100755
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100755
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100755
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
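
As a quick sanity check of the bound just derived (a worked example, not vendored code):

```go
package main

import "fmt"

func main() {
	// Worst case for a 1 MiB block: 32 + n + n/6.
	const n = 1 << 20
	fmt.Println(32 + n + n/6) // 1223370 bytes, ~16.7% blowup plus a constant
}
```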
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
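
For reference, the per-chunk layout emitted above can be read back as follows; a sketch assuming the stream header has already been consumed, not code from the package:

```go
package main

import "fmt"

// readChunkHeader decodes the 8 bytes that (*Writer).write places before
// a chunk body: 1 type byte, a 3-byte little-endian chunk length (which
// counts the 4 checksum bytes plus the body), and a 4-byte checksum.
func readChunkHeader(hdr [8]byte) (chunkType uint8, bodyLen int, checksum uint32) {
	chunkType = hdr[0]
	chunkLen := int(hdr[1]) | int(hdr[2])<<8 | int(hdr[3])<<16
	bodyLen = chunkLen - 4
	checksum = uint32(hdr[4]) | uint32(hdr[5])<<8 | uint32(hdr[6])<<16 | uint32(hdr[7])<<24
	return chunkType, bodyLen, checksum
}

func main() {
	// A compressed-data chunk with a 10-byte body and checksum 0x01020304.
	hdr := [8]byte{0x00, 14, 0, 0, 0x04, 0x03, 0x02, 0x01}
	fmt.Println(readChunkHeader(hdr)) // 0 10 16909060
}
```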
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100755
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100755
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100755
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
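
A concrete check of the 68/60 threshold reasoning in the comment above; copyOpBytes is a hypothetical helper that mirrors the splitting without emitting bytes, assuming a small (< 2048) offset:

```go
package main

import "fmt"

// copyOpBytes mirrors emitCopy's splitting strategy for a small offset
// (< 2048) and returns the encoded size in bytes. Sketch only.
func copyOpBytes(length int) int {
	n := 0
	for length >= 68 {
		n += 3 // length-64 tagCopy2
		length -= 64
	}
	if length > 64 {
		n += 3 // length-60 tagCopy2
		length -= 60
	}
	if length >= 12 {
		return n + 3 // 3-byte tagCopy2 remainder
	}
	return n + 2 // 2-byte tagCopy1 remainder
}

func main() {
	// 67 splits as 60+7 for 3+2 = 5 bytes; splitting as 64+3 would need
	// two tagCopy2 ops at 3+3 = 6 bytes, which is why the loop threshold
	// is 68 rather than 64.
	fmt.Println(copyOpBytes(67)) // 5
}
```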
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
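
The match-skipping heuristic quoted from the C++ implementation can be made concrete; a standalone sketch that prints how the probe step grows while no match is found:

```go
package main

import "fmt"

func main() {
	// Mirrors the heuristic in encodeBlock: each probe advances by
	// skip>>5 bytes and adds that amount to skip, so after every further
	// 32 bytes scanned without a match, the step size grows by one.
	s, skip, lastStep := 1, 32, 0
	for probe := 0; probe < 80; probe++ {
		step := skip >> 5
		if step != lastStep {
			fmt.Printf("probe %2d at s=%4d: step is now %d\n", probe, s, step)
			lastStep = step
		}
		s += step
		skip += step
	}
}
```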
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100755
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
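
The tag-byte split described above is a two-bit/six-bit unpack; a small illustrative sketch (decodeTag is hypothetical, not a package function):

```go
package main

import "fmt"

// decodeTag splits a chunk's first byte into the 2-bit tag l and the
// 6-bit modifier m described above.
func decodeTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

func main() {
	// 0xf4 is a literal tag with m == 61: the literal length is carried
	// in the next m-59 = 2 bytes (this is the threeBytes case in the
	// amd64 assembly above).
	l, m := decodeTag(0xf4)
	fmt.Println(l, m) // 0 61
}
```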
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
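
The same masked checksum, written out standalone for clarity; per section 3 of the framing format, the rotation and added constant keep checksums well-distributed even when the data itself contains embedded CRCs. A sketch, with the table rebuilt per call for brevity:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// maskedCRC mirrors the crc function above: a Castagnoli CRC-32,
// rotated right by 15 bits and offset by a constant.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("snappy")))
}
```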
diff --git a/vendor/github.com/gomodule/redigo/LICENSE b/vendor/github.com/gomodule/redigo/LICENSE
new file mode 100755
index 0000000..67db858
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/gomodule/redigo/redis/commandinfo.go b/vendor/github.com/gomodule/redigo/redis/commandinfo.go
new file mode 100755
index 0000000..b6df6a2
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/commandinfo.go
@@ -0,0 +1,55 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "strings"
+)
+
+const (
+ connectionWatchState = 1 << iota
+ connectionMultiState
+ connectionSubscribeState
+ connectionMonitorState
+)
+
+type commandInfo struct {
+ // Set or Clear these states on connection.
+ Set, Clear int
+}
+
+var commandInfos = map[string]commandInfo{
+ "WATCH": {Set: connectionWatchState},
+ "UNWATCH": {Clear: connectionWatchState},
+ "MULTI": {Set: connectionMultiState},
+ "EXEC": {Clear: connectionWatchState | connectionMultiState},
+ "DISCARD": {Clear: connectionWatchState | connectionMultiState},
+ "PSUBSCRIBE": {Set: connectionSubscribeState},
+ "SUBSCRIBE": {Set: connectionSubscribeState},
+ "MONITOR": {Set: connectionMonitorState},
+}
+
+func init() {
+ for n, ci := range commandInfos {
+ commandInfos[strings.ToLower(n)] = ci
+ }
+}
+
+func lookupCommandInfo(commandName string) commandInfo {
+ if ci, ok := commandInfos[commandName]; ok {
+ return ci
+ }
+ return commandInfos[strings.ToUpper(commandName)]
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go
new file mode 100755
index 0000000..895d29e
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/conn.go
@@ -0,0 +1,700 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "regexp"
+ "strconv"
+ "sync"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*conn)(nil)
+)
+
+// conn is the low-level implementation of Conn
+type conn struct {
+ // Shared
+ mu sync.Mutex
+ pending int
+ err error
+ conn net.Conn
+
+ // Read
+ readTimeout time.Duration
+ br *bufio.Reader
+
+ // Write
+ writeTimeout time.Duration
+ bw *bufio.Writer
+
+ // Scratch space for formatting argument length.
+ // '*' or '$', length, "\r\n"
+ lenScratch [32]byte
+
+ // Scratch space for formatting integers and floats.
+ numScratch [40]byte
+}
+
+// DialTimeout acts like Dial but takes timeouts for establishing the
+// connection to the server, writing a command and reading a reply.
+//
+// Deprecated: Use Dial with options instead.
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
+ return Dial(network, address,
+ DialConnectTimeout(connectTimeout),
+ DialReadTimeout(readTimeout),
+ DialWriteTimeout(writeTimeout))
+}
+
+// DialOption specifies an option for dialing a Redis server.
+type DialOption struct {
+ f func(*dialOptions)
+}
+
+type dialOptions struct {
+ readTimeout time.Duration
+ writeTimeout time.Duration
+ dialer *net.Dialer
+ dial func(network, addr string) (net.Conn, error)
+ db int
+ password string
+ clientName string
+ useTLS bool
+ skipVerify bool
+ tlsConfig *tls.Config
+}
+
+// DialReadTimeout specifies the timeout for reading a single command reply.
+func DialReadTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.readTimeout = d
+ }}
+}
+
+// DialWriteTimeout specifies the timeout for writing a single command.
+func DialWriteTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.writeTimeout = d
+ }}
+}
+
+// DialConnectTimeout specifies the timeout for connecting to the Redis server when
+// no DialNetDial option is specified.
+func DialConnectTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.Timeout = d
+ }}
+}
+
+// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server
+// when no DialNetDial option is specified.
+// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then
+// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected.
+func DialKeepAlive(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.KeepAlive = d
+ }}
+}
+
+// DialNetDial specifies a custom dial function for creating TCP
+// connections, otherwise a net.Dialer customized via the other options is used.
+// DialNetDial overrides DialConnectTimeout and DialKeepAlive.
+func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dial = dial
+ }}
+}
+
+// DialDatabase specifies the database to select when dialing a connection.
+func DialDatabase(db int) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.db = db
+ }}
+}
+
+// DialPassword specifies the password to use when connecting to
+// the Redis server.
+func DialPassword(password string) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.password = password
+ }}
+}
+
+// DialClientName specifies a client name to be used
+// by the Redis server connection.
+func DialClientName(name string) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.clientName = name
+ }}
+}
+
+// DialTLSConfig specifies the config to use when a TLS connection is dialed.
+// Has no effect when not dialing a TLS connection.
+func DialTLSConfig(c *tls.Config) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.tlsConfig = c
+ }}
+}
+
+// DialTLSSkipVerify disables server name verification when connecting over
+// TLS. Has no effect when not dialing a TLS connection.
+func DialTLSSkipVerify(skip bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.skipVerify = skip
+ }}
+}
+
+// DialUseTLS specifies whether TLS should be used when connecting to the
+// server. This option is ignored by DialURL.
+func DialUseTLS(useTLS bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.useTLS = useTLS
+ }}
+}
+
+// Dial connects to the Redis server at the given network and
+// address using the specified options.
+func Dial(network, address string, options ...DialOption) (Conn, error) {
+ do := dialOptions{
+ dialer: &net.Dialer{
+ KeepAlive: time.Minute * 5,
+ },
+ }
+ for _, option := range options {
+ option.f(&do)
+ }
+ if do.dial == nil {
+ do.dial = do.dialer.Dial
+ }
+
+ netConn, err := do.dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ if do.useTLS {
+ var tlsConfig *tls.Config
+ if do.tlsConfig == nil {
+ tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify}
+ } else {
+ tlsConfig = cloneTLSConfig(do.tlsConfig)
+ }
+ if tlsConfig.ServerName == "" {
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ tlsConfig.ServerName = host
+ }
+
+ tlsConn := tls.Client(netConn, tlsConfig)
+ if err := tlsConn.Handshake(); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ netConn = tlsConn
+ }
+
+ c := &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: do.readTimeout,
+ writeTimeout: do.writeTimeout,
+ }
+
+ if do.password != "" {
+ if _, err := c.Do("AUTH", do.password); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ if do.clientName != "" {
+ if _, err := c.Do("CLIENT", "SETNAME", do.clientName); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ if do.db != 0 {
+ if _, err := c.Do("SELECT", do.db); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
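
A usage sketch for Dial with the options defined above; the address, timeouts, and database number are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379", // placeholder address
		redis.DialConnectTimeout(5*time.Second),
		redis.DialReadTimeout(time.Second),
		redis.DialDatabase(1),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if _, err := c.Do("PING"); err != nil {
		log.Fatal(err)
	}
}
```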
+
+var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
+func DialURL(rawurl string, options ...DialOption) (Conn, error) {
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "redis" && u.Scheme != "rediss" {
+ return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
+ }
+
+ // As per the IANA draft spec, the host defaults to localhost and
+ // the port defaults to 6379.
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // assume port is missing
+ host = u.Host
+ port = "6379"
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ address := net.JoinHostPort(host, port)
+
+ if u.User != nil {
+ password, isSet := u.User.Password()
+ if isSet {
+ options = append(options, DialPassword(password))
+ }
+ }
+
+ match := pathDBRegexp.FindStringSubmatch(u.Path)
+ if len(match) == 2 {
+ db := 0
+ if len(match[1]) > 0 {
+ db, err = strconv.Atoi(match[1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+ }
+ if db != 0 {
+ options = append(options, DialDatabase(db))
+ }
+ } else if u.Path != "" {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+
+ options = append(options, DialUseTLS(u.Scheme == "rediss"))
+
+ return Dial("tcp", address, options...)
+}
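
And the URL form, for completeness; the URL and credentials are illustrative:

```go
package main

import (
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	// The scheme picks TLS ("rediss") or plain ("redis"), the path picks
	// the database, and any password in the URL becomes a DialPassword
	// option.
	c, err := redis.DialURL("redis://user:secret@localhost:6379/2")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
}
```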
+
+// NewConn returns a new Redigo connection for the given net connection.
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
+ return &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: readTimeout,
+ writeTimeout: writeTimeout,
+ }
+}
+
+func (c *conn) Close() error {
+ c.mu.Lock()
+ err := c.err
+ if c.err == nil {
+ c.err = errors.New("redigo: closed")
+ err = c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) fatal(err error) error {
+ c.mu.Lock()
+ if c.err == nil {
+ c.err = err
+ // Close connection to force errors on subsequent calls and to unblock
+ // other reader or writer.
+ c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) Err() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+ c.lenScratch[len(c.lenScratch)-1] = '\n'
+ c.lenScratch[len(c.lenScratch)-2] = '\r'
+ i := len(c.lenScratch) - 3
+ for {
+ c.lenScratch[i] = byte('0' + n%10)
+ i -= 1
+ n = n / 10
+ if n == 0 {
+ break
+ }
+ }
+ c.lenScratch[i] = prefix
+ _, err := c.bw.Write(c.lenScratch[i:])
+ return err
+}
+
+func (c *conn) writeString(s string) error {
+ c.writeLen('$', len(s))
+ c.bw.WriteString(s)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+ c.writeLen('$', len(p))
+ c.bw.Write(p)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+ return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+ return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) error {
+ c.writeLen('*', 1+len(args))
+ if err := c.writeString(cmd); err != nil {
+ return err
+ }
+ for _, arg := range args {
+ if err := c.writeArg(arg, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
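
What writeCommand puts on the wire is the standard RESP array-of-bulk-strings framing; a worked example for SET k v (annotation only, not package code):

```go
package main

import "fmt"

func main() {
	// What writeCommand("SET", "k", "v") produces, annotated:
	//
	//	*3\r\n          array header: 3 elements follow
	//	$3\r\nSET\r\n   bulk string of length 3: the command name
	//	$1\r\nk\r\n     bulk string of length 1: first argument
	//	$1\r\nv\r\n     bulk string of length 1: second argument
	const setKV = "*3\r\n$3\r\nSET\r\n$1\r\nk\r\n$1\r\nv\r\n"
	fmt.Printf("%q\n", setKV)
}
```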
+
+func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) {
+ switch arg := arg.(type) {
+ case string:
+ return c.writeString(arg)
+ case []byte:
+ return c.writeBytes(arg)
+ case int:
+ return c.writeInt64(int64(arg))
+ case int64:
+ return c.writeInt64(arg)
+ case float64:
+ return c.writeFloat64(arg)
+ case bool:
+ if arg {
+ return c.writeString("1")
+ } else {
+ return c.writeString("0")
+ }
+ case nil:
+ return c.writeString("")
+ case Argument:
+ if argumentTypeOK {
+ return c.writeArg(arg.RedisArg(), false)
+ }
+ // See comment in default clause below.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ default:
+ // This default clause is intended to handle builtin numeric types.
+ // The function should return an error for other types, but this is not
+ // done for compatibility with previous versions of the package.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ }
+}
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+ return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+// readLine reads a line of input from the RESP stream.
+func (c *conn) readLine() ([]byte, error) {
+ // To avoid allocations, attempt to read the line using ReadSlice. This
+ // call typically succeeds. The known case where the call fails is when
+ // reading the output from the MONITOR command.
+ p, err := c.br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ // The line does not fit in the bufio.Reader's buffer. Fall back to
+ // allocating a buffer for the line.
+ buf := append([]byte{}, p...)
+ for err == bufio.ErrBufferFull {
+ p, err = c.br.ReadSlice('\n')
+ buf = append(buf, p...)
+ }
+ p = buf
+ }
+ if err != nil {
+ return nil, err
+ }
+ i := len(p) - 2
+ if i < 0 || p[i] != '\r' {
+ return nil, protocolError("bad response line terminator")
+ }
+ return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+ if len(p) == 0 {
+ return -1, protocolError("malformed length")
+ }
+
+ if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+		// handle $-1 and *-1 null replies.
+ return -1, nil
+ }
+
+ var n int
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return -1, protocolError("illegal bytes in length")
+ }
+ n += int(b - '0')
+ }
+
+ return n, nil
+}
+
+// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) {
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+
+ var negate bool
+ if p[0] == '-' {
+ negate = true
+ p = p[1:]
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+ }
+
+ var n int64
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return 0, protocolError("illegal bytes in length")
+ }
+ n += int64(b - '0')
+ }
+
+ if negate {
+ n = -n
+ }
+ return n, nil
+}
+
+var (
+ okReply interface{} = "OK"
+ pongReply interface{} = "PONG"
+)
+
+func (c *conn) readReply() (interface{}, error) {
+ line, err := c.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if len(line) == 0 {
+ return nil, protocolError("short response line")
+ }
+ switch line[0] {
+ case '+':
+ switch {
+ case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
+ // Avoid allocation for frequent "+OK" response.
+ return okReply, nil
+ case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
+ // Avoid allocation in PING command benchmarks :)
+ return pongReply, nil
+ default:
+ return string(line[1:]), nil
+ }
+ case '-':
+ return Error(string(line[1:])), nil
+ case ':':
+ return parseInt(line[1:])
+ case '$':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ p := make([]byte, n)
+ _, err = io.ReadFull(c.br, p)
+ if err != nil {
+ return nil, err
+ }
+ if line, err := c.readLine(); err != nil {
+ return nil, err
+ } else if len(line) != 0 {
+ return nil, protocolError("bad bulk string format")
+ }
+ return p, nil
+ case '*':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ r := make([]interface{}, n)
+ for i := range r {
+ r[i], err = c.readReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+ }
+ return nil, protocolError("unexpected response line")
+}
+
+func (c *conn) Send(cmd string, args ...interface{}) error {
+ c.mu.Lock()
+ c.pending += 1
+ c.mu.Unlock()
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.writeCommand(cmd, args); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Flush() error {
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.bw.Flush(); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Receive() (interface{}, error) {
+ return c.ReceiveWithTimeout(c.readTimeout)
+}
+
+func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ var deadline time.Time
+ if timeout != 0 {
+ deadline = time.Now().Add(timeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if reply, err = c.readReply(); err != nil {
+ return nil, c.fatal(err)
+ }
+ // When using pub/sub, the number of receives can be greater than the
+ // number of sends. To enable normal use of the connection after
+ // unsubscribing from all channels, we do not decrement pending to a
+ // negative value.
+ //
+ // The pending field is decremented after the reply is read to handle the
+ // case where Receive is called before Send.
+ c.mu.Lock()
+ if c.pending > 0 {
+ c.pending -= 1
+ }
+ c.mu.Unlock()
+ if err, ok := reply.(Error); ok {
+ return nil, err
+ }
+ return
+}
+
+func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
+ return c.DoWithTimeout(c.readTimeout, cmd, args...)
+}
+
+func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ c.mu.Lock()
+ pending := c.pending
+ c.pending = 0
+ c.mu.Unlock()
+
+ if cmd == "" && pending == 0 {
+ return nil, nil
+ }
+
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+
+ if cmd != "" {
+ if err := c.writeCommand(cmd, args); err != nil {
+ return nil, c.fatal(err)
+ }
+ }
+
+ if err := c.bw.Flush(); err != nil {
+ return nil, c.fatal(err)
+ }
+
+ var deadline time.Time
+ if readTimeout != 0 {
+ deadline = time.Now().Add(readTimeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if cmd == "" {
+ reply := make([]interface{}, pending)
+ for i := range reply {
+ r, e := c.readReply()
+ if e != nil {
+ return nil, c.fatal(e)
+ }
+ reply[i] = r
+ }
+ return reply, nil
+ }
+
+ var err error
+ var reply interface{}
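+ // Read one reply for each pipelined command plus one for the command
+ // just written. The last reply is returned to the caller; the first
+ // error reply, if any, is returned as err.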
+ for i := 0; i <= pending; i++ {
+ var e error
+ if reply, e = c.readReply(); e != nil {
+ return nil, c.fatal(e)
+ }
+ if e, ok := reply.(Error); ok && err == nil {
+ err = e
+ }
+ }
+ return reply, err
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go
new file mode 100755
index 0000000..69ad506
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/doc.go
@@ -0,0 +1,177 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialWithTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+// Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+// n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to bulk strings for transmission
+// to the server as follows:
+//
+// Go Type Conversion
+// []byte Sent as is
+// string Sent as is
+// int, int64 strconv.FormatInt(v, 10)
+// float64 strconv.FormatFloat(v, 'g', -1, 64)
+// bool true -> "1", false -> "0"
+// nil ""
+// all other types fmt.Fprint(w, v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+// Redis type Go type
+// error redis.Error
+// integer int64
+// simple string string
+// bulk string []byte or nil if value not present.
+// array []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+// Send(commandName string, args ...interface{}) error
+// Flush() error
+// Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+// c.Send("SET", "foo", "bar")
+// c.Send("GET", "foo")
+// c.Flush()
+// c.Receive() // reply from SET
+// v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+// c.Send("MULTI")
+// c.Send("INCR", "foo")
+// c.Send("INCR", "bar")
+// r, err := c.Do("EXEC")
+// fmt.Println(r) // prints [1 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported including concurrent calls to the Do and Close methods.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+// c.Send("SUBSCRIBE", "example")
+// c.Flush()
+// for {
+// reply, err := c.Receive()
+// if err != nil {
+// return err
+// }
+// // process pushed message
+// }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+// psc := redis.PubSubConn{Conn: c}
+// psc.Subscribe("example")
+// for {
+// switch v := psc.Receive().(type) {
+// case redis.Message:
+// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+// case redis.Subscription:
+// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+// case error:
+// return v
+// }
+// }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+// if err != nil {
+// // handle error return from c.Do or type conversion error.
+// }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+// var value1 int
+// var value2 string
+// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+// if err != nil {
+// // handle error
+// }
+// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+// // handle error
+// }
+//
+// Errors
+//
+// Connection methods return error replies from the server as type redis.Error.
+//
+// Call the connection Err() method to determine if the connection encountered a
+// non-recoverable error such as a network error or protocol parsing error. If
+// Err() returns a non-nil value, then the connection is not usable and should
+// be closed.
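+//
+// A sketch of a caller's check:
+//
+// if err := c.Err(); err != nil {
+// // the connection is broken; close it and dial a new one
+// c.Close()
+// }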
+package redis
diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go
new file mode 100755
index 0000000..5f36379
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/go17.go
@@ -0,0 +1,29 @@
+// +build go1.7,!go1.8
+
+package redis
+
+import "crypto/tls"
+
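+// cloneTLSConfig returns a copy of cfg. (*tls.Config).Clone was only added in
+// Go 1.8, so under Go 1.7 the exported fields are copied one by one.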
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+ Renegotiation: cfg.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go
new file mode 100755
index 0000000..558363b
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go
new file mode 100755
index 0000000..a06db9d
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/log.go
@@ -0,0 +1,146 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*loggingConn)(nil)
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
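+//
+// A sketch of typical use while debugging (c is an existing connection):
+//
+// c = redis.NewLoggingConn(c, log.New(os.Stderr, "", log.LstdFlags), "client")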
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+ if prefix != "" {
+ prefix = prefix + "."
+ }
+ return &loggingConn{conn, logger, prefix, nil}
+}
+
+// NewLoggingConnFilter returns a logging wrapper around a connection;
+// commands for which skip returns true are not logged.
+func NewLoggingConnFilter(conn Conn, logger *log.Logger, prefix string, skip func(cmdName string) bool) Conn {
+ if prefix != "" {
+ prefix = prefix + "."
+ }
+ return &loggingConn{conn, logger, prefix, skip}
+}
+
+type loggingConn struct {
+ Conn
+ logger *log.Logger
+ prefix string
+ skip func(cmdName string) bool
+}
+
+func (c *loggingConn) Close() error {
+ err := c.Conn.Close()
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
+ c.logger.Output(2, buf.String())
+ return err
+}
+
+func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
+ const chop = 32
+ switch v := v.(type) {
+ case []byte:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case string:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case []interface{}:
+ if len(v) == 0 {
+ buf.WriteString("[]")
+ } else {
+ sep := "["
+ fin := "]"
+ if len(v) > chop {
+ v = v[:chop]
+ fin = "...]"
+ }
+ for _, vv := range v {
+ buf.WriteString(sep)
+ c.printValue(buf, vv)
+ sep = ", "
+ }
+ buf.WriteString(fin)
+ }
+ default:
+ fmt.Fprint(buf, v)
+ }
+}
+
+func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
+ if c.skip != nil && c.skip(commandName) {
+ return
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
+ if method != "Receive" {
+ buf.WriteString(commandName)
+ for _, arg := range args {
+ buf.WriteString(", ")
+ c.printValue(&buf, arg)
+ }
+ }
+ buf.WriteString(") -> (")
+ if method != "Send" {
+ c.printValue(&buf, reply)
+ buf.WriteString(", ")
+ }
+ fmt.Fprintf(&buf, "%v)", err)
+ c.logger.Output(3, buf.String())
+}
+
+func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := c.Conn.Do(commandName, args...)
+ c.print("Do", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...)
+ c.print("DoWithTimeout", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) Send(commandName string, args ...interface{}) error {
+ err := c.Conn.Send(commandName, args...)
+ c.print("Send", commandName, args, nil, err)
+ return err
+}
+
+func (c *loggingConn) Receive() (interface{}, error) {
+ reply, err := c.Conn.Receive()
+ c.print("Receive", "", nil, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) {
+ reply, err := ReceiveWithTimeout(c.Conn, timeout)
+ c.print("ReceiveWithTimeout", "", nil, reply, err)
+ return reply, err
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go
new file mode 100755
index 0000000..568374a
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/pool.go
@@ -0,0 +1,621 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha1"
+ "errors"
+ "io"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*activeConn)(nil)
+ _ ConnWithTimeout = (*errorConn)(nil)
+)
+
+var nowFunc = time.Now // for testing
+
+// ErrPoolExhausted is returned from a pool connection method (Do, Send,
+// Receive, Flush, Err) when the maximum number of database connections in the
+// pool has been reached.
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+ errPoolClosed = errors.New("redigo: connection pool closed")
+ errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a package level variable. The pool configuration used
+// here is an example, not a recommendation.
+//
+// func newPool(addr string) *redis.Pool {
+// return &redis.Pool{
+// MaxIdle: 3,
+// IdleTimeout: 240 * time.Second,
+// // Dial or DialContext must be set. When both are set, DialContext takes precedence over Dial.
+// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) },
+// }
+// }
+//
+// var (
+// pool *redis.Pool
+// redisServer = flag.String("redisServer", ":6379", "")
+// )
+//
+// func main() {
+// flag.Parse()
+// pool = newPool(*redisServer)
+// ...
+// }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+// func serveHome(w http.ResponseWriter, r *http.Request) {
+// conn := pool.Get()
+// defer conn.Close()
+// ...
+// }
+//
+// Use the Dial function to authenticate connections with the AUTH command or
+// select a database with the SELECT command:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// Dial: func () (redis.Conn, error) {
+// c, err := redis.Dial("tcp", server)
+// if err != nil {
+// return nil, err
+// }
+// if _, err := c.Do("AUTH", password); err != nil {
+// c.Close()
+// return nil, err
+// }
+// if _, err := c.Do("SELECT", db); err != nil {
+// c.Close()
+// return nil, err
+// }
+// return c, nil
+// },
+// }
+//
+// Use the TestOnBorrow function to check the health of an idle connection
+// before the connection is returned to the application. This example PINGs
+// connections that have been idle more than a minute:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// TestOnBorrow: func(c redis.Conn, t time.Time) error {
+// if time.Since(t) < time.Minute {
+// return nil
+// }
+// _, err := c.Do("PING")
+// return err
+// },
+// }
+//
+type Pool struct {
+ // Dial is an application supplied function for creating and configuring a
+ // connection.
+ //
+ // The connection returned from Dial must not be in a special state
+ // (subscribed to pubsub channel, transaction started, ...).
+ Dial func() (Conn, error)
+
+ // DialContext is an application supplied function for creating and configuring a
+ // connection with the given context.
+ //
+ // The connection returned from Dial must not be in a special state
+ // (subscribed to pubsub channel, transaction started, ...).
+ DialContext func(ctx context.Context) (Conn, error)
+
+ // TestOnBorrow is an optional application supplied function for checking
+ // the health of an idle connection before the connection is used again by
+ // the application. Argument t is the time that the connection was returned
+ // to the pool. If the function returns an error, then the connection is
+ // closed.
+ TestOnBorrow func(c Conn, t time.Time) error
+
+ // Maximum number of idle connections in the pool.
+ MaxIdle int
+
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActive int
+
+ // Close connections after remaining idle for this duration. If the value
+ // is zero, then idle connections are not closed. Applications should set
+ // the timeout to a value less than the server's timeout.
+ IdleTimeout time.Duration
+
+ // If Wait is true and the pool is at the MaxActive limit, then Get() waits
+ // for a connection to be returned to the pool before returning.
+ Wait bool
+
+ // Close connections older than this duration. If the value is zero, then
+ // the pool does not close connections based on age.
+ MaxConnLifetime time.Duration
+
+ chInitialized uint32 // set to 1 when field ch is initialized
+
+ mu sync.Mutex // mu protects the following fields
+ closed bool // set to true when the pool is closed.
+ active int // the number of open connections in the pool
+ ch chan struct{} // limits open connections when p.Wait is true
+ idle idleList // idle connections
+ waitCount int64 // total number of connections waited for.
+ waitDuration time.Duration // total time waited for new connections.
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+ return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+ pc, err := p.get(nil)
+ if err != nil {
+ return errorConn{err}
+ }
+ return &activeConn{p: p, pc: pc}
+}
+
+// GetContext gets a connection using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before the
+// connection is complete, an error is returned. Any expiration on the context
+// will not affect the returned connection.
+//
+// If the function completes without error, then the application must close the
+// returned connection.
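+//
+// A sketch of bounding the wait for a connection (pool is an initialized
+// *Pool with Wait and MaxActive set):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+// defer cancel()
+// c, err := pool.GetContext(ctx)
+// if err != nil {
+// // handle error
+// }
+// defer c.Close()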
+func (p *Pool) GetContext(ctx context.Context) (Conn, error) {
+ pc, err := p.get(ctx)
+ if err != nil {
+ return errorConn{err}, err
+ }
+ return &activeConn{p: p, pc: pc}, nil
+}
+
+// PoolStats contains pool statistics.
+type PoolStats struct {
+ // ActiveCount is the number of connections in the pool. The count includes
+ // idle connections and connections in use.
+ ActiveCount int
+ // IdleCount is the number of idle connections in the pool.
+ IdleCount int
+
+ // WaitCount is the total number of connections waited for.
+ // This value is currently not guaranteed to be 100% accurate.
+ WaitCount int64
+
+ // WaitDuration is the total time blocked waiting for a new connection.
+ // This value is currently not guaranteed to be 100% accurate.
+ WaitDuration time.Duration
+}
+
+// Stats returns pool's statistics.
+func (p *Pool) Stats() PoolStats {
+ p.mu.Lock()
+ stats := PoolStats{
+ ActiveCount: p.active,
+ IdleCount: p.idle.count,
+ WaitCount: p.waitCount,
+ WaitDuration: p.waitDuration,
+ }
+ p.mu.Unlock()
+
+ return stats
+}
+
+// ActiveCount returns the number of connections in the pool. The count
+// includes idle connections and connections in use.
+func (p *Pool) ActiveCount() int {
+ p.mu.Lock()
+ active := p.active
+ p.mu.Unlock()
+ return active
+}
+
+// IdleCount returns the number of idle connections in the pool.
+func (p *Pool) IdleCount() int {
+ p.mu.Lock()
+ idle := p.idle.count
+ p.mu.Unlock()
+ return idle
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+ p.mu.Lock()
+ if p.closed {
+ p.mu.Unlock()
+ return nil
+ }
+ p.closed = true
+ p.active -= p.idle.count
+ pc := p.idle.front
+ p.idle.count = 0
+ p.idle.front, p.idle.back = nil, nil
+ if p.ch != nil {
+ close(p.ch)
+ }
+ p.mu.Unlock()
+ for ; pc != nil; pc = pc.next {
+ pc.c.Close()
+ }
+ return nil
+}
+
+func (p *Pool) lazyInit() {
+ // Fast path.
+ if atomic.LoadUint32(&p.chInitialized) == 1 {
+ return
+ }
+ // Slow path.
+ p.mu.Lock()
+ if p.chInitialized == 0 {
+ p.ch = make(chan struct{}, p.MaxActive)
+ if p.closed {
+ close(p.ch)
+ } else {
+ for i := 0; i < p.MaxActive; i++ {
+ p.ch <- struct{}{}
+ }
+ }
+ atomic.StoreUint32(&p.chInitialized, 1)
+ }
+ p.mu.Unlock()
+}
+
+// get prunes stale connections and returns a connection from the idle list or
+// creates a new connection.
+func (p *Pool) get(ctx context.Context) (*poolConn, error) {
+
+ // Handle limit for p.Wait == true.
+ var waited time.Duration
+ if p.Wait && p.MaxActive > 0 {
+ p.lazyInit()
+
+ // wait indicates whether we expect to block; it is not 100% accurate,
+ // but for stats it is good enough.
+ wait := len(p.ch) == 0
+ var start time.Time
+ if wait {
+ start = time.Now()
+ }
+ if ctx == nil {
+ <-p.ch
+ } else {
+ select {
+ case <-p.ch:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ if wait {
+ waited = time.Since(start)
+ }
+ }
+
+ p.mu.Lock()
+
+ if waited > 0 {
+ p.waitCount++
+ p.waitDuration += waited
+ }
+
+ // Prune stale connections at the back of the idle list.
+ if p.IdleTimeout > 0 {
+ n := p.idle.count
+ for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ {
+ pc := p.idle.back
+ p.idle.popBack()
+ p.mu.Unlock()
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+ }
+
+ // Get idle connection from the front of idle list.
+ for p.idle.front != nil {
+ pc := p.idle.front
+ p.idle.popFront()
+ p.mu.Unlock()
+ if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) &&
+ (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) {
+ return pc, nil
+ }
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ // Check for pool closed before dialing a new connection.
+ if p.closed {
+ p.mu.Unlock()
+ return nil, errors.New("redigo: get on closed pool")
+ }
+
+ // Handle limit for p.Wait == false.
+ if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive {
+ p.mu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+
+ p.active++
+ p.mu.Unlock()
+ c, err := p.dial(ctx)
+ if err != nil {
+ c = nil
+ p.mu.Lock()
+ p.active--
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ }
+ return &poolConn{c: c, created: nowFunc()}, err
+}
+
+func (p *Pool) dial(ctx context.Context) (Conn, error) {
+ if p.DialContext != nil {
+ return p.DialContext(ctx)
+ }
+ if p.Dial != nil {
+ return p.Dial()
+ }
+ return nil, errors.New("redigo: must pass Dial or DialContext to pool")
+}
+
+func (p *Pool) put(pc *poolConn, forceClose bool) error {
+ p.mu.Lock()
+ if !p.closed && !forceClose {
+ pc.t = nowFunc()
+ p.idle.pushFront(pc)
+ if p.idle.count > p.MaxIdle {
+ pc = p.idle.back
+ p.idle.popBack()
+ } else {
+ pc = nil
+ }
+ }
+
+ if pc != nil {
+ p.mu.Unlock()
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ return nil
+}
+
+type activeConn struct {
+ p *Pool
+ pc *poolConn
+ state int
+}
+
+var (
+ sentinel []byte
+ sentinelOnce sync.Once
+)
+
+func initSentinel() {
+ p := make([]byte, 64)
+ if _, err := rand.Read(p); err == nil {
+ sentinel = p
+ } else {
+ h := sha1.New()
+ io.WriteString(h, "Oops, rand failed. Use time instead.")
+ io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
+ sentinel = h.Sum(nil)
+ }
+}
+
+func (ac *activeConn) Close() error {
+ pc := ac.pc
+ if pc == nil {
+ return nil
+ }
+ ac.pc = nil
+
+ if ac.state&connectionMultiState != 0 {
+ pc.c.Send("DISCARD")
+ ac.state &^= (connectionMultiState | connectionWatchState)
+ } else if ac.state&connectionWatchState != 0 {
+ pc.c.Send("UNWATCH")
+ ac.state &^= connectionWatchState
+ }
+ if ac.state&connectionSubscribeState != 0 {
+ pc.c.Send("UNSUBSCRIBE")
+ pc.c.Send("PUNSUBSCRIBE")
+ // To detect the end of the message stream, ask the server to echo
+ // a sentinel value and read until we see that value.
+ sentinelOnce.Do(initSentinel)
+ pc.c.Send("ECHO", sentinel)
+ pc.c.Flush()
+ for {
+ p, err := pc.c.Receive()
+ if err != nil {
+ break
+ }
+ if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
+ ac.state &^= connectionSubscribeState
+ break
+ }
+ }
+ }
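+ // Do("") flushes the output buffer and receives any pending replies
+ // without sending a command, leaving the connection in a clean state
+ // before it is returned to the pool.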
+ pc.c.Do("")
+ ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil)
+ return nil
+}
+
+func (ac *activeConn) Err() error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ return pc.c.Err()
+}
+
+func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ ci := lookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return pc.c.Do(commandName, args...)
+}
+
+func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ ci := lookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return cwt.DoWithTimeout(timeout, commandName, args...)
+}
+
+func (ac *activeConn) Send(commandName string, args ...interface{}) error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ ci := lookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return pc.c.Send(commandName, args...)
+}
+
+func (ac *activeConn) Flush() error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ return pc.c.Flush()
+}
+
+func (ac *activeConn) Receive() (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ return pc.c.Receive()
+}
+
+func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
+
+type errorConn struct{ err error }
+
+func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
+func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) {
+ return nil, ec.err
+}
+func (ec errorConn) Send(string, ...interface{}) error { return ec.err }
+func (ec errorConn) Err() error { return ec.err }
+func (ec errorConn) Close() error { return nil }
+func (ec errorConn) Flush() error { return ec.err }
+func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err }
+func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err }
+
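+// idleList is a doubly-linked list of idle connections: the most recently
+// used connection is pushed on the front, and stale connections are pruned
+// from the back.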
+type idleList struct {
+ count int
+ front, back *poolConn
+}
+
+type poolConn struct {
+ c Conn
+ t time.Time
+ created time.Time
+ next, prev *poolConn
+}
+
+func (l *idleList) pushFront(pc *poolConn) {
+ pc.next = l.front
+ pc.prev = nil
+ if l.count == 0 {
+ l.back = pc
+ } else {
+ l.front.prev = pc
+ }
+ l.front = pc
+ l.count++
+ return
+}
+
+func (l *idleList) popFront() {
+ pc := l.front
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ pc.next.prev = nil
+ l.front = pc.next
+ }
+ pc.next, pc.prev = nil, nil
+}
+
+func (l *idleList) popBack() {
+ pc := l.back
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ pc.prev.next = nil
+ l.back = pc.prev
+ }
+ pc.next, pc.prev = nil, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go
new file mode 100755
index 0000000..2da6021
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/pubsub.go
@@ -0,0 +1,148 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+ // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
+ Kind string
+
+ // The channel that was changed.
+ Channel string
+
+ // The current number of subscriptions for the connection.
+ Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+ // The originating channel.
+ Channel string
+
+ // The matched pattern, if any.
+ Pattern string
+
+ // The message data.
+ Data []byte
+}
+
+// Pong represents a pubsub pong notification.
+type Pong struct {
+ Data string
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+ Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+ return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+ c.Conn.Send("SUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+ c.Conn.Send("PSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+ c.Conn.Send("UNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+ c.Conn.Send("PUNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Ping sends a PING to the server with the specified data.
+//
+// The connection must be subscribed to at least one channel or pattern when
+// calling this method.
+func (c PubSubConn) Ping(data string) error {
+ c.Conn.Send("PING", data)
+ return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, Pong or error.
+// The return value is intended to be used directly in a type switch as
+// illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+ return c.receiveInternal(c.Conn.Receive())
+}
+
+// ReceiveWithTimeout is like Receive, but it allows the application to
+// override the connection's default timeout.
+func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} {
+ return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout))
+}
+
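+// receiveInternal converts a raw reply (and any receive error) into one of
+// the notification types above. For example, the array
+// ["message", channel, data] pushed by the server becomes a Message.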
+func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} {
+ reply, err := Values(replyArg, errArg)
+ if err != nil {
+ return err
+ }
+
+ var kind string
+ reply, err = Scan(reply, &kind)
+ if err != nil {
+ return err
+ }
+
+ switch kind {
+ case "message":
+ var m Message
+ if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "pmessage":
+ var m Message
+ if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+ s := Subscription{Kind: kind}
+ if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+ return err
+ }
+ return s
+ case "pong":
+ var p Pong
+ if _, err := Scan(reply, &p.Data); err != nil {
+ return err
+ }
+ return p
+ }
+ return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go
new file mode 100755
index 0000000..141fa4a
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/redis.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+ // Close closes the connection.
+ Close() error
+
+ // Err returns a non-nil value when the connection is not usable.
+ Err() error
+
+ // Do sends a command to the server and returns the received reply.
+ Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Send writes the command to the client's output buffer.
+ Send(commandName string, args ...interface{}) error
+
+ // Flush flushes the output buffer to the Redis server.
+ Flush() error
+
+ // Receive receives a single reply from the Redis server.
+ Receive() (reply interface{}, err error)
+}
+
+// Argument is the interface implemented by an object which wants to control how
+// the object is converted to Redis bulk strings.
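+//
+// For illustration only (timeArg is a hypothetical type, not part of this
+// package):
+//
+// type timeArg time.Time
+//
+// func (t timeArg) RedisArg() interface{} {
+// return time.Time(t).Format(time.RFC3339)
+// }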
+type Argument interface {
+ // RedisArg returns a value to be encoded as a bulk string per the
+ // conversions listed in the section 'Executing Commands'.
+ // Implementations should typically return a []byte or string.
+ RedisArg() interface{}
+}
+
+// Scanner is implemented by an object which wants to control how its value is
+// interpreted when read from Redis.
+type Scanner interface {
+ // RedisScan assigns a value from a Redis value. The argument src is one of
+ // the reply types listed in the section `Executing Commands`.
+ //
+ // An error should be returned if the value cannot be stored without
+ // loss of information.
+ RedisScan(src interface{}) error
+}
+
+// ConnWithTimeout is an optional interface that allows the caller to override
+// a connection's default read timeout. This interface is useful for executing
+// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the
+// server.
+//
+// A connection's default read timeout is set with the DialReadTimeout dial
+// option. Applications should rely on the default timeout for commands that do
+// not block at the server.
+//
+// All of the Conn implementations in this package satisfy the ConnWithTimeout
+// interface.
+//
+// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify
+// use of this interface.
+type ConnWithTimeout interface {
+ Conn
+
+ // Do sends a command to the server and returns the received reply.
+ // The timeout overrides the read timeout set when dialing the
+ // connection.
+ DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Receive receives a single reply from the Redis server. The timeout
+ // overrides the read timeout set when dialing the connection.
+ ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error)
+}
+
+var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout")
+
+// DoWithTimeout executes a Redis command with the specified read timeout. If
+// the connection does not satisfy the ConnWithTimeout interface, then an error
+// is returned.
+func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.DoWithTimeout(timeout, cmd, args...)
+}
+
+// ReceiveWithTimeout receives a reply with the specified read timeout. If the
+// connection does not satisfy the ConnWithTimeout interface, then an error is
+// returned.
+func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go
new file mode 100755
index 0000000..c2b3b2b
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/reply.go
@@ -0,0 +1,479 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+// Reply type Result
+// integer int(reply), nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int(reply interface{}, err error) (int, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ x := int(reply)
+ if int64(x) != reply {
+ return 0, strconv.ErrRange
+ }
+ return x, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 0)
+ return int(n), err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64 bit integer. If err
+// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int64(reply interface{}, err error) (int64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to a 64 bit unsigned
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ if reply < 0 {
+ return 0, errNegativeInt
+ }
+ return uint64(reply), nil
+ case []byte:
+ n, err := strconv.ParseUint(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64 bit float. If err
+// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64
+// converts the reply to a float64 as follows:
+//
+// Reply type Result
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Float64(reply interface{}, err error) (float64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ n, err := strconv.ParseFloat(string(reply), 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+// Reply type Result
+// bulk string string(reply), nil
+// simple string reply, nil
+// nil "", ErrNil
+// other "", error
+func String(reply interface{}, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return string(reply), nil
+ case string:
+ return reply, nil
+ case nil:
+ return "", ErrNil
+ case Error:
+ return "", reply
+ }
+ return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+// Reply type Result
+// bulk string reply, nil
+// simple string []byte(reply), nil
+// nil nil, ErrNil
+// other nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return reply, nil
+ case string:
+ return []byte(reply), nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to boolean as follows:
+//
+// Reply type Result
+// integer value != 0, nil
+// bulk string strconv.ParseBool(reply)
+// nil false, ErrNil
+// other false, error
+func Bool(reply interface{}, err error) (bool, error) {
+ if err != nil {
+ return false, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply != 0, nil
+ case []byte:
+ return strconv.ParseBool(string(reply))
+ case nil:
+ return false, ErrNil
+ case Error:
+ return false, reply
+ }
+ return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+// Reply type Result
+// array reply, nil
+// nil nil, ErrNil
+// other nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ return reply, nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error {
+ if err != nil {
+ return err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ makeSlice(len(reply))
+ for i := range reply {
+ if reply[i] == nil {
+ continue
+ }
+ if err := assign(i, reply[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ case nil:
+ return ErrNil
+ case Error:
+ return reply
+ }
+ return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply)
+}
+
+// Float64s is a helper that converts an array command reply to a []float64. If
+// err is not equal to nil, then Float64s returns nil, err. Nil array items are
+// converted to 0 in the output slice. Floats64 returns an error if an array
+// item is not a bulk string or nil.
+func Float64s(reply interface{}, err error) ([]float64, error) {
+ var result []float64
+ err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v)
+ }
+ f, err := strconv.ParseFloat(string(p), 64)
+ result[i] = f
+ return err
+ })
+ return result, err
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+ var result []string
+ err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case string:
+ result[i] = v
+ return nil
+ case []byte:
+ result[i] = string(v)
+ return nil
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+ var result [][]byte
+ err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v)
+ }
+ result[i] = p
+ return nil
+ })
+ return result, err
+}
+
+// Int64s is a helper that converts an array command reply to a []int64.
+// If err is not equal to nil, then Int64s returns nil, err. Nil array items
+// are converted to 0 in the output slice. Int64s returns an error if an
+// array item is not an integer, a bulk string, or nil.
+func Int64s(reply interface{}, err error) ([]int64, error) {
+ var result []int64
+ err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ result[i] = v
+ return nil
+ case []byte:
+ n, err := strconv.ParseInt(string(v), 10, 64)
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// Ints is a helper that converts an array command reply to a []int.
+// If err is not equal to nil, then Ints returns nil, err. Nil array items
+// are converted to 0 in the output slice. Ints returns an error if an
+// array item is not an integer, a bulk string, or nil.
+func Ints(reply interface{}, err error) ([]int, error) {
+ var result []int
+ err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ n := int(v)
+ if int64(n) != v {
+ return strconv.ErrRange
+ }
+ result[i] = n
+ return nil
+ case []byte:
+ n, err := strconv.Atoi(string(v))
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
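+//
+// A sketch of typical use:
+//
+// fields, err := redis.StringMap(c.Do("HGETALL", "myhash"))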
+func StringMap(result interface{}, err error) (map[string]string, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: StringMap expects even number of values result")
+ }
+ m := make(map[string]string, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, okKey := values[i].([]byte)
+ value, okValue := values[i+1].([]byte)
+ if !okKey || !okValue {
+ return nil, errors.New("redigo: StringMap key not a bulk string value")
+ }
+ m[string(key)] = string(value)
+ }
+ return m, nil
+}
+
+// IntMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func IntMap(result interface{}, err error) (map[string]int, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: IntMap expects even number of values result")
+ }
+ m := make(map[string]int, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: IntMap key not a bulk string value")
+ }
+ value, err := Int(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Int64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int64. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func Int64Map(result interface{}, err error) (map[string]int64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: Int64Map expects even number of values result")
+ }
+ m := make(map[string]int64, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: Int64Map key not a bulk string value")
+ }
+ value, err := Int64(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Positions is a helper that converts an array of positions (lat, long)
+// into a []*[2]float64; entries for missing members are nil. The GEOPOS
+// command returns replies in this format.
+func Positions(result interface{}, err error) ([]*[2]float64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ positions := make([]*[2]float64, len(values))
+ for i := range values {
+ if values[i] == nil {
+ continue
+ }
+ p, ok := values[i].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i])
+ }
+ if len(p) != 2 {
+ return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p))
+ }
+ lat, err := Float64(p[0], nil)
+ if err != nil {
+ return nil, err
+ }
+ long, err := Float64(p[1], nil)
+ if err != nil {
+ return nil, err
+ }
+ positions[i] = &[2]float64{lat, long}
+ }
+ return positions, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go
new file mode 100755
index 0000000..5227aac
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/scan.go
@@ -0,0 +1,630 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+func ensureLen(d reflect.Value, n int) {
+ if n > d.Cap() {
+ d.Set(reflect.MakeSlice(d.Type(), n, n))
+ } else {
+ d.SetLen(n)
+ }
+}
+
+func cannotConvert(d reflect.Value, s interface{}) error {
+ var sname string
+ switch s.(type) {
+ case string:
+ sname = "Redis simple string"
+ case Error:
+ sname = "Redis error"
+ case int64:
+ sname = "Redis integer"
+ case []byte:
+ sname = "Redis bulk string"
+ case []interface{}:
+ sname = "Redis array"
+ case nil:
+ sname = "Redis nil"
+ default:
+ sname = reflect.TypeOf(s).String()
+ }
+ return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
+}
+
+func convertAssignNil(d reflect.Value) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Slice, reflect.Interface:
+ d.Set(reflect.Zero(d.Type()))
+ default:
+ err = cannotConvert(d, nil)
+ }
+ return err
+}
+
+func convertAssignError(d reflect.Value, s Error) (err error) {
+ if d.Kind() == reflect.String {
+ d.SetString(string(s))
+ } else if d.Kind() == reflect.Slice && d.Type().Elem().Kind() == reflect.Uint8 {
+ d.SetBytes([]byte(s))
+ } else {
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignString(d reflect.Value, s string) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Float32, reflect.Float64:
+ var x float64
+ x, err = strconv.ParseFloat(s, d.Type().Bits())
+ d.SetFloat(x)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ var x int64
+ x, err = strconv.ParseInt(s, 10, d.Type().Bits())
+ d.SetInt(x)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ var x uint64
+ x, err = strconv.ParseUint(s, 10, d.Type().Bits())
+ d.SetUint(x)
+ case reflect.Bool:
+ var x bool
+ x, err = strconv.ParseBool(s)
+ d.SetBool(x)
+ case reflect.String:
+ d.SetString(s)
+ case reflect.Slice:
+ if d.Type().Elem().Kind() == reflect.Uint8 {
+ d.SetBytes([]byte(s))
+ } else {
+ err = cannotConvert(d, s)
+ }
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Slice:
+ // Handle []byte destination here to avoid unnecessary
+ // []byte -> string -> []byte conversion.
+ if d.Type().Elem().Kind() == reflect.Uint8 {
+ d.SetBytes(s)
+ } else {
+ err = cannotConvert(d, s)
+ }
+ default:
+ err = convertAssignString(d, string(s))
+ }
+ return err
+}
+
+func convertAssignInt(d reflect.Value, s int64) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ d.SetInt(s)
+ if d.Int() != s {
+ err = strconv.ErrRange
+ d.SetInt(0)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if s < 0 {
+ err = strconv.ErrRange
+ } else {
+ x := uint64(s)
+ d.SetUint(x)
+ if d.Uint() != x {
+ err = strconv.ErrRange
+ d.SetUint(0)
+ }
+ }
+ case reflect.Bool:
+ d.SetBool(s != 0)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+ if d.Kind() != reflect.Ptr {
+ if d.CanAddr() {
+ d2 := d.Addr()
+ if d2.CanInterface() {
+ if scanner, ok := d2.Interface().(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+ }
+ }
+ } else if d.CanInterface() {
+ // Already a reflect.Ptr
+ if d.IsNil() {
+ d.Set(reflect.New(d.Type().Elem()))
+ }
+ if scanner, ok := d.Interface().(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+ }
+
+ switch s := s.(type) {
+ case nil:
+ err = convertAssignNil(d)
+ case []byte:
+ err = convertAssignBulkString(d, s)
+ case int64:
+ err = convertAssignInt(d, s)
+ case string:
+ err = convertAssignString(d, s)
+ case Error:
+ err = convertAssignError(d, s)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return err
+}
+
+func convertAssignArray(d reflect.Value, s []interface{}) error {
+ if d.Type().Kind() != reflect.Slice {
+ return cannotConvert(d, s)
+ }
+ ensureLen(d, len(s))
+ for i := 0; i < len(s); i++ {
+ if err := convertAssignValue(d.Index(i), s[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func convertAssign(d interface{}, s interface{}) (err error) {
+ if scanner, ok := d.(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+
+ // Handle the most common destination types using type switches and
+ // fall back to reflection for all other types.
+ switch s := s.(type) {
+ case nil:
+ // ignore
+ case []byte:
+ switch d := d.(type) {
+ case *string:
+ *d = string(s)
+ case *int:
+ *d, err = strconv.Atoi(string(s))
+ case *bool:
+ *d, err = strconv.ParseBool(string(s))
+ case *[]byte:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignBulkString(d.Elem(), s)
+ }
+ }
+ case int64:
+ switch d := d.(type) {
+ case *int:
+ x := int(s)
+ if int64(x) != s {
+ err = strconv.ErrRange
+ x = 0
+ }
+ *d = x
+ case *bool:
+ *d = s != 0
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignInt(d.Elem(), s)
+ }
+ }
+ case string:
+ switch d := d.(type) {
+ case *string:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ err = cannotConvert(reflect.ValueOf(d), s)
+ }
+ case []interface{}:
+ switch d := d.(type) {
+ case *[]interface{}:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignArray(d.Elem(), s)
+ }
+ }
+ case Error:
+ err = s
+ default:
+ err = cannotConvert(reflect.ValueOf(d), s)
+ }
+ return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// Scan uses RedisScan if available; otherwise:
+//
+// Each value pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or a slice of these types. Scan uses the standard
+// strconv package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+ if len(src) < len(dest) {
+ return nil, errors.New("redigo.Scan: array short")
+ }
+ var err error
+ for i, d := range dest {
+ err = convertAssign(d, src[i])
+ if err != nil {
+ err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
+ break
+ }
+ }
+ return src[len(dest):], err
+}
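+
+// Illustrative usage sketch, not part of the upstream source: draining a
+// reply of alternating id/name pairs by looping on the returned remainder.
+// reply is assumed to be a []interface{} obtained via Values.
+//
+//	var err error
+//	for len(reply) > 0 {
+//		var id int
+//		var name string
+//		if reply, err = Scan(reply, &id, &name); err != nil {
+//			break // handle error
+//		}
+//	}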
+
+type fieldSpec struct {
+ name string
+ index []int
+ omitEmpty bool
+}
+
+type structSpec struct {
+ m map[string]*fieldSpec
+ l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+ return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ switch {
+ case f.PkgPath != "" && !f.Anonymous:
+ // Ignore unexported fields.
+ case f.Anonymous:
+ // TODO: Handle pointers. Requires change to decoder and
+ // protection against infinite recursion.
+ if f.Type.Kind() == reflect.Struct {
+ compileStructSpec(f.Type, depth, append(index, i), ss)
+ }
+ default:
+ fs := &fieldSpec{name: f.Name}
+ tag := f.Tag.Get("redis")
+ p := strings.Split(tag, ",")
+ if len(p) > 0 {
+ if p[0] == "-" {
+ continue
+ }
+ if len(p[0]) > 0 {
+ fs.name = p[0]
+ }
+ for _, s := range p[1:] {
+ switch s {
+ case "omitempty":
+ fs.omitEmpty = true
+ default:
+ panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
+ }
+ }
+ }
+ d, found := depth[fs.name]
+ if !found {
+ d = 1 << 30
+ }
+ switch {
+ case len(index) == d:
+ // At same depth, remove from result.
+ delete(ss.m, fs.name)
+ j := 0
+ for i := 0; i < len(ss.l); i++ {
+ if fs.name != ss.l[i].name {
+ ss.l[j] = ss.l[i]
+ j += 1
+ }
+ }
+ ss.l = ss.l[:j]
+ case len(index) < d:
+ fs.index = make([]int, len(index)+1)
+ copy(fs.index, index)
+ fs.index[len(index)] = i
+ depth[fs.name] = len(index)
+ ss.m[fs.name] = fs
+ ss.l = append(ss.l, fs)
+ }
+ }
+ }
+}
+
+var (
+ structSpecMutex sync.RWMutex
+ structSpecCache = make(map[reflect.Type]*structSpec)
+ defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+ structSpecMutex.RLock()
+ ss, found := structSpecCache[t]
+ structSpecMutex.RUnlock()
+ if found {
+ return ss
+ }
+
+ structSpecMutex.Lock()
+ defer structSpecMutex.Unlock()
+ ss, found = structSpecCache[t]
+ if found {
+ return ss
+ }
+
+ ss = &structSpec{m: make(map[string]*fieldSpec)}
+ compileStructSpec(t, make(map[string]int), nil, ss)
+ structSpecCache[t] = ss
+ return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// 'redis' field tag to override the name:
+//
+// Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Each field uses RedisScan if available; otherwise:
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanStructValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Struct {
+ return errScanStructValue
+ }
+ ss := structSpecForType(d.Type())
+
+ if len(src)%2 != 0 {
+ return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+ }
+
+ for i := 0; i < len(src); i += 2 {
+ s := src[i+1]
+ if s == nil {
+ continue
+ }
+ name, ok := src[i].([]byte)
+ if !ok {
+ return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+ }
+ fs := ss.fieldSpec(name)
+ if fs == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+ }
+ }
+ return nil
+}
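+
+// Illustrative usage sketch, not part of the upstream source: populating a
+// hypothetical struct from an HGETALL reply (c is assumed to be an open Conn).
+//
+//	type user struct {
+//		Name  string `redis:"name"`
+//		Age   int    `redis:"age"`
+//		Token string `redis:"-"` // never scanned
+//	}
+//
+//	values, err := Values(c.Do("HGETALL", "user:1"))
+//	if err != nil {
+//		// handle error
+//	}
+//	var u user
+//	if err := ScanStruct(values, &u); err != nil {
+//		// handle error
+//	}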
+
+var (
+ errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the dest
+// slice must be integer, float, boolean, string, struct or pointer to struct
+// values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanSliceValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Slice {
+ return errScanSliceValue
+ }
+
+ isPtr := false
+ t := d.Type().Elem()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ isPtr = true
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Struct {
+ ensureLen(d, len(src))
+ for i, s := range src {
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.Index(i), s); err != nil {
+ return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
+ }
+ }
+ return nil
+ }
+
+ ss := structSpecForType(t)
+ fss := ss.l
+ if len(fieldNames) > 0 {
+ fss = make([]*fieldSpec, len(fieldNames))
+ for i, name := range fieldNames {
+ fss[i] = ss.m[name]
+ if fss[i] == nil {
+ return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
+ }
+ }
+ }
+
+ if len(fss) == 0 {
+ return errors.New("redigo.ScanSlice: no struct fields")
+ }
+
+ n := len(src) / len(fss)
+ if n*len(fss) != len(src) {
+ return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
+ }
+
+ ensureLen(d, n)
+ for i := 0; i < n; i++ {
+ d := d.Index(i)
+ if isPtr {
+ if d.IsNil() {
+ d.Set(reflect.New(t))
+ }
+ d = d.Elem()
+ }
+ for j, fs := range fss {
+ s := src[i*len(fss)+j]
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
+ }
+ }
+ }
+ return nil
+}
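+
+// Illustrative usage sketch, not part of the upstream source: src is assumed
+// to be a flat []interface{} holding ID, Name, ID, Name, ... so its length
+// must be a multiple of the struct's field count (two here).
+//
+//	type row struct {
+//		ID   int
+//		Name string
+//	}
+//
+//	var rows []row
+//	if err := ScanSlice(src, &rows); err != nil {
+//		// handle error
+//	}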
+
+// Args is a helper for constructing command arguments from structured values.
+type Args []interface{}
+
+// Add returns the result of appending value to args.
+func (args Args) Add(value ...interface{}) Args {
+ return append(args, value...)
+}
+
+// AddFlat returns the result of appending the flattened value of v to args.
+//
+// Maps are flattened by appending the alternating keys and map values to args.
+//
+// Slices are flattened by appending the slice elements to args.
+//
+// Structs are flattened by appending the alternating names and values of
+// exported fields to args. If v is a nil struct pointer, then nothing is
+// appended. The 'redis' field tag overrides struct field names. See ScanStruct
+// for more information on the use of the 'redis' field tag.
+//
+// Other types are appended to args as is.
+func (args Args) AddFlat(v interface{}) Args {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Struct:
+ args = flattenStruct(args, rv)
+ case reflect.Slice:
+ for i := 0; i < rv.Len(); i++ {
+ args = append(args, rv.Index(i).Interface())
+ }
+ case reflect.Map:
+ for _, k := range rv.MapKeys() {
+ args = append(args, k.Interface(), rv.MapIndex(k).Interface())
+ }
+ case reflect.Ptr:
+ if rv.Type().Elem().Kind() == reflect.Struct {
+ if !rv.IsNil() {
+ args = flattenStruct(args, rv.Elem())
+ }
+ } else {
+ args = append(args, v)
+ }
+ default:
+ args = append(args, v)
+ }
+ return args
+}
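+
+// Illustrative usage sketch, not part of the upstream source: building HMSET
+// arguments from a hypothetical struct (c is assumed to be an open Conn).
+//
+//	type point struct {
+//		X int `redis:"x"`
+//		Y int `redis:"y,omitempty"`
+//	}
+//
+//	args := Args{}.Add("point:1").AddFlat(&point{X: 1})
+//	// args is now ["point:1", "x", 1]; Y is skipped because it is zero
+//	// and tagged omitempty.
+//	err := c.Send("HMSET", args...)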
+
+func flattenStruct(args Args, v reflect.Value) Args {
+ ss := structSpecForType(v.Type())
+ for _, fs := range ss.l {
+ fv := v.FieldByIndex(fs.index)
+ if fs.omitEmpty {
+ var empty = false
+ switch fv.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ empty = fv.Len() == 0
+ case reflect.Bool:
+ empty = !fv.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ empty = fv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ empty = fv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ empty = fv.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ empty = fv.IsNil()
+ }
+ if empty {
+ continue
+ }
+ }
+ args = append(args, fs.name, fv.Interface())
+ }
+ return args
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go
new file mode 100755
index 0000000..0ef1c82
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/script.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+// Script encapsulates the source, hash and key count for a Lua script. See
+// http://redis.io/commands/eval for information on scripts in Redis.
+type Script struct {
+ keyCount int
+ src string
+ hash string
+}
+
+// NewScript returns a new script object. If keyCount is greater than or equal
+// to zero, then the count is automatically inserted in the EVAL command
+// argument list. If keyCount is less than zero, then the application supplies
+// the count as the first value in the keysAndArgs argument to the Do, Send and
+// SendHash methods.
+func NewScript(keyCount int, src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
+}
+
+func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
+ var args []interface{}
+ if s.keyCount < 0 {
+ args = make([]interface{}, 1+len(keysAndArgs))
+ args[0] = spec
+ copy(args[1:], keysAndArgs)
+ } else {
+ args = make([]interface{}, 2+len(keysAndArgs))
+ args[0] = spec
+ args[1] = s.keyCount
+ copy(args[2:], keysAndArgs)
+ }
+ return args
+}
+
+// Hash returns the script hash.
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+// Do evaluates the script. Under the covers, Do optimistically evaluates the
+// script using the EVALSHA command. If the command fails because the script is
+// not loaded, then Do evaluates the script using the EVAL command (thus
+// causing the script to load).
+func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
+ v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
+ if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
+ v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
+ }
+ return v, err
+}
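+
+// Illustrative usage sketch, not part of the upstream source: a one-key
+// script. The first Do falls back to EVAL; later calls hit the cached
+// EVALSHA path.
+//
+//	var getSet = NewScript(1, `return redis.call("GETSET", KEYS[1], ARGV[1])`)
+//	reply, err := getSet.Do(c, "foo", "bar")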
+
+// SendHash evaluates the script without waiting for the reply. The script is
+// evaluated with the EVALSHA command. The application must ensure that the
+// script is loaded by a previous call to Send, Do or Load methods.
+func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
+}
+
+// Send evaluates the script without waiting for the reply.
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
+}
+
+// Load loads the script without evaluating it.
+func (s *Script) Load(c Conn) error {
+ _, err := c.Do("SCRIPT", "LOAD", s.src)
+ return err
+}
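+
+// Illustrative usage sketch, not part of the upstream source: load once, then
+// pipeline with SendHash (s is a *Script and c an open Conn, both assumed).
+//
+//	if err := s.Load(c); err != nil {
+//		// handle error
+//	}
+//	if err := s.SendHash(c, "key1", "arg1"); err != nil {
+//		// handle error
+//	}
+//	err := c.Flush()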
diff --git a/vendor/github.com/gorilla/feeds/AUTHORS b/vendor/github.com/gorilla/feeds/AUTHORS
old mode 100644
new mode 100755
diff --git a/vendor/github.com/gorilla/feeds/LICENSE b/vendor/github.com/gorilla/feeds/LICENSE
old mode 100644
new mode 100755
diff --git a/vendor/github.com/gorilla/feeds/README.md b/vendor/github.com/gorilla/feeds/README.md
old mode 100644
new mode 100755
index 4d733cf..f8eee6d
--- a/vendor/github.com/gorilla/feeds/README.md
+++ b/vendor/github.com/gorilla/feeds/README.md
@@ -1,6 +1,6 @@
## gorilla/feeds
[GoDoc](https://godoc.org/github.com/gorilla/feeds)
-[Build Status](https://travis-ci.org/gorilla/feeds)
+[Build Status](https://circleci.com/gh/gorilla/feeds)
feeds is a web feed generator library for generating RSS, Atom and JSON feeds from Go
applications.
diff --git a/vendor/github.com/gorilla/feeds/atom.go b/vendor/github.com/gorilla/feeds/atom.go
old mode 100644
new mode 100755
index 5f483fe..7196f47
--- a/vendor/github.com/gorilla/feeds/atom.go
+++ b/vendor/github.com/gorilla/feeds/atom.go
@@ -80,7 +80,7 @@ type AtomFeed struct {
Link *AtomLink
Author *AtomAuthor `xml:"author,omitempty"`
Contributor *AtomContributor
- Entries []*AtomEntry
+ Entries []*AtomEntry `xml:"entry"`
}
type Atom struct {
@@ -158,12 +158,12 @@ func (a *Atom) AtomFeed() *AtomFeed {
return feed
}
-// return an XML-Ready object for an Atom object
+// FeedXml returns an XML-Ready object for an Atom object
func (a *Atom) FeedXml() interface{} {
return a.AtomFeed()
}
-// return an XML-ready object for an AtomFeed object
+// FeedXml returns an XML-ready object for an AtomFeed object
func (a *AtomFeed) FeedXml() interface{} {
return a
}
diff --git a/vendor/github.com/gorilla/feeds/doc.go b/vendor/github.com/gorilla/feeds/doc.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/gorilla/feeds/feed.go b/vendor/github.com/gorilla/feeds/feed.go
old mode 100644
new mode 100755
index 3474289..790a1b6
--- a/vendor/github.com/gorilla/feeds/feed.go
+++ b/vendor/github.com/gorilla/feeds/feed.go
@@ -85,7 +85,7 @@ func ToXML(feed XmlFeed) (string, error) {
return s, nil
}
-// Write a feed object (either a Feed, AtomFeed, or RssFeed) as XML into
+// WriteXML writes a feed object (either a Feed, AtomFeed, or RssFeed) as XML into
// the writer. Returns an error if XML marshaling fails.
func WriteXML(feed XmlFeed, w io.Writer) error {
x := feed.FeedXml()
@@ -104,7 +104,7 @@ func (f *Feed) ToAtom() (string, error) {
return ToXML(a)
}
-// Writes an Atom representation of this feed to the writer.
+// WriteAtom writes an Atom representation of this feed to the writer.
func (f *Feed) WriteAtom(w io.Writer) error {
return WriteXML(&Atom{f}, w)
}
@@ -115,7 +115,7 @@ func (f *Feed) ToRss() (string, error) {
return ToXML(r)
}
-// Writes an RSS representation of this feed to the writer.
+// WriteRss writes an RSS representation of this feed to the writer.
func (f *Feed) WriteRss(w io.Writer) error {
return WriteXML(&Rss{f}, w)
}
diff --git a/vendor/github.com/gorilla/feeds/json.go b/vendor/github.com/gorilla/feeds/json.go
old mode 100644
new mode 100755
index 75a82fd..0302c22
--- a/vendor/github.com/gorilla/feeds/json.go
+++ b/vendor/github.com/gorilla/feeds/json.go
@@ -103,7 +103,7 @@ type JSONFeed struct {
Favicon string `json:"favicon,omitempty"`
Author *JSONAuthor `json:"author,omitempty"`
Expired *bool `json:"expired,omitempty"`
- Hubs []*JSONItem `json:"hubs,omitempty"`
+ Hubs []*JSONHub `json:"hubs,omitempty"`
Items []*JSONItem `json:"items,omitempty"`
}
diff --git a/vendor/github.com/gorilla/feeds/rss.go b/vendor/github.com/gorilla/feeds/rss.go
old mode 100644
new mode 100755
index 39fe84b..09179df
--- a/vendor/github.com/gorilla/feeds/rss.go
+++ b/vendor/github.com/gorilla/feeds/rss.go
@@ -11,7 +11,7 @@ import (
)
// private wrapper around the RssFeed which gives us the .. xml
-type rssFeedXml struct {
+type RssFeedXml struct {
XMLName xml.Name `xml:"rss"`
Version string `xml:"version,attr"`
ContentNamespace string `xml:"xmlns:content,attr"`
@@ -61,7 +61,7 @@ type RssFeed struct {
SkipDays string `xml:"skipDays,omitempty"`
Image *RssImage
TextInput *RssTextInput
- Items []*RssItem
+ Items []*RssItem `xml:"item"`
}
type RssItem struct {
@@ -151,16 +151,16 @@ func (r *Rss) RssFeed() *RssFeed {
return channel
}
-// return an XML-Ready object for an Rss object
+// FeedXml returns an XML-Ready object for an Rss object
func (r *Rss) FeedXml() interface{} {
// only generate version 2.0 feeds for now
return r.RssFeed().FeedXml()
}
-// return an XML-ready object for an RssFeed object
+// FeedXml returns an XML-ready object for an RssFeed object
func (r *RssFeed) FeedXml() interface{} {
- return &rssFeedXml{
+ return &RssFeedXml{
Version: "2.0",
Channel: r,
ContentNamespace: "http://purl.org/rss/1.0/modules/content/",
diff --git a/vendor/github.com/gorilla/feeds/test.atom b/vendor/github.com/gorilla/feeds/test.atom
new file mode 100755
index 0000000..aa15214
--- /dev/null
+++ b/vendor/github.com/gorilla/feeds/test.atom
@@ -0,0 +1,92 @@
+<!-- Atom test fixture; the element markup was lost in extraction. Recoverable
+     content: a feed for http://example.com/ titled "RSS for Node", built
+     Tue, 30 Oct 2018 23:22:37 GMT (ttl 60), with ten entries at
+     http://example.com/test/1540941180 through http://example.com/test/1540941720,
+     one per minute from 23:13:00 to 23:22:00 GMT. -->
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/feeds/test.rss b/vendor/github.com/gorilla/feeds/test.rss
new file mode 100755
index 0000000..8d912ab
--- /dev/null
+++ b/vendor/github.com/gorilla/feeds/test.rss
@@ -0,0 +1,96 @@
+<!-- RSS test fixture; the element markup was lost in extraction. Recoverable
+     content mirrors test.atom: a channel for http://example.com/ titled
+     "RSS for Node", last built Tue, 30 Oct 2018 23:22:37 GMT (ttl 60), with
+     ten items at http://example.com/test/1540941180 through
+     http://example.com/test/1540941720, dated 23:13:00 to 23:22:00 GMT. -->
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/feeds/to-implement.md b/vendor/github.com/gorilla/feeds/to-implement.md
old mode 100644
new mode 100755
diff --git a/vendor/github.com/gorilla/feeds/uuid.go b/vendor/github.com/gorilla/feeds/uuid.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100755
index 0000000..b03310a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100755
index 0000000..a828d28
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+ @echo "Please use \`make ' where is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100755
index 0000000..187ef67
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[Build Status](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100755
index 0000000..8e26ffe
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
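+
+// Illustrative usage sketch, not part of the upstream source: both entry
+// points as seen from a client package, assuming the data was decoded by
+// encoding/json.
+//
+//	var data interface{}
+//	if err := json.Unmarshal([]byte(`{"foo": {"bar": {"baz": [0, 1, 2]}}}`), &data); err != nil {
+//		// handle error
+//	}
+//	result, err := jmespath.Search("foo.bar.baz[2]", data)
+//	// result == 2.0 (JSON numbers decode to float64)
+//
+//	compiled := jmespath.MustCompile("foo.bar.baz[2]")
+//	result, err = compiled.Search(data)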
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100755
index 0000000..1cd2d23
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100755
index 0000000..9b7cd89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
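+
+// Illustrative sketch, not part of the upstream source: built-ins are reached
+// by name from an expression, so from a client package:
+//
+//	v, err := jmespath.Search("length(@)", []interface{}{"a", "b"})
+//	// v == 2.0 (all JMESPath numbers are float64)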
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Index(searchStr, elStr) != -1, nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if arg == true || arg == false {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100755
index 0000000..13c7460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+		values := make([]interface{}, 0, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
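+// fieldFromStruct resolves a JMESPath field on a struct (or pointer to a
+// struct) by upper-casing the first rune of the key, so "name" maps to the
+// exported Go field "Name". A missing field yields nil rather than an error.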
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
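+// flattenWithReflection flattens one level of an arbitrary Go slice type
+// via reflection; it is the fallback when the value is not a []interface{}.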
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
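+// sliceWithReflection applies a slice expression to an arbitrary Go slice
+// type by first copying its elements into a []interface{}.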
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
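+// filterProjectionWithReflection is the reflection-based fallback for
+// filter projections over slices that are not []interface{}.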
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
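+// projectWithReflection is the reflection-based fallback for projections
+// over slices that are not []interface{}.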
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100755
index 0000000..817900c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+	lastWidth  int          // The width of the most recently read rune.
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
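+//
+// For example, with Expression "foo..bar" and Offset 4 the result is:
+//
+//	foo..bar
+//	    ^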
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [0-9a-zA-Z_], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
+
+// Consume characters until the ending rune "end" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "end", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+	// Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal(asJSON, &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+		if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100755
index 0000000..1240a17
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+	if len(node.children) > 0 {
+		output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+		childIndent := nextIndent + 2
+		for _, elem := range node.children {
+			output += elem.PrettyPrint(childIndent)
+		}
+		output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent))
+	}
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
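+// bindingPowers assigns each token type its binding power for the Pratt
+// parser below. Higher powers bind more tightly: tDot (40) binds tighter
+// than tPipe (1), so "a.b | c" parses as "(a.b) | c".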
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+ }
+ return parsed, nil
+}
+
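+// parseExpression implements top-down operator precedence (Pratt) parsing:
+// nud handles tokens in prefix position, led handles tokens in infix
+// position, and parsing continues while the next token binds more tightly
+// than the given bindingPower.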
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+				return ASTNode{}, err
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+				return ASTNode{}, err
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
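+// projectIfSlice wraps left and right in an index expression. If the right
+// node is a slice, the index expression becomes the left-hand side of a
+// projection, so trailing expressions apply to each sliced element.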
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+		return ASTNode{}, p.syntaxError("Unexpected token for projection: " + current.String())
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100755
index 0000000..dae79cb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100755
index 0000000..ddc1b7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
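+//
+// For example, isFalse("") and isFalse([]interface{}{}) are true, while
+// isFalse(0) is false: numbers are never false values in JMESPath.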
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// slice supports the [start:stop:step] style slicing defined by JMESPath.
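+// For example, slicing [0, 1, 2, 3, 4] with [1:4:2] yields [1, 3].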
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+	stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
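+// capSlice clamps a slice endpoint to the valid range for a sequence of
+// the given length, resolving negative indices relative to the end; the
+// out-of-range result depends on the direction of the step.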
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
new file mode 100755
index 0000000..8b1ee86
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+ - Fork it
+ - Create your feature branch (git checkout -b my-new-feature)
+ - Commit your changes (git commit -am 'Add some feature')
+ - Push to the branch (git push origin my-new-feature)
+ - Create new Pull Request
+
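+A typical command sequence for the steps above might look like this (assuming your fork is the `origin` remote):
+
+```sh
+git checkout -b my-new-feature
+# ... make your changes ...
+git commit -am 'Add some feature'
+git push origin my-new-feature
+# then open a new pull request from the branch on your fork
+```
+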
+* When you're ready to create a pull request, be sure to:
+ - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+ - Run `go fmt`
+ - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+    - Make sure `go test -race ./...` and `go build` complete.
+      NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set the credentials as the environment variables
+      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests, please use ``go test -short -race ./...``.
+
+* Read the [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
+    - The `minio-go` project strictly conforms to Golang style
+    - If you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE
new file mode 100755
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100755
index 0000000..1797307
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+Tag and sign your release commit. Note that this step requires you to have access to Minio's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+ libraryVersion = "4.0.1"
+```
+
+Commit your changes
+```
+$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections, `highlights` and `changelog`: Highlights is a bulleted list of the salient features in the release, and Changelog contains the list of all commits since the last release.
+
+To generate `changelog`
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_tag>..<latest_tag>
+```
diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
new file mode 100755
index 0000000..bad81ff
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/Makefile
@@ -0,0 +1,15 @@
+all: checks
+
+checks:
+ @go get -t ./...
+ @go vet ./...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+ @go get github.com/dustin/go-humanize/...
+ @go get github.com/sirupsen/logrus/...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
+	@mkdir -p /tmp/examples && for i in $$(echo examples/s3/*); do go build -o /tmp/examples/$$(basename $${i:0:-3}) $${i}; done
+ @go get -u github.com/a8m/mark/...
+ @go get -u github.com/minio/cli/...
+ @go get -u golang.org/x/tools/cmd/goimports
+ @go get -u github.com/gernest/wow/...
+ @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl
diff --git a/vendor/github.com/minio/minio-go/NOTICE b/vendor/github.com/minio/minio-go/NOTICE
new file mode 100755
index 0000000..c521791
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/NOTICE
@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 Minio, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
new file mode 100755
index 0000000..85ff2cb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -0,0 +1,236 @@
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [Slack](https://slack.minio.io) [Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go?badge) [License](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+
+This quickstart guide shows you how to install the Minio client SDK, connect to Minio, and build a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
+
+## Download from Github
+```sh
+go get -u github.com/minio/minio-go
+```
+
+## Initialize Minio Client
+The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description|
+| :--- | :--- |
+| endpoint | URL to object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
+
+
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
+
+We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a new bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if we already own this bucket (which happens if you run this twice)
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // Upload the zip file
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // Upload the zip file with FPutObject
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### Run FileUploader
+```sh
+go run FileUploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here.
+
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
+
+### API Reference : Object Operations
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
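+A minimal sketch of reading an object back with `GetObject`, reusing `minioClient`, `bucketName` and `objectName` from the FileUploader example above (the local path is illustrative, and the fragment assumes the `io`, `os` and `log` packages are imported):
+
+```go
+object, err := minioClient.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+if err != nil {
+    log.Fatalln(err)
+}
+defer object.Close()
+
+localFile, err := os.Create("/tmp/local-copy.zip")
+if err != nil {
+    log.Fatalln(err)
+}
+defer localFile.Close()
+
+// Stream the object's contents into the local file.
+if _, err = io.Copy(localFile, object); err != nil {
+    log.Fatalln(err)
+}
+```
+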
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
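+A minimal sketch of generating a presigned download URL with `PresignedGetObject`, reusing the client and names from the example above (the one-hour expiry is illustrative and assumes the `time` package is imported; pass request parameters instead of `nil` if you need response header overrides):
+
+```go
+// The generated URL is valid for one hour.
+presignedURL, err := minioClient.PresignedGetObject(bucketName, objectName, time.Hour, nil)
+if err != nil {
+    log.Fatalln(err)
+}
+log.Println(presignedURL)
+```
+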
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket lifecycle Operations
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+[Build Status (Travis CI)](https://travis-ci.org/minio/minio-go)
+[Build Status (AppVeyor)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md
new file mode 100755
index 0000000..a5acf19
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README_zh_CN.md
@@ -0,0 +1,245 @@
+# Minio Go SDK for Amazon S3 Compatible Cloud Storage [Slack](https://slack.minio.io) [Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage providers:**
+
+- AWS Signature Version 4
+ - Amazon S3
+ - Minio
+
+- AWS Signature Version 2
+ - Google Cloud Storage (Compatibility Mode)
+ - Openstack Swift + Swift3 middleware
+ - Ceph Object Gateway
+ - Riak CS
+
+This document covers how to install the Minio client SDK, connect to Minio, and run a file upload example. For a complete list of APIs and examples, please refer to the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
+
+## Download from Github
+```sh
+go get -u github.com/minio/minio-go
+```
+
+## Initialize Minio Client
+The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL of the object storage service |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password of your account. |
+| secure | Set this value to true to enable HTTPS. |
+
+
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%#v\n", minioClient) // minioClient初使化成功
+}
+```
+
+## Quick Start Example - File Uploader
+This example program connects to an object storage server, creates a bucket, and uploads a file to it.
+
+We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a new bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if the bucket already exists.
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // Upload a zip file.
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // Upload the zip file with FPutObject.
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### Run FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API Reference is available here.
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
+
+### API Reference : Object Operations
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference : Encrypted Object Operations
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fputencrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
+
+[Build Status (Travis CI)](https://travis-ci.org/minio/minio-go)
+[Build Status (AppVeyor)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
new file mode 100755
index 0000000..a69122e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -0,0 +1,565 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// DestinationInfo - type with information about the object to be
+// created via server-side copy requests, using the Compose API.
+type DestinationInfo struct {
+ bucket, object string
+ encryption encrypt.ServerSide
+
+ // if no user-metadata is provided, it is copied from source
+ // (when there is only one source object in the compose
+ // request)
+ userMetadata map[string]string
+}
+
+// NewDestinationInfo - creates a compose-object/copy-source
+// destination info object.
+//
+// `sse` is the server-side-encryption configuration for the
+// destination object. If it is nil, no encryption is performed.
+//
+// `userMeta` is the user-metadata key-value pairs to be set on the
+// destination. The keys are automatically prefixed with `x-amz-meta-`
+// if needed. If nil is passed, and if only a single source (of any
+// size) is provided in the ComposeObject call, then metadata from the
+// source is copied to the destination.
+func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucket); err != nil {
+ return d, err
+ }
+ if err = s3utils.CheckValidObjectName(object); err != nil {
+ return d, err
+ }
+
+ // Process custom-metadata to remove a `x-amz-meta-` prefix if
+ // present and validate that keys are distinct (after this
+ // prefix removal).
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
+ }
+ m[k] = v
+ }
+
+ return DestinationInfo{
+ bucket: bucket,
+ object: object,
+ encryption: sse,
+ userMetadata: m,
+ }, nil
+}
+
+// getUserMetaHeadersMap - construct appropriate key-value pairs to send
+// as headers from metadata map to pass into copy-object request. For
+// single part copy-object (i.e. non-multipart object), enable the
+// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
+// `REPLACE`, so that metadata headers from the source are not copied
+// over.
+func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
+ if len(d.userMetadata) == 0 {
+ return nil
+ }
+ r := make(map[string]string)
+ if withCopyDirectiveHeader {
+ r["x-amz-metadata-directive"] = "REPLACE"
+ }
+ for k, v := range d.userMetadata {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+ r[k] = v
+ } else {
+ r["x-amz-meta-"+k] = v
+ }
+ }
+ return r
+}
+
+// SourceInfo - represents a source object to be copied, using
+// server-side copying APIs.
+type SourceInfo struct {
+ bucket, object string
+ start, end int64
+ encryption encrypt.ServerSide
+ // Headers to send with the upload-part-copy request involving
+ // this source object.
+ Headers http.Header
+}
+
+// NewSourceInfo - create a compose-object/copy-object source info
+// object.
+//
+// `sse` is the server-side-encryption configuration of the source
+// object. It may be nil if the source is not encrypted.
+func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo {
+ r := SourceInfo{
+ bucket: bucket,
+ object: object,
+ start: -1, // range is unspecified by default
+ encryption: sse,
+ Headers: make(http.Header),
+ }
+
+ // Set the source header
+ r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
+ return r
+}
+
+// SetRange - Set the start and end offset of the source object to be
+// copied. If this method is not called, the whole source object is
+// copied.
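+// For example, SetRange(0, 99) copies only the first 100 bytes of the source.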
+func (s *SourceInfo) SetRange(start, end int64) error {
+ if start > end || start < 0 {
+ return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ // Note that 0 <= start <= end
+ s.start, s.end = start, end
+ return nil
+}
+
+// SetMatchETagCond - Set ETag match condition. The object is copied
+// only if the etag of the source matches the value given here.
+func (s *SourceInfo) SetMatchETagCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-match", etag)
+ return nil
+}
+
+// SetMatchETagExceptCond - Set the ETag match exception
+// condition. The object is copied only if the etag of the source is
+// not the value given here.
+func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-none-match", etag)
+ return nil
+}
+
+// SetModifiedSinceCond - Set the modified since condition.
+func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetUnmodifiedSinceCond - Set the unmodified since condition.
+func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// Helper to fetch size, ETag and user metadata of an object using a StatObject call.
+func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
+ // Get object info - need size and etag here. Also, decryption
+ // headers are added to the stat request if given.
+ var objInfo ObjectInfo
+ opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}}
+ objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
+ if err != nil {
+ err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
+ } else {
+ size = objInfo.Size
+ etag = objInfo.ETag
+ userMeta = make(map[string]string)
+ for k, v := range objInfo.Metadata {
+ if strings.HasPrefix(k, "x-amz-meta-") {
+ if len(v) > 0 {
+ userMeta[k] = v[0]
+ }
+ }
+ }
+ }
+ return
+}
+
+// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
+func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string) (ObjectInfo, error) {
+
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ // Send copy-object request.
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
+ }
+
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ objInfo := ObjectInfo{
+ Key: destObject,
+ ETag: strings.Trim(cpObjRes.ETag, "\""),
+ LastModified: cpObjRes.LastModified,
+ }
+ return objInfo, nil
+}
+
+func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ headers := make(http.Header)
+
+ // Set source
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ if startOffset < 0 {
+ return p, ErrInvalidArgument("startOffset must be non-negative")
+ }
+
+ if length >= 0 {
+ headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+ }
+
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ queryValues := make(url.Values)
+ queryValues.Set("partNumber", strconv.Itoa(partID))
+ queryValues.Set("uploadId", uploadID)
+
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ queryValues: queryValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, destBucket, destObject)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partID, cpObjRes.ETag
+ return p, nil
+}
+
+// uploadPartCopy - helper function to create a part in a multipart
+// upload via an upload-part-copy request
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
+func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
+ headers http.Header) (p CompletePart, err error) {
+
+ // Build query parameters
+ urlValues := make(url.Values)
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ urlValues.Set("uploadId", uploadID)
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: bucket,
+ objectName: object,
+ customHeader: headers,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return p, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, bucket, object)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
+ return p, nil
+}
+
+// ComposeObjectWithProgress - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations. Optionally takes progress reader hook
+// for applications to look at current progress.
+func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+ return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
+ }
+ ctx := context.Background()
+ srcSizes := make([]int64, len(srcs))
+ var totalSize, size, totalParts int64
+ var srcUserMeta map[string]string
+ var etag string
+ var err error
+ for i, src := range srcs {
+ size, etag, srcUserMeta, err = src.getProps(c)
+ if err != nil {
+ return err
+ }
+
+ // Error out if client side encryption is used in this source object when
+ // more than one source objects are given.
+ if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
+ return ErrInvalidArgument(
+ fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
+ }
+
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.start != -1 {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.end >= size {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.start, src.end, size))
+ }
+ size = src.end - src.start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if size < absMinPartSize && i < len(srcs)-1 {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
+ }
+
+ // Is data to copy too large?
+ totalSize += size
+ if totalSize > maxMultipartPutObjectSize {
+ return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcSizes[i] = size
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(size)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return ErrInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is copied wholly and is at most 5GiB in
+ // size; empty files are also supported).
+ if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
+ return c.CopyObjectWithProgress(dst, srcs[0], progress)
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Ensure that the object has not been changed while
+ // we are copying data.
+ for _, src := range srcs {
+ if src.Headers.Get("x-amz-copy-source-if-match") == "" {
+ src.SetMatchETagCond(etag)
+ }
+ }
+
+ // 2. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified, and there is only one source,
+ // (only) then metadata from source is copied.
+ userMeta := dst.getUserMetaHeadersMap(false)
+ metaMap := userMeta
+ if len(userMeta) == 0 && len(srcs) == 1 {
+ metaMap = srcUserMeta
+ }
+ metaHeaders := make(map[string]string)
+ for k, v := range metaMap {
+ metaHeaders[k] = v
+ }
+
+ uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders})
+ if err != nil {
+ return err
+ }
+
+ // 3. Perform copy part uploads
+ objParts := []CompletePart{}
+ partIndex := 1
+ for i, src := range srcs {
+ h := src.Headers
+ if src.encryption != nil {
+ encrypt.SSECopy(src.encryption).Marshal(h)
+ }
+ // Add destination encryption headers
+ if dst.encryption != nil {
+ dst.encryption.Marshal(h)
+ }
+
+ // calculate start/end indices of parts after
+ // splitting.
+ startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
+ for j, start := range startIdx {
+ end := endIdx[j]
+
+ // Add (or reset) source range header for
+ // upload part copy request.
+ h.Set("x-amz-copy-source-range",
+ fmt.Sprintf("bytes=%d-%d", start, end))
+
+ // make upload-part-copy request
+ complPart, err := c.uploadPartCopy(ctx, dst.bucket,
+ dst.object, uploadID, partIndex, h)
+ if err != nil {
+ return err
+ }
+ if progress != nil {
+ io.CopyN(ioutil.Discard, progress, end-start+1)
+ }
+ objParts = append(objParts, complPart)
+ partIndex++
+ }
+ }
+
+ // 4. Make final complete-multipart request.
+ _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
+ completeMultipartUpload{Parts: objParts})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ComposeObject - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations.
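+//
+// A minimal usage sketch (assuming minioClient was created with
+// minio.New; bucket and object names are illustrative):
+//
+// src := minio.NewSourceInfo("source-bucket", "part-object", nil)
+// dst, err := minio.NewDestinationInfo("dest-bucket", "final-object", nil, nil)
+// if err != nil {
+// log.Fatalln(err)
+// }
+// err = minioClient.ComposeObject(dst, []minio.SourceInfo{src})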
+func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+ return c.ComposeObjectWithProgress(dst, srcs, nil)
+}
+
+// partsRequired computes the number of parts needed to copy `size`
+// bytes, i.e. ceiling(size / (maxMultipartPutObjectSize / (maxPartsCount - 1))).
+func partsRequired(size int64) int64 {
+ maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
+ r := size / int64(maxPartSize)
+ if size%int64(maxPartSize) > 0 {
+ r++
+ }
+ return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
+func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
+ if size == 0 {
+ return
+ }
+
+ reqParts := partsRequired(size)
+ startIndex = make([]int64, reqParts)
+ endIndex = make([]int64, reqParts)
+ // Compute number of required parts `k`, as:
+ //
+ // k = ceiling(size / copyPartSize)
+ //
+ // Now, distribute the `size` bytes in the source into
+ // k parts as evenly as possible:
+ //
+ // r parts sized (q+1) bytes, and
+ // (k - r) parts sized q bytes, where
+ //
+ // size = q * k + r (by simple division of size by k,
+ // so that 0 <= r < k)
+ //
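+ // For example, size=10 and k=3 give q=3 and r=1, so the
+ // parts are sized 4, 3 and 3 bytes.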
+ start := src.start
+ if start == -1 {
+ start = 0
+ }
+ quot, rem := size/reqParts, size%reqParts
+ nextStart := start
+ for j := int64(0); j < reqParts; j++ {
+ curPartSize := quot
+ if j < rem {
+ curPartSize++
+ }
+
+ cStart := nextStart
+ cEnd := cStart + curPartSize - 1
+ nextStart = cEnd + 1
+
+ startIndex[j], endIndex[j] = cStart, cEnd
+ }
+ return
+}
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
new file mode 100755
index 0000000..63fc089
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -0,0 +1,84 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "time"
+)
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+ // The name of the bucket.
+ Name string `json:"name"`
+ // Date the bucket was created.
+ CreationDate time.Time `json:"creationDate"`
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+ // An ETag is optionally set to md5sum of an object. In case of multipart objects,
+ // ETag is of the form MD5SUM-N where MD5SUM is the md5sum of all the individual
+ // part md5sums concatenated into one string, and N is the number of parts.
+ ETag string `json:"etag"`
+
+ Key string `json:"name"` // Name of the object
+ LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+ Size int64 `json:"size"` // Size in bytes of the object.
+ ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata" xml:"-"`
+
+ // Owner name.
+ Owner struct {
+ DisplayName string `json:"name"`
+ ID string `json:"id"`
+ } `json:"owner"`
+
+ // The class of storage used to store the object.
+ StorageClass string `json:"storageClass"`
+
+ // Error
+ Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+
+ // Size in bytes of the object.
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
new file mode 100755
index 0000000..655991c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -0,0 +1,286 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+   <Code>AccessDenied</Code>
+   <Message>Access Denied</Message>
+   <BucketName>bucketName</BucketName>
+   <Key>objectName</Key>
+   <RequestId>F19772218238A85A</RequestId>
+   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+
+ // Headers of the returned S3 XML error
+ Headers http.Header `xml:"-" json:"-"`
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go"
+// ...
+// ...
+// reader, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// Error - Returns S3 error string.
+func (e ErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
+ return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return ErrInvalidArgument(msg)
+ }
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ }
+
+ err := xmlDecoder(resp.Body, &errResp)
+ // XML decoding failed or the body was empty; fall back to HTTP headers.
+ if err != nil {
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ if objectName == "" {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ }
+ } else {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ }
+ case http.StatusForbidden:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ case http.StatusConflict:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ }
+ case http.StatusPreconditionFailed:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "PreconditionFailed",
+ Message: s3ErrorResponseMap["PreconditionFailed"],
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ default:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: resp.Status,
+ Message: resp.Status,
+ BucketName: bucketName,
+ }
+ }
+ }
+
+ // Save hostID, requestID and region information
+ // from headers if not available through error XML.
+ if errResp.RequestID == "" {
+ errResp.RequestID = resp.Header.Get("x-amz-request-id")
+ }
+ if errResp.HostID == "" {
+ errResp.HostID = resp.Header.Get("x-amz-id-2")
+ }
+ if errResp.Region == "" {
+ errResp.Region = resp.Header.Get("x-amz-bucket-region")
+ }
+ if errResp.Code == "InvalidRegion" && errResp.Region != "" {
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
+ }
+
+ // Save headers returned in the API XML error
+ errResp.Headers = resp.Header
+
+ return errResp
+}
+
+// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
+func ErrTransferAccelerationBucket(bucketName string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
+ BucketName: bucketName,
+ }
+}
+
+// ErrEntityTooLarge - Input size is larger than supported maximum.
+func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooLarge",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrEntityTooSmall - Input size is smaller than supported minimum.
+func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrUnexpectedEOF - Unexpected end of file reached.
+func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
+ msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "UnexpectedEOF",
+ Message: msg,
+ BucketName: bucketName,
+ Key: objectName,
+ }
+}
+
+// ErrInvalidBucketName - Invalid bucket name response.
+func ErrInvalidBucketName(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectName - Invalid object name response.
+func ErrInvalidObjectName(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchKey",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrInvalidObjectPrefix - Invalid object prefix response is
+// similar to object name response.
+var ErrInvalidObjectPrefix = ErrInvalidObjectName
+
+// ErrInvalidArgument - Invalid argument response.
+func ErrInvalidArgument(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrNoSuchBucketPolicy - No Such Bucket Policy response
+// The specified bucket does not have a bucket policy.
+func ErrNoSuchBucketPolicy(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchBucketPolicy",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrAPINotSupported - API not supported response
+// The specified API call is not supported
+func ErrAPINotSupported(message string) error {
+ return ErrorResponse{
+ StatusCode: http.StatusNotImplemented,
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-lifecycle.go b/vendor/github.com/minio/minio-go/api-get-lifecycle.go
new file mode 100755
index 0000000..8097bfc
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-lifecycle.go
@@ -0,0 +1,77 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketLifecycle - get bucket lifecycle.
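+// The lifecycle configuration is returned as raw XML; an empty string
+// with a nil error means no lifecycle is configured on the bucket.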
+func (c Client) GetBucketLifecycle(bucketName string) (string, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ bucketLifecycle, err := c.getBucketLifecycle(bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchLifecycleConfiguration" {
+ return "", nil
+ }
+ return "", err
+ }
+ return bucketLifecycle, nil
+}
+
+// Request server for current bucket lifecycle.
+func (c Client) getBucketLifecycle(bucketName string) (string, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+
+ // Execute GET on bucket to get lifecycle.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return "", httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ lifecycle := string(bucketLifecycleBuf)
+ return lifecycle, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-acl.go b/vendor/github.com/minio/minio-go/api-get-object-acl.go
new file mode 100755
index 0000000..af5544d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object-acl.go
@@ -0,0 +1,136 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "net/http"
+ "net/url"
+)
+
+type accessControlPolicy struct {
+ Owner struct {
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName"`
+ } `xml:"Owner"`
+ AccessControlList struct {
+ Grant []struct {
+ Grantee struct {
+ ID string `xml:"ID"`
+ DisplayName string `xml:"DisplayName"`
+ URI string `xml:"URI"`
+ } `xml:"Grantee"`
+ Permission string `xml:"Permission"`
+ } `xml:"Grant"`
+ } `xml:"AccessControlList"`
+}
+
+// GetObjectACL - get object ACLs.
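+//
+// The ACL is reported via the returned ObjectInfo's Metadata: a canned
+// ACL (when one can be inferred) under "X-Amz-Acl", otherwise the
+// individual grants under "X-Amz-Grant-*" keys.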
+func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) {
+
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: url.Values{
+ "acl": []string{""},
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+
+ res := &accessControlPolicy{}
+
+ if err := xmlDecoder(resp.Body, res); err != nil {
+ return nil, err
+ }
+
+ objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ cannedACL := getCannedACL(res)
+ if cannedACL != "" {
+ objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
+ return &objInfo, nil
+ }
+
+ grantACL := getAmzGrantACL(res)
+ for k, v := range grantACL {
+ objInfo.Metadata[k] = v
+ }
+
+ return &objInfo, nil
+}
+
+func getCannedACL(aCPolicy *accessControlPolicy) string {
+ grants := aCPolicy.AccessControlList.Grant
+
+ switch {
+ case len(grants) == 1:
+ if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
+ return "private"
+ }
+ case len(grants) == 2:
+ for _, g := range grants {
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+ return "authenticated-read"
+ }
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+ return "public-read"
+ }
+ if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
+ return "bucket-owner-read"
+ }
+ }
+ case len(grants) == 3:
+ for _, g := range grants {
+ if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+ return "public-read-write"
+ }
+ }
+ }
+ return ""
+}
+
+func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
+ grants := aCPolicy.AccessControlList.Grant
+ res := map[string][]string{}
+
+ for _, g := range grants {
+ switch {
+ case g.Permission == "READ":
+ res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
+ case g.Permission == "WRITE":
+ res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
+ case g.Permission == "READ_ACP":
+ res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
+ case g.Permission == "WRITE_ACP":
+ res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
+ case g.Permission == "FULL_CONTROL":
+ res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-context.go b/vendor/github.com/minio/minio-go/api-get-object-context.go
new file mode 100755
index 0000000..f8dfac7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object-context.go
@@ -0,0 +1,26 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "context"
+
+// GetObjectWithContext - returns a seekable, readable object.
+// The options can be used to specify the GET request further.
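+//
+// A minimal usage sketch (assuming minioClient was created with
+// minio.New; the timeout and names are illustrative):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// obj, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{})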
+func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(ctx, bucketName, objectName, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
new file mode 100755
index 0000000..a852220
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -0,0 +1,125 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// FGetObjectWithContext - download contents of an object to a local file.
+// The options can be used to specify the GET request further.
+func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts)
+}
+
+// FGetObject - download contents of an object to a local file.
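+//
+// A minimal usage sketch (assuming minioClient was created with
+// minio.New; names and the local path are illustrative):
+//
+// err := minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})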
+func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
+}
+
+// fGetObjectWithContext - fgetObject wrapper function with context
+func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Verify if destination already exists.
+ st, err := os.Stat(filePath)
+ if err == nil {
+ // If the destination exists and is a directory.
+ if st.IsDir() {
+ return ErrInvalidArgument("fileName is a directory.")
+ }
+ }
+
+ // Proceed if the file does not exist; return for all other errors.
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ }
+
+ // Extract top level directory.
+ objectDir, _ := filepath.Split(filePath)
+ if objectDir != "" {
+ // Create any missing top level directories.
+ if err := os.MkdirAll(objectDir, 0700); err != nil {
+ return err
+ }
+ }
+
+ // Stat the object to fetch its ETag, used to name the temporary part file.
+ objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
+ if err != nil {
+ return err
+ }
+
+ // Write to a temporary file "<filePath><ETag>.part.minio" before renaming into place.
+ filePartPath := filePath + objectStat.ETag + ".part.minio"
+
+ // If exists, open in append mode. If not create it as a part file.
+ filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+ if err != nil {
+ return err
+ }
+
+ // Issue Stat to get the current offset.
+ st, err = filePart.Stat()
+ if err != nil {
+ return err
+ }
+
+ // Initialize get object request headers to set the
+ // appropriate range offsets to read from.
+ if st.Size() > 0 {
+ opts.SetRange(st.Size(), 0)
+ }
+
+ // Issue a (possibly ranged) GET starting at the current offset.
+ objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return err
+ }
+
+ // Write to the part file.
+ if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
+ return err
+ }
+
+ // Close the file before rename, this is specifically needed for Windows users.
+ if err = filePart.Close(); err != nil {
+ return err
+ }
+
+ // Safely completed. Now commit by renaming to actual filename.
+ if err = os.Rename(filePartPath, filePath); err != nil {
+ return err
+ }
+
+ // Return.
+ return nil
+}
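
A short sketch of the file-download wrapper above, reusing the client from the earlier sketch (paths and names are illustrative). Note the resume behavior: a partial download left in the "<filePath><ETag>.part.minio" staging file is continued via a ranged GET rather than restarted:

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()

// Stages through "/tmp/myobject<ETag>.part.minio", then renames into place.
if err := client.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{}); err != nil {
	log.Fatalln(err)
}
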
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
new file mode 100755
index 0000000..0bf556e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -0,0 +1,659 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetObject - returns a seekable, readable object.
+func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
+}
+
+// getObjectWithContext - GetObject wrapper function that accepts a request context.
+func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+
+ var httpReader io.ReadCloser
+ var objectInfo ObjectInfo
+ var err error
+
+ // Create request channel.
+ reqCh := make(chan getRequest)
+ // Create response channel.
+ resCh := make(chan getResponse)
+ // Create done channel.
+ doneCh := make(chan struct{})
+
+ // This routine feeds partial object data as and when the caller reads.
+ go func() {
+ defer close(reqCh)
+ defer close(resCh)
+
+ // Used to verify if etag of object has changed since last read.
+ var etag string
+
+ // Loop through the incoming control messages and read data.
+ for {
+ select {
+ // When the done channel is closed exit our routine.
+ case <-doneCh:
+ // Close the http response body before returning.
+ // This ends the connection with the server.
+ if httpReader != nil {
+ httpReader.Close()
+ }
+ return
+
+ // Gather incoming request.
+ case req := <-reqCh:
+ // If this is the first request we may not need to do a getObject request yet.
+ if req.isFirstReq {
+ // First request is a Read/ReadAt.
+ if req.isReadOp {
+ // Differentiate between wanting the whole object and just a range.
+ if req.isReadAt {
+ // If this is a ReadAt request only get the specified range.
+ // Range is set with respect to the offset and length of the buffer requested.
+ // Do not set objectInfo from the first readAt request because it will not get
+ // the whole object.
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 {
+ opts.SetRange(req.Offset, 0)
+ }
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
+ if err != nil {
+ resCh <- getResponse{Error: err}
+ return
+ }
+ etag = objectInfo.ETag
+ // Read at least len(req.Buffer) bytes; if we cannot,
+ // we have reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if size > 0 && err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ }
+ } else {
+ // First request is a Stat or Seek call.
+ // Only need to run a StatObject until an actual Read or ReadAt request comes through.
+
+ // Remove the Range header if already set, so stat operations return the original object size.
+ delete(opts.headers, "Range")
+ objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the go-routine.
+ return
+ }
+ etag = objectInfo.ETag
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ }
+ } else if req.settingObjectInfo { // Request is just to get objectInfo.
+ // Remove the Range header if already set, so stat operations return the original object size.
+ delete(opts.headers, "Range")
+ if etag != "" {
+ opts.SetMatchETag(etag)
+ }
+ objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Send back the objectInfo.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ } else {
+ // An offset change fetches the object again at the new offset.
+ // Because httpReader may not have been set by the first request
+ // (if it was a Stat or Seek), check whether the object has been
+ // read yet and only initialize a new reader when one does not
+ // already exist. All ReadAt requests open a new reader.
+ if req.DidOffsetChange || !req.beenRead {
+ if etag != "" {
+ opts.SetMatchETag(etag)
+ }
+ if httpReader != nil {
+ // Close previously opened http reader.
+ httpReader.Close()
+ }
+ // If this request is a readAt only get the specified range.
+ if req.isReadAt {
+ // Range is set with respect to the offset and length of the buffer requested.
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 { // Range is set with respect to the offset.
+ opts.SetRange(req.Offset, 0)
+ }
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ }
+
+ // Read at least len(req.Buffer) bytes; if we cannot,
+ // we have reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Reply back how much was read.
+ resCh <- getResponse{
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ objectInfo: objectInfo,
+ }
+ }
+ }
+ }
+ }()
+
+ // Create a newObject through the information sent back by reqCh.
+ return newObject(reqCh, resCh, doneCh), nil
+}
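
The returned *Object is lazy: no HTTP request is issued until the first Read, ReadAt, Seek or Stat call, which is what the goroutine above services. A sketch (same client as the earlier sketch, names illustrative):

obj, err := client.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
if err != nil {
	log.Fatalln(err)
}
defer obj.Close()

// First call on the object: serviced as a StatObject, no body transfer yet.
st, err := obj.Stat()
if err != nil {
	log.Fatalln(err)
}

// First Read: the goroutine issues the actual GET and streams the body.
buf := make([]byte, st.Size)
if _, err := io.ReadFull(obj, buf); err != nil {
	log.Fatalln(err)
}
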
+
+// get request message container to communicate with internal
+// go-routine.
+type getRequest struct {
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool // Tracks the offset changes for Seek requests.
+ beenRead bool // Determines if this is the first time an object is being read.
+ isReadAt bool // Determines if this request is a request to a specific range
+ isReadOp bool // Determines if this request is a Read or Read/At request.
+ isFirstReq bool // Determines if this request is the first time an object is being accessed.
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
+}
+
+// get response message container to reply back for the request.
+type getResponse struct {
+ Size int
+ Error error
+ didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
+ objectInfo ObjectInfo // Used for the first request.
+}
+
+// Object represents an open object. It implements io.Reader,
+// io.ReaderAt, io.Seeker and io.Closer over an HTTP stream.
+type Object struct {
+ // Mutex.
+ mutex *sync.Mutex
+
+ // User allocated and defined.
+ reqCh chan<- getRequest
+ resCh <-chan getResponse
+ doneCh chan<- struct{}
+ currOffset int64
+ objectInfo ObjectInfo
+
+ // Ask lower level to initiate data fetching based on currOffset
+ seekData bool
+
+ // Keeps track of closed call.
+ isClosed bool
+
+ // Keeps track of if this is the first call.
+ isStarted bool
+
+ // Previous error saved for future calls.
+ prevErr error
+
+ // Keeps track of if this object has been read yet.
+ beenRead bool
+
+ // Keeps track of if objectInfo has been set yet.
+ objectInfoSet bool
+}
+
+// doGetRequest - sends a request on the object's reqCh and blocks on resCh for the reply.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
+ o.reqCh <- request
+ response := <-o.resCh
+
+ // Return any error to the top level.
+ if response.Error != nil {
+ return response, response.Error
+ }
+
+ // This was the first request.
+ if !o.isStarted {
+ // The object has been operated on.
+ o.isStarted = true
+ }
+ // Set the objectInfo if the request was not readAt
+ // and it hasn't been set before.
+ if !o.objectInfoSet && !request.isReadAt {
+ o.objectInfo = response.objectInfo
+ o.objectInfoSet = true
+ }
+ // Set beenRead only if it has not been set before.
+ if !o.beenRead {
+ o.beenRead = response.didRead
+ }
+ // Data is ready on the wire; the lower level need not re-initiate the connection.
+ o.seekData = false
+
+ return response, nil
+}
+
+// setOffset - handles the setting of offsets for
+// Read/ReadAt/Seek requests.
+func (o *Object) setOffset(bytesRead int64) error {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+
+ if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
+ return io.EOF
+ }
+ return nil
+}
+
+// Read reads up to len(b) bytes into b. It returns the number of
+// bytes read (0 <= n <= len(b)) and any error encountered. Returns
+// io.EOF upon end of file.
+func (o *Object) Read(b []byte) (n int, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is previous error saved from previous operation.
+ if o.prevErr != nil || o.isClosed {
+ return 0, o.prevErr
+ }
+ // Create a new request.
+ readReq := getRequest{
+ isReadOp: true,
+ beenRead: o.beenRead,
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readReq.isFirstReq = true
+ }
+
+ // Ask to establish a new data fetch routine based on seekData flag
+ readReq.DidOffsetChange = o.seekData
+ readReq.Offset = o.currOffset
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readReq)
+ if err != nil && err != io.EOF {
+ // Save the error for future calls.
+ o.prevErr = err
+ return response.Size, err
+ }
+
+ // Bytes read.
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+
+ // Return the response.
+ return response.Size, err
+}
+
+// Stat returns the ObjectInfo structure describing Object.
+func (o *Object) Stat() (ObjectInfo, error) {
+ if o == nil {
+ return ObjectInfo{}, ErrInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+ return ObjectInfo{}, o.prevErr
+ }
+
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ // Send the request and get the response.
+ _, err := o.doGetRequest(getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ })
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
+ return o.objectInfo, nil
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // prevErr is error which was saved in previous operation.
+ if o.prevErr != nil || o.isClosed {
+ return 0, o.prevErr
+ }
+
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+ // If offset is negative then we return io.EOF.
+ // If offset is greater than or equal to object size we return io.EOF.
+ if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+ return 0, io.EOF
+ }
+ }
+
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
+ }
+
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
+ // Bytes read.
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ } else {
+ // If this was not the first request update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
+ }
+ }
+ return response.Size, err
+}
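
Continuing the sketch above: each ReadAt maps to a ranged GET covering exactly len(b) bytes from the given offset, per the SetRange call in the request loop.

// Read 1 KiB starting at offset 4096; "Range: bytes=4096-5119" on the wire.
window := make([]byte, 1024)
n, err := obj.ReadAt(window, 4096)
if err != nil && err != io.EOF {
	log.Fatalln(err)
}
log.Printf("read %d bytes at offset 4096", n)
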
+
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+ if o == nil {
+ return 0, ErrInvalidArgument("Object is nil")
+ }
+
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ if o.prevErr != nil {
+ // Seeking is legal at EOF, so allow only io.EOF; return for any other saved error.
+ if o.prevErr != io.EOF {
+ return 0, o.prevErr
+ }
+ }
+
+ // Negative offset is valid for whence of '2'.
+ if offset < 0 && whence != 2 {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
+ }
+
+ // This is the first request. So before anything else
+ // get the ObjectInfo.
+ if !o.isStarted || !o.objectInfoSet {
+ // Create the new Seek request.
+ seekReq := getRequest{
+ isReadOp: false,
+ Offset: offset,
+ isFirstReq: true,
+ }
+ // Send and receive from the seek request.
+ _, err := o.doGetRequest(seekReq)
+ if err != nil {
+ // Save the error.
+ o.prevErr = err
+ return 0, err
+ }
+ }
+
+ // Switch through whence.
+ switch whence {
+ default:
+ return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
+ case 0:
+ if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ o.currOffset = offset
+ case 1:
+ if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
+ return 0, io.EOF
+ }
+ o.currOffset += offset
+ case 2:
+ // If we don't know the object size return an error for io.SeekEnd
+ if o.objectInfo.Size < 0 {
+ return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
+ }
+ // Seeking to positive offset is valid for whence '2', but
+ // since we are backing a Reader we have reached 'EOF' if
+ // offset is positive.
+ if offset > 0 {
+ return 0, io.EOF
+ }
+ // A resulting negative position is not allowed for this whence.
+ if o.objectInfo.Size+offset < 0 {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
+ }
+ o.currOffset = o.objectInfo.Size + offset
+ }
+ // Reset the saved error since we successfully seeked; let Read
+ // and ReadAt decide what happens next.
+ if o.prevErr == io.EOF {
+ o.prevErr = nil
+ }
+
+ // Ask lower level to fetch again from source
+ o.seekData = true
+
+ // Return the effective offset.
+ return o.currOffset, nil
+}
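
Seek itself transfers no data; it records the offset, sets seekData, and the next Read re-issues a ranged GET. For example, reading the trailing 16 bytes of the object (sketch, same obj as above):

// whence 2 (io.SeekEnd) with a negative offset positions near the end.
if _, err := obj.Seek(-16, io.SeekEnd); err != nil {
	log.Fatalln(err)
}
tail := make([]byte, 16)
if _, err := io.ReadFull(obj, tail); err != nil {
	log.Fatalln(err)
}
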
+
+// Close - closes the object. After the first call, subsequent
+// Close() calls return an error.
+func (o *Object) Close() (err error) {
+ if o == nil {
+ return ErrInvalidArgument("Object is nil")
+ }
+ // Locking.
+ o.mutex.Lock()
+ defer o.mutex.Unlock()
+
+ // if already closed return an error.
+ if o.isClosed {
+ return o.prevErr
+ }
+
+ // Close successfully.
+ close(o.doneCh)
+
+ // Save for future operations.
+ errMsg := "Object is already closed. Bad file descriptor."
+ o.prevErr = errors.New(errMsg)
+ // Save here that we closed done channel successfully.
+ o.isClosed = true
+ return nil
+}
+
+// newObject instantiates a new Object.
+// Its ObjectInfo is populated lazily by the first request sent through doGetRequest.
+func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
+ return &Object{
+ mutex: &sync.Mutex{},
+ reqCh: reqCh,
+ resCh: resCh,
+ doneCh: doneCh,
+ }
+}
+
+// getObject - retrieve object from Object Storage.
+//
+// Range requests are expressed through opts (see GetObjectOptions.SetRange);
+// with no range set, the full object is downloaded.
+//
+// For more information about the HTTP Range header, see
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+ // Validate input arguments.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, ObjectInfo{}, err
+ }
+
+ // Execute GET on objectName.
+ resp, err := c.executeMethod(ctx, "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: opts.Header(),
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ if err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Trim the surrounding double quotes from the ETag.
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ md5sum = strings.TrimSuffix(md5sum, "\"")
+
+ // Parse the date.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ msg := "Last-Modified time format not recognized. " + reportIssue
+ return nil, ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: msg,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+
+ // Get content-type.
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ objectStat := ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: resp.ContentLength,
+ LastModified: date,
+ ContentType: contentType,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
+ }
+
+ // Do not close the body here; the caller will close it.
+ return resp.Body, objectStat, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go
new file mode 100755
index 0000000..a5a8752
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-options.go
@@ -0,0 +1,128 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+ headers map[string]string
+ ServerSideEncryption encrypt.ServerSide
+}
+
+// StatObjectOptions are used to specify additional headers or options
+// during GET info/stat requests.
+type StatObjectOptions struct {
+ GetObjectOptions
+}
+
+// Header returns the http.Header representation of the GET options.
+func (o GetObjectOptions) Header() http.Header {
+ headers := make(http.Header, len(o.headers))
+ for k, v := range o.headers {
+ headers.Set(k, v)
+ }
+ if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 {
+ o.ServerSideEncryption.Marshal(headers)
+ }
+ return headers
+}
+
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *GetObjectOptions) Set(key, value string) {
+ if o.headers == nil {
+ o.headers = make(map[string]string)
+ }
+ o.headers[http.CanonicalHeaderKey(key)] = value
+}
+
+// SetMatchETag - set match etag.
+func (o *GetObjectOptions) SetMatchETag(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ o.Set("If-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ o.Set("If-None-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetUnmodified - set the If-Unmodified-Since header.
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetModified - set the If-Modified-Since header.
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (o *GetObjectOptions) SetRange(start, end int64) error {
+ switch {
+ case start == 0 && end < 0:
+ // Read last '-end' bytes. `bytes=-N`.
+ o.Set("Range", fmt.Sprintf("bytes=%d", end))
+ case 0 < start && end == 0:
+ // Read everything starting from offset
+ // 'start'. `bytes=N-`.
+ o.Set("Range", fmt.Sprintf("bytes=%d-", start))
+ case 0 <= start && start <= end:
+ // Read everything starting at 'start' till the
+ // 'end'. `bytes=N-M`
+ o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+ default:
+ // All other cases such as
+ // bytes=-3-
+ // bytes=5-3
+ // bytes=-2-4
+ // bytes=-3-0
+ // bytes=-3--2
+ // are invalid.
+ return ErrInvalidArgument(
+ fmt.Sprintf(
+ "Invalid range specified: start=%d end=%d",
+ start, end))
+ }
+ return nil
+}
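
The three accepted (start, end) shapes map onto the RFC 7233 forms as follows; each call overwrites any previously set Range header (a sketch, values illustrative):

var opts minio.GetObjectOptions
_ = opts.SetRange(0, -128) // "Range: bytes=-128"   -> the last 128 bytes
_ = opts.SetRange(4096, 0) // "Range: bytes=4096-"  -> from offset 4096 to EOF
_ = opts.SetRange(0, 1023) // "Range: bytes=0-1023" -> the first KiB
if err := opts.SetRange(5, 3); err != nil {
	log.Println(err) // start > end falls through to the default error case
}
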
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
new file mode 100755
index 0000000..12d4c59
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -0,0 +1,78 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketPolicy - get the policy for the given bucket.
+func (c Client) GetBucketPolicy(bucketName string) (string, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ bucketPolicy, err := c.getBucketPolicy(bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchBucketPolicy" {
+ return "", nil
+ }
+ return "", err
+ }
+ return bucketPolicy, nil
+}
+
+// Request server for current bucket policy.
+func (c Client) getBucketPolicy(bucketName string) (string, error) {
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute GET on bucket to fetch the policy.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return "", httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ policy := string(bucketPolicyBuf)
+ return policy, err
+}
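
Usage sketch (same client as earlier): note that a bucket without a policy yields an empty string with a nil error, because NoSuchBucketPolicy is swallowed by the wrapper above.

policy, err := client.GetBucketPolicy("mybucket")
if err != nil {
	log.Fatalln(err)
}
if policy == "" {
	log.Println("no policy set on mybucket")
} else {
	log.Println("current policy:", policy)
}
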
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
new file mode 100755
index 0000000..04f7573
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -0,0 +1,720 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// ListBuckets lists all buckets owned by this authenticated user.
+//
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
+//
+// api := client.New(....)
+// for message := range api.ListBuckets() {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListBuckets() ([]BucketInfo, error) {
+ // Execute GET on service.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, "", "")
+ }
+ }
+ listAllMyBucketsResult := listAllMyBucketsResult{}
+ err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
+ if err != nil {
+ return nil, err
+ }
+ return listAllMyBucketsResult.Buckets.Bucket, nil
+}
+
+/// Bucket Read Operations.
+
+// ListObjectsV2 lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it lists
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+
+ // Return object owner information by default
+ fetchOwner := true
+
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer close(objectStatCh)
+ // Save continuationToken for next request.
+ var continuationToken string
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "")
+ if err != nil {
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send object prefixes.
+ case objectStatCh <- ObjectInfo{
+ Key: obj.Prefix,
+ Size: 0,
+ }:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If continuation token present, save it for next request.
+ if result.NextContinuationToken != "" {
+ continuationToken = result.NextContinuationToken
+ }
+
+ // Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
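
One detail the doc-comment example above omits: errors are delivered in-band through ObjectInfo.Err, so every message from the channel should be checked (sketch, same client as earlier, names illustrative):

doneCh := make(chan struct{})
defer close(doneCh)

for object := range client.ListObjectsV2("mybucket", "logs/", true, doneCh) {
	if object.Err != nil {
		log.Fatalln(object.Err)
	}
	log.Println(object.Key, object.Size)
}
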
+
+// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?continuation-token - Used to continue iterating over a set of objects
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+// ?start-after - Specifies the key to start after when listing objects in a bucket.
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Always set list-type in ListObjects V2
+ urlValues.Set("list-type", "2")
+
+ // Set object prefix.
+ if objectPrefix != "" {
+ urlValues.Set("prefix", objectPrefix)
+ }
+ // Set continuation token
+ if continuationToken != "" {
+ urlValues.Set("continuation-token", continuationToken)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // Fetch owner when listing
+ if fetchOwner {
+ urlValues.Set("fetch-owner", "true")
+ }
+
+ // maxkeys should default to 1000 or less.
+ if maxkeys == 0 || maxkeys > 1000 {
+ maxkeys = 1000
+ }
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Set start-after
+ if startAfter != "" {
+ urlValues.Set("start-after", startAfter)
+ }
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListBucketV2Result{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode the ListObjectsV2 XML response.
+ listBucketResult := ListBucketV2Result{}
+ if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
+ return listBucketResult, err
+ }
+
+ // This is an additional verification check to make
+ // sure proper responses are received.
+ if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
+ return listBucketResult, errors.New("Truncated response should have continuation token set")
+ }
+
+ // Success.
+ return listBucketResult, nil
+}
+
+// ListObjects - (List Objects) - List some objects or all recursively.
+//
+// ListObjects lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it lists
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer close(objectStatCh)
+ // Save marker for next request.
+ var marker string
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
+ if err != nil {
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ // Save the marker.
+ marker = object.Key
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectInfo{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send object prefixes.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If next marker present, save it for next request.
+ if result.NextMarker != "" {
+ marker = result.NextMarker
+ }
+
+ // Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketResult{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketResult{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ // Set object prefix.
+ if objectPrefix != "" {
+ urlValues.Set("prefix", objectPrefix)
+ }
+ // Set object marker.
+ if objectMarker != "" {
+ urlValues.Set("marker", objectMarker)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // maxkeys should default to 1000 or less.
+ if maxkeys == 0 || maxkeys > 1000 {
+ maxkeys = 1000
+ }
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListBucketResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode the ListObjects XML response.
+ listBucketResult := ListBucketResult{}
+ err = xmlDecoder(resp.Body, &listBucketResult)
+ if err != nil {
+ return listBucketResult, err
+ }
+ return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it lists all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel to pro-actively close the internal go routine.
+// If you enable recursive as 'true' this function will return back all
+// the multipart objects in a given bucket name.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all incomplete uploads in 'mytestbucket'
+// recursive := true
+// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+ // Turn on size aggregation of individual parts.
+ isAggregateSize := true
+ return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+ // Allocate channel for multipart uploads.
+ objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+ // Delimiter is set to "/" by default.
+ delimiter := "/"
+ if recursive {
+ // If recursive do not delimit.
+ delimiter = ""
+ }
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ defer close(objectMultipartStatCh)
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return objectMultipartStatCh
+ }
+ go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+ defer close(objectMultipartStatCh)
+ // object and upload ID marker for future requests.
+ var objectMarker string
+ var uploadIDMarker string
+ for {
+ // list all multipart uploads.
+ result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ return
+ }
+ // Save objectMarker and uploadIDMarker for next request.
+ objectMarker = result.NextKeyMarker
+ uploadIDMarker = result.NextUploadIDMarker
+ // Send all multipart uploads.
+ for _, obj := range result.Uploads {
+ // Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+ if aggregateSize {
+ // Get total multipart size.
+ obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
+ if err != nil {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: err,
+ }
+ continue
+ }
+ }
+ select {
+ // Send individual uploads here.
+ case objectMultipartStatCh <- obj:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ object := ObjectMultipartInfo{}
+ object.Key = obj.Prefix
+ object.Size = 0
+ select {
+ // Send delimited prefixes here.
+ case objectMultipartStatCh <- object:
+ // If done channel return here.
+ case <-doneCh:
+ return
+ }
+ }
+ // Listing ends when the result is not truncated; return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectMultipartStatCh)
+ // return.
+ return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters. :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set uploads.
+ urlValues.Set("uploads", "")
+ // Set object key marker.
+ if keyMarker != "" {
+ urlValues.Set("key-marker", keyMarker)
+ }
+ // Set upload id marker.
+ if uploadIDMarker != "" {
+ urlValues.Set("upload-id-marker", uploadIDMarker)
+ }
+ // Set prefix marker.
+ if prefix != "" {
+ urlValues.Set("prefix", prefix)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // maxUploads should be 1000 or less.
+ if maxUploads == 0 || maxUploads > 1000 {
+ maxUploads = 1000
+ }
+ // Set max-uploads.
+ urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+
+ // Execute GET on bucketName to list multipart uploads.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListMultipartUploadsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ // Decode response body.
+ listMultipartUploadsResult := ListMultipartUploadsResult{}
+ err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+ if err != nil {
+ return listMultipartUploadsResult, err
+ }
+ return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all object parts, paging through results until the listing is complete.
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
+ // Part number marker for the next batch of request.
+ var nextPartNumberMarker int
+ partsInfo = make(map[int]ObjectPart)
+ for {
+ // Get list of uploaded parts a maximum of 1000 per request.
+ listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+ if err != nil {
+ return nil, err
+ }
+ // Append to parts info.
+ for _, part := range listObjPartsResult.ObjectParts {
+ // Trim the surrounding double quotes from the ETag.
+ part.ETag = strings.TrimPrefix(part.ETag, "\"")
+ part.ETag = strings.TrimSuffix(part.ETag, "\"")
+ partsInfo[part.PartNumber] = part
+ }
+ // Keep part number marker, for the next iteration.
+ nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+ // Listing ends when the result is not truncated; stop right here.
+ if !listObjPartsResult.IsTruncated {
+ break
+ }
+ }
+
+ // Return all the parts.
+ return partsInfo, nil
+}
+
+// findUploadIDs lists all incomplete uploads and finds the uploadIDs matching the object name.
+func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
+ var uploadIDs []string
+ // Make list incomplete uploads recursive.
+ isRecursive := true
+ // Turn off size aggregation of individual parts, in this request.
+ isAggregateSize := false
+ // Create done channel to cleanup the routine.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+ // List all incomplete uploads.
+ for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
+ if mpUpload.Err != nil {
+ return nil, mpUpload.Err
+ }
+ if objectName == mpUpload.Key {
+ uploadIDs = append(uploadIDs, mpUpload.UploadID)
+ }
+ }
+ // Return all matching upload ids.
+ return uploadIDs, nil
+}
+
+// getTotalMultipartSize - calculate the total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+ // Iterate over all parts and aggregate the size.
+ partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+ if err != nil {
+ return 0, err
+ }
+ for _, partInfo := range partsInfo {
+ size += partInfo.Size
+ }
+ return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded
+// for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
+ // Get resources properly escaped and lined up before using them in http request.
+ urlValues := make(url.Values)
+ // Set part number marker.
+ urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ // maxParts should be 1000 or less.
+ if maxParts == 0 || maxParts > 1000 {
+ maxParts = 1000
+ }
+ // Set max parts.
+ urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+ // Execute GET on objectName to get list of parts.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListObjectPartsResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode list object parts XML.
+ listObjectPartsResult := ListObjectPartsResult{}
+ err = xmlDecoder(resp.Body, &listObjectPartsResult)
+ if err != nil {
+ return listObjectPartsResult, err
+ }
+ return listObjectPartsResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
new file mode 100755
index 0000000..1c01e36
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -0,0 +1,228 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return BucketNotification{}, err
+ }
+ notification, err := c.getBucketNotification(bucketName)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to fetch the notification configuration.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+
+}
+
+// processBucketNotificationResponse - processes the GetBucketNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return BucketNotification{}, errResponse
+ }
+ var bucketNotification BucketNotification
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return bucketNotification, nil
+}
+
+// identity represents the user ID; this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+ Host string `json:"host"`
+ Port string `json:"port"`
+ UserAgent string `json:"userAgent"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
+type NotificationEvent struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+ Source sourceInfo `json:"source"`
+}
+
+// NotificationInfo - represents the collection of notification events; it
+// additionally reports any errors encountered while listening for bucket notifications.
+type NotificationInfo struct {
+ Records []NotificationEvent
+ Err error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+ notificationInfoCh := make(chan NotificationInfo, 1)
+ // On success, start a routine that reads the response line by line.
+ go func(notificationInfoCh chan<- NotificationInfo) {
+ defer close(notificationInfoCh)
+
+ // Validate the bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Check ARN partition to verify if listening bucket is supported
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ notificationInfoCh <- NotificationInfo{
+ Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
+ }
+ return
+ }
+
+ // Continuously run and listen on bucket notifications.
+ // Create a done channel to control the retry go routine.
+ retryDoneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(retryDoneCh)
+
+ // Wait on the jitter retry loop.
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+ urlValues := make(url.Values)
+ urlValues.Set("prefix", prefix)
+ urlValues.Set("suffix", suffix)
+ urlValues["events"] = events
+
+ // Execute GET on bucket to listen for notification events.
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ if err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Validate http response, upon error return quickly.
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ notificationInfoCh <- NotificationInfo{
+ Err: errResponse,
+ }
+ return
+ }
+
+ // Initialize a new bufio scanner, to read line by line.
+ bio := bufio.NewScanner(resp.Body)
+
+ // Close the response body.
+ defer resp.Body.Close()
+
+ // Unmarshal each line into a NotificationInfo value.
+ for bio.Scan() {
+ var notificationInfo NotificationInfo
+ if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+ continue
+ }
+ // Send notificationInfo
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-doneCh:
+ return
+ }
+ }
+ // Look for any underlying errors.
+ if err = bio.Err(); err != nil {
+ // For an unexpected connection drop from server, we close the body
+ // and re-connect.
+ if err == io.ErrUnexpectedEOF {
+ resp.Body.Close()
+ }
+ }
+ }
+ }(notificationInfoCh)
+
+ // Returns the notification info channel, for caller to start reading from.
+ return notificationInfoCh
+}
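
For reference, a minimal sketch of consuming this channel. The endpoint, credentials, and bucket/prefix/suffix values below are placeholders, and `minio.New` is the client constructor from elsewhere in this package:

```go
package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("play.min.io", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Closing doneCh stops the listener and its retry loop.
	doneCh := make(chan struct{})
	defer close(doneCh)

	// Receive object-created events for ".jpg" keys under "photos/".
	for info := range client.ListenBucketNotification("mybucket", "photos/", ".jpg",
		[]string{"s3:ObjectCreated:*"}, doneCh) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			fmt.Println(record.EventName, record.S3.Object.Key)
		}
	}
}
```
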
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
new file mode 100755
index 0000000..a2c0607
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -0,0 +1,215 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "errors"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// The maximum expiry is 7 days (i.e. 604800 seconds) and the minimum is 1 second.
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ // Input validation.
+ if method == "" {
+ return nil, ErrInvalidArgument("method cannot be empty.")
+ }
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err = isValidExpiry(expires); err != nil {
+ return nil, err
+ }
+
+ // Convert expires into seconds.
+ expireSeconds := int64(expires / time.Second)
+ reqMetadata := requestMetadata{
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ queryValues: reqParams,
+ }
+
+ // Instantiate a new request.
+ // Since expires is set newRequest will presign the request.
+ var req *http.Request
+ if req, err = c.newRequest(method, reqMetadata); err != nil {
+ return nil, err
+ }
+ return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access object
+// data without credentials. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can
+// override a set of response headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL("GET", bucketName, objectName, expires, reqParams)
+}
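
A sketch of the response-header override described above; the bucket and object names are placeholders and `client` is assumed to be a `*minio.Client`:

```go
package example

import (
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

// presignDownload returns a 24-hour GET URL that forces browsers to
// download the object via the response-content-disposition override.
func presignDownload(client *minio.Client) (*url.URL, error) {
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)
	return client.PresignedGetObject("mybucket", "reports/report.pdf",
		24*time.Hour, reqParams)
}
```
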
+
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. The URL can have a maximum expiry
+// of up to 7 days or a minimum of 1 second. Additionally you can
+// override a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. The URL can have a maximum expiry of up to
+// 7 days or a minimum of 1 second.
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL("PUT", bucketName, objectName, expires, nil)
+}
+
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. The URL can have a maximum expiry
+// of up to 7 days or a minimum of 1 second.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPostPolicy - Returns the POST URL and form data needed to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+ // Validate input arguments.
+ if p.expiration.IsZero() {
+ return nil, nil, errors.New("Expiration time must be specified")
+ }
+ if _, ok := p.formData["key"]; !ok {
+ return nil, nil, errors.New("object key must be specified")
+ }
+ if _, ok := p.formData["bucket"]; !ok {
+ return nil, nil, errors.New("bucket name must be specified")
+ }
+
+ bucketName := p.formData["bucket"]
+ // Fetch the bucket location.
+ location, err := c.getBucketLocation(bucketName)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+ u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get credentials from the configured credentials provider.
+ credValues, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ signerType = credValues.SignerType
+ sessionToken = credValues.SessionToken
+ accessKeyID = credValues.AccessKeyID
+ secretAccessKey = credValues.SecretAccessKey
+ )
+
+ if signerType.IsAnonymous() {
+ return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
+ }
+
+ // Keep time.
+ t := time.Now().UTC()
+ // For signature version '2' handle here.
+ if signerType.IsV2() {
+ policyBase64 := p.base64()
+ p.formData["policy"] = policyBase64
+ // For Google endpoint set this value to be 'GoogleAccessId'.
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ p.formData["GoogleAccessId"] = accessKeyID
+ } else {
+ // For all other endpoints set this value to be 'AWSAccessKeyId'.
+ p.formData["AWSAccessKeyId"] = accessKeyID
+ }
+ // Sign the policy.
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
+ return u, p.formData, nil
+ }
+
+ // Add date policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-date",
+ value: t.Format(iso8601DateFormat),
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add algorithm policy.
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-algorithm",
+ value: signV4Algorithm,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ // Add a credential policy.
+ credential := s3signer.GetCredential(accessKeyID, location, t)
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-credential",
+ value: credential,
+ }); err != nil {
+ return nil, nil, err
+ }
+
+ if sessionToken != "" {
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-security-token",
+ value: sessionToken,
+ }); err != nil {
+ return nil, nil, err
+ }
+ }
+
+ // Get base64 encoded policy.
+ policyBase64 := p.base64()
+
+ // Fill in the form data.
+ p.formData["policy"] = policyBase64
+ p.formData["x-amz-algorithm"] = signV4Algorithm
+ p.formData["x-amz-credential"] = credential
+ p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+ if sessionToken != "" {
+ p.formData["x-amz-security-token"] = sessionToken
+ }
+ p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
+ return u, p.formData, nil
+}
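
A sketch of driving this from the caller's side, assuming the `PostPolicy` helpers (`NewPostPolicy`, `SetBucket`, `SetKey`, `SetExpires`) defined elsewhere in this package; the bucket and key are placeholders:

```go
package example

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

// postPolicyUpload builds a browser-upload POST policy valid for ten
// minutes and prints the target URL plus the signed form fields.
func postPolicyUpload(client *minio.Client) error {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("mybucket"); err != nil {
		return err
	}
	if err := policy.SetKey("uploads/avatar.png"); err != nil {
		return err
	}
	if err := policy.SetExpires(time.Now().UTC().Add(10 * time.Minute)); err != nil {
		return err
	}

	u, formData, err := client.PresignedPostPolicy(policy)
	if err != nil {
		return err
	}
	fmt.Println("POST to:", u)
	for k, v := range formData {
		fmt.Printf("  %s=%s\n", k, v)
	}
	return nil
}
```
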
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
new file mode 100755
index 0000000..33dc0cf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -0,0 +1,306 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+/// Bucket operations
+
+// MakeBucket creates a new bucket with bucketName.
+//
+// Location is an optional argument; by default all buckets are
+// created in the US Standard Region.
+//
+// For more supported Amazon S3 regions, see http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For more supported Google Cloud Storage regions, see https://cloud.google.com/storage/docs/bucket-locations
+func (c Client) MakeBucket(bucketName string, location string) (err error) {
+ defer func() {
+ // Save the location into cache on a successful makeBucket response.
+ if err == nil {
+ c.bucketLocCache.Set(bucketName, location)
+ }
+ }()
+
+ // Validate the input arguments.
+ if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+ return err
+ }
+
+ // If location is empty, treat it as the default region 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ // For clients configured with a custom region, default
+ // to that region instead of 'us-east-1'.
+ if c.region != "" {
+ location = c.region
+ }
+ }
+ // PUT bucket request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ bucketLocation: location,
+ }
+
+ // If location is not 'us-east-1' create bucket location config.
+ if location != "us-east-1" && location != "" {
+ createBucketConfig := createBucketConfiguration{}
+ createBucketConfig.Location = location
+ var createBucketConfigBytes []byte
+ createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
+ if err != nil {
+ return err
+ }
+ reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+ reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
+ reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+ reqMetadata.contentLength = int64(len(createBucketConfigBytes))
+ }
+
+ // Execute PUT to create a new bucket.
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Success.
+ return nil
+}
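
A small sketch pairing MakeBucket with the client's `BucketExists` call (assumed from this package's API surface, not shown in this diff); names are placeholders:

```go
package example

import minio "github.com/minio/minio-go"

// ensureBucket creates the bucket in the given region only when it
// does not already exist.
func ensureBucket(client *minio.Client, name, region string) error {
	exists, err := client.BucketExists(name)
	if err != nil {
		return err
	}
	if exists {
		return nil
	}
	return client.MakeBucket(name, region)
}
```
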
+
+// SetBucketPolicy set the access permissions on an existing bucket.
+func (c Client) SetBucketPolicy(bucketName, policy string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If policy is empty then delete the bucket policy.
+ if policy == "" {
+ return c.removeBucketPolicy(bucketName)
+ }
+
+ // Save the updated policies.
+ return c.putBucketPolicy(bucketName, policy)
+}
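
The policy argument is a standard S3 policy document in JSON. A sketch granting anonymous read access (the bucket name is a placeholder):

```go
package example

import minio "github.com/minio/minio-go"

// makePublicRead grants anonymous GetObject on every key in the bucket.
func makePublicRead(client *minio.Client, bucket string) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": ["*"]},
	    "Action": ["s3:GetObject"],
	    "Resource": ["arn:aws:s3:::` + bucket + `/*"]
	  }]
	}`
	return client.SetBucketPolicy(bucket, policy)
}
```
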
+
+// Saves a new bucket policy.
+func (c Client) putBucketPolicy(bucketName, policy string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in the http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Content-Length is mandatory for the put-policy request.
+ policyReader := strings.NewReader(policy)
+ b, err := ioutil.ReadAll(policyReader)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyReader,
+ contentLength: int64(len(b)),
+ }
+
+ // Execute PUT to upload a new bucket policy.
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in the http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+ // Execute DELETE on the bucket to remove the policy.
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetBucketLifecycle set the lifecycle on an existing bucket.
+func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // If lifecycle is empty then delete it.
+ if lifecycle == "" {
+ return c.removeBucketLifecycle(bucketName)
+ }
+
+ // Save the updated lifecycle.
+ return c.putBucketLifecycle(bucketName, lifecycle)
+}
+
+// Saves a new bucket lifecycle.
+func (c Client) putBucketLifecycle(bucketName, lifecycle string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in the http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+
+ // Content-Length is mandatory for the put-lifecycle request.
+ lifecycleReader := strings.NewReader(lifecycle)
+ b, err := ioutil.ReadAll(lifecycleReader)
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: lifecycleReader,
+ contentLength: int64(len(b)),
+ contentMD5Base64: sumMD5Base64(b),
+ }
+
+ // Execute PUT to upload a new bucket lifecycle.
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// Remove lifecycle from a bucket.
+func (c Client) removeBucketLifecycle(bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in the http request.
+ urlValues := make(url.Values)
+ urlValues.Set("lifecycle", "")
+
+ // Execute DELETE on the bucket to remove the lifecycle.
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in the http request.
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ notifBytes, err := xml.Marshal(bucketNotification)
+ if err != nil {
+ return err
+ }
+
+ notifBuffer := bytes.NewReader(notifBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Base64: sumMD5Base64(notifBytes),
+ contentSHA256Hex: sum256Hex(notifBytes),
+ }
+
+ // Execute PUT to upload a new bucket notification.
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// RemoveAllBucketNotification - Removes the bucket notification, clearing all previously specified configuration.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+ return c.SetBucketNotification(bucketName, BucketNotification{})
+}
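
A sketch of configuring a notification target, assuming the `Arn`/`NotificationConfig` helpers (`NewArn`, `NewNotificationConfig`, `AddEvents`, `AddTopic`, `ObjectCreatedAll`) from this package's notification support files, which this diff does not show; the ARN fields and bucket name are placeholders:

```go
package example

import minio "github.com/minio/minio-go"

// watchCreates routes all object-created events to an SNS topic.
func watchCreates(client *minio.Client, bucket string) error {
	arn := minio.NewArn("aws", "sns", "us-east-1", "123456789012", "mytopic")
	config := minio.NewNotificationConfig(arn)
	config.AddEvents(minio.ObjectCreatedAll)

	var notification minio.BucketNotification
	notification.AddTopic(config)
	return client.SetBucketNotification(bucket, notification)
}
```
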
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
new file mode 100755
index 0000000..c16c3c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -0,0 +1,111 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "math"
+ "os"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+ _, ok = reader.(*Object)
+ return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+ _, ok = reader.(io.ReaderAt)
+ if ok {
+ var v *os.File
+ v, ok = reader.(*os.File)
+ if ok {
+ // Stdin, Stdout and Stderr all have the *os.File type
+ // and happen to also be io.ReaderAt compatible, so we
+ // need special conditions for them to be ignored by
+ // this function.
+ for _, f := range []string{
+ "/dev/stdin",
+ "/dev/stdout",
+ "/dev/stderr",
+ } {
+ if f == v.Name() {
+ ok = false
+ break
+ }
+ }
+ }
+ }
+ return
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+// maxPartsCount - 10000
+// minPartSize - 64MiB
+// maxMultipartPutObjectSize - 5TiB
+//
+func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+ // If object size is '-1', set it to the 5TiB maximum.
+ if objectSize == -1 {
+ objectSize = maxMultipartPutObjectSize
+ }
+ // Error out if object size is larger than the supported maximum.
+ if objectSize > maxMultipartPutObjectSize {
+ err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+ return
+ }
+ // Use floats for part size for all calculations to avoid
+ // overflows during float64 to int64 conversions.
+ partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount))
+ partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
+ // Total parts count.
+ totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+ // Part size.
+ partSize = int64(partSizeFlt)
+ // Last part size.
+ lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+ return totalPartsCount, partSize, lastPartSize, nil
+}
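
A worked example of the math, written as a sketch of an in-package test that could sit alongside this file (the expected values assume the constants stated in the NOTE above): for an unknown size the function plans against the 5TiB maximum, so 5TiB split across 10000 parts is roughly 524MiB, which rounds up to the next 64MiB multiple, 576MiB; that yields 9103 parts with a final part of 128MiB.

```go
package minio

import "testing"

// TestOptimalPartInfoUnknownSize pins down the planning math for a
// stream of unknown size (-1).
func TestOptimalPartInfoUnknownSize(t *testing.T) {
	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(-1)
	if err != nil {
		t.Fatal(err)
	}
	if totalPartsCount != 9103 || partSize != 576*1024*1024 || lastPartSize != 128*1024*1024 {
		t.Fatalf("unexpected plan: %d parts of %d bytes, last part %d bytes",
			totalPartsCount, partSize, lastPartSize)
	}
}
```
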
+
+// newUploadID - initiates a new multipart upload request for an object name
+// and returns the new upload id.
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return "", err
+ }
+
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return "", err
+ }
+ return initMultipartUploadResult.UploadID, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go
new file mode 100755
index 0000000..ff4663e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-context.go
@@ -0,0 +1,33 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+)
+
+// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ err = opts.validate()
+ if err != nil {
+ return 0, err
+ }
+ return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
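
A sketch of the cancellation this enables: the upload is abandoned if it exceeds a deadline. The bucket and object names are placeholders:

```go
package example

import (
	"context"
	"os"
	"time"

	minio "github.com/minio/minio-go"
)

// uploadWithTimeout aborts the upload if it takes longer than a minute.
func uploadWithTimeout(client *minio.Client, path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return 0, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	return client.PutObjectWithContext(ctx, "mybucket", "backups/"+st.Name(), f, st.Size(),
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
}
```
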
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
new file mode 100755
index 0000000..21322ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -0,0 +1,83 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+// CopyObject - copy a source object into a new object
+func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+ return c.CopyObjectWithProgress(dst, src, nil)
+}
+
+// CopyObjectWithProgress - copy a source object into a new object; optionally takes
+// a progress bar input to report current progress.
+func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error {
+ header := make(http.Header)
+ for k, v := range src.Headers {
+ header[k] = v
+ }
+
+ var err error
+ var size int64
+ // If a progress bar is specified, the object size is needed as well, so initiate a StatObject request.
+ if progress != nil {
+ size, _, _, err = src.getProps(c)
+ if err != nil {
+ return err
+ }
+ }
+
+ if src.encryption != nil {
+ encrypt.SSECopy(src.encryption).Marshal(header)
+ }
+
+ if dst.encryption != nil {
+ dst.encryption.Marshal(header)
+ }
+ for k, v := range dst.getUserMetaHeadersMap(true) {
+ header.Set(k, v)
+ }
+
+ resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{
+ bucketName: dst.bucket,
+ objectName: dst.object,
+ customHeader: header,
+ })
+ if err != nil {
+ return err
+ }
+ defer closeResponse(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, dst.bucket, dst.object)
+ }
+
+ // Update the progress properly after successful copy.
+ if progress != nil {
+ io.CopyN(ioutil.Discard, progress, size)
+ }
+
+ return nil
+}
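
A sketch of a server-side copy, assuming the `NewSourceInfo`/`NewDestinationInfo` constructors defined elsewhere in this package; bucket and key names are placeholders:

```go
package example

import minio "github.com/minio/minio-go"

// serverSideCopy copies an object without downloading it.
func serverSideCopy(client *minio.Client) error {
	src := minio.NewSourceInfo("srcbucket", "path/old.csv", nil)
	dst, err := minio.NewDestinationInfo("dstbucket", "path/new.csv", nil, nil)
	if err != nil {
		return err
	}
	return client.CopyObject(dst, src)
}
```
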
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go
new file mode 100755
index 0000000..140a9c0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "mime"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
+func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+ // On any error, fail quickly here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Set contentType based on filepath extension if not given or default
+ // value of "application/octet-stream" if the extension has no associated type.
+ if opts.ContentType == "" {
+ if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+ opts.ContentType = "application/octet-stream"
+ }
+ }
+ return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
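
A sketch relying on that inference: with an empty ContentType the library stats the file itself and derives the type from the ".pdf" extension (application/pdf in Go's built-in table). Paths and names are placeholders:

```go
package example

import (
	"context"

	minio "github.com/minio/minio-go"
)

// uploadReport leaves ContentType empty so the library infers
// "application/pdf" from the ".pdf" extension.
func uploadReport(client *minio.Client) (int64, error) {
	return client.FPutObjectWithContext(context.Background(), "mybucket",
		"reports/q3.pdf", "/tmp/q3.pdf", minio.PutObjectOptions{})
}
```
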
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
new file mode 100755
index 0000000..7c8e051
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -0,0 +1,27 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+)
+
+// FPutObject - Create an object in a bucket, with contents from file at filePath
+func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
new file mode 100755
index 0000000..52dc069
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -0,0 +1,368 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "runtime/debug"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+ opts PutObjectOptions) (n int64, err error) {
+ n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // If multipart functionality is not available, fall back
+ // to a single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ }
+ return n, err
+}
+
+func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to the server; should be equal to
+ // 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
+ if err != nil {
+ return 0, err
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return 0, err
+ }
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
+ for partNumber <= totalPartsCount {
+ // Choose the hash algorithms to be calculated; avoid sha256
+ // with non-v4 signature requests or over HTTPS connections.
+ hashAlgos, hashSums := c.hashMaterials()
+
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF {
+ return 0, rErr
+ }
+
+ // Calculate hash sums over the part data just read.
+ for k, v := range hashAlgos {
+ v.Write(buf[:length])
+ hashSums[k] = v.Sum(nil)
+ }
+
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Checksums..
+ var (
+ md5Base64 string
+ sha256Hex string
+ )
+ if hashSums["md5"] != nil {
+ md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+ }
+ if hashSums["sha256"] != nil {
+ sha256Hex = hex.EncodeToString(hashSums["sha256"])
+ }
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+ // For unknown size, break away on EOF; we do not
+ // have to upload up to totalPartsCount.
+ if rErr == io.EOF {
+ break
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
+func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploads", "")
+
+ // Set ContentType header.
+ customHeader := opts.Header()
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ }
+
+ // Execute POST on an objectName to initiate multipart upload.
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return initiateMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Decode xml for new multipart upload.
+ initiateMultipartUploadResult := initiateMultipartUploadResult{}
+ err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
+ if err != nil {
+ return initiateMultipartUploadResult, err
+ }
+ return initiateMultipartUploadResult, nil
+}
+
+// uploadPart - Uploads a part in a multipart upload.
+func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
+ partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectPart{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectPart{}, err
+ }
+ if size > maxPartSize {
+ return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
+ }
+ if size <= -1 {
+ return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+ if partNumber <= 0 {
+ return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
+ }
+ if uploadID == "" {
+ return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
+ }
+
+ // Get resources properly escaped and lined up before using them in the http request.
+ urlValues := make(url.Values)
+ // Set part number.
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ // Set upload id.
+ urlValues.Set("uploadId", uploadID)
+
+ // Set encryption headers, if any.
+ customHeader := make(http.Header)
+ if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS {
+ sse.Marshal(customHeader)
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
+ }
+
+ // Execute PUT on each part.
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectPart{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+ // Once successfully uploaded, return completed part.
+ objPart := ObjectPart{}
+ objPart.Size = size
+ objPart.PartNumber = partNumber
+ // Trim the surrounding double quotes from the ETag.
+ objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
+ return objPart, nil
+}
+
+// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
+func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
+ complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+ // Marshal complete multipart body.
+ completeMultipartUploadBytes, err := xml.Marshal(complete)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+
+ // Wrap the marshaled complete-multipart body in a reader.
+ completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+ }
+
+ // Execute POST to complete multipart upload for an objectName.
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Read resp.Body into a []byte to parse for an Error response inside the body.
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
+ // Decode completed multipart upload response on success.
+ completeMultipartUploadResult := completeMultipartUploadResult{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment
+ return completeMultipartUploadResult, err
+ } else if completeMultipartUploadResult.Bucket == "" {
+ // xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+ // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+ // of the members.
+
+ // Decode completed multipart upload response on failure
+ completeMultipartUploadErr := ErrorResponse{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+ if err != nil {
+ // xml parsing failure due to the presence of an ill-formed xml fragment
+ return completeMultipartUploadResult, err
+ }
+ return completeMultipartUploadResult, completeMultipartUploadErr
+ }
+ return completeMultipartUploadResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
new file mode 100755
index 0000000..211d1c2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -0,0 +1,417 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// Following code handles these types of readers.
+//
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+
+ if !isObject(reader) && isReadAt(reader) {
+ // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
+ n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+ } else {
+ n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // If multipart functionality is not available, fall back
+ // to a single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ }
+ return n, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part *ObjectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part uploaded.
+ Part *ObjectPart // Pointer to the uploaded part.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding re-reading data that
+// was already uploaded. Internally this function uses temporary
+// files for staging all the data; these temporary files are cleaned
+// up automatically when the caller, i.e. the http client, closes the
+// stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+ reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return 0, err
+ }
+
+ // Abort the multipart upload in progress if the function
+ // returns any error. Since we do not resume, we should
+ // purge the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to the server; should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Declare a channel that sends the next part number to be uploaded.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadPartsCh := make(chan uploadPartReq, 10000)
+
+ // Declare a channel that sends back the response of a part upload.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+ // Used for readability, lastPartNumber is always totalPartsCount.
+ lastPartNumber := totalPartsCount
+
+ // Send each part number to the channel to be processed.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
+ close(uploadPartsCh)
+ // Start opts.getNumThreads() workers that receive part numbers from the channel and upload in parallel.
+ for w := 1; w <= opts.getNumThreads(); w++ {
+ go func(partSize int64) {
+ // Each worker will draw from the part channel and upload in parallel.
+ for uploadReq := range uploadPartsCh {
+
+ // Compute the read offset for this part as a multiple of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ partSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
+ sectionReader, uploadReq.PartNum,
+ "", "", partSize, opts.ServerSideEncryption)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ uploadReq.Part = &objPart
+
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: objPart.Size,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
+ Error: nil,
+ }
+ }
+ }(partSize)
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ part := uploadRes.Part
+ if part == nil {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ // Store the parts to be completed in order.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+ // Initiates a new multipart request
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return 0, err
+ }
+
+ // Abort the multipart upload if the function returns
+ // any error. Since we do not resume, we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to the server; should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ hookReader := newHook(reader, opts.Progress)
+
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
+ io.LimitReader(hookReader, partSize),
+ partNumber, "", "", partSize, opts.ServerSideEncryption)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partSize
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+// putObjectNoChecksum - a special function used for Google Cloud Storage,
+// since Google's multipart API is not S3 compatible.
+func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Size -1 is only supported on Google Cloud Storage; we error
+ // out in all other situations.
+ if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ return 0, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+ if size > 0 {
+ if isReadAt(reader) && !isObject(reader) {
+ seeker, _ := reader.(io.Seeker)
+ offset, err := seeker.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return 0, ErrInvalidArgument(err.Error())
+ }
+ reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
+ }
+ }
+
+ // Update progress reader appropriately to the latest offset as we
+ // read from the source.
+ readSeeker := newHook(reader, opts.Progress)
+
+ // This function does not calculate sha256 and md5sum for payload.
+ // Execute put object.
+ st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
+ if err != nil {
+ return 0, err
+ }
+ if st.Size != size {
+ return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+ }
+ return size, nil
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+ // Set headers.
+ customHeader := opts.Header()
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
+ }
+
+ // Execute PUT on the objectName.
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ var objInfo ObjectInfo
+ // Trim the surrounding double quotes from the ETag.
+ objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
+ // A success here means data was written to server successfully.
+ objInfo.Size = size
+
+ // Return here.
+ return objInfo, nil
+}
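
Tying the two paths together from the caller's side: an *os.File satisfies io.ReaderAt (and is neither a *minio.Object nor a standard stream), so a large file upload takes the parallel section-reader path above, with NumThreads bounding the worker pool. A sketch with placeholder names, using the PutObject entry point defined in the next file:

```go
package example

import (
	"os"

	minio "github.com/minio/minio-go"
)

// parallelUpload passes an *os.File so the reader satisfies
// io.ReaderAt and the parallel section-reader path is taken;
// NumThreads bounds the concurrent part uploads.
func parallelUpload(client *minio.Client, path string) (int64, error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return 0, err
	}
	return client.PutObject("mybucket", "big/"+st.Name(), f, st.Size(),
		minio.PutObjectOptions{NumThreads: 8})
}
```
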
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
new file mode 100755
index 0000000..0330cd9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -0,0 +1,267 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "runtime/debug"
+ "sort"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/s3utils"
+ "golang.org/x/net/http/httpguts"
+)
+
+// PutObjectOptions represents options specified by user for PutObject call
+type PutObjectOptions struct {
+ UserMetadata map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ ContentLanguage string
+ CacheControl string
+ ServerSideEncryption encrypt.ServerSide
+ NumThreads uint
+ StorageClass string
+ WebsiteRedirectLocation string
+}
+
+// getNumThreads - gets the number of threads to be used in the multipart
+// put object operation
+func (opts PutObjectOptions) getNumThreads() (numThreads int) {
+ if opts.NumThreads > 0 {
+ numThreads = int(opts.NumThreads)
+ } else {
+ numThreads = totalWorkers
+ }
+ return
+}
+
+// Header - constructs the headers from metadata entered by the user in the
+// PutObjectOptions struct.
+func (opts PutObjectOptions) Header() (header http.Header) {
+ header = make(http.Header)
+
+ if opts.ContentType != "" {
+ header["Content-Type"] = []string{opts.ContentType}
+ } else {
+ header["Content-Type"] = []string{"application/octet-stream"}
+ }
+ if opts.ContentEncoding != "" {
+ header["Content-Encoding"] = []string{opts.ContentEncoding}
+ }
+ if opts.ContentDisposition != "" {
+ header["Content-Disposition"] = []string{opts.ContentDisposition}
+ }
+ if opts.ContentLanguage != "" {
+ header["Content-Language"] = []string{opts.ContentLanguage}
+ }
+ if opts.CacheControl != "" {
+ header["Cache-Control"] = []string{opts.CacheControl}
+ }
+ if opts.ServerSideEncryption != nil {
+ opts.ServerSideEncryption.Marshal(header)
+ }
+ if opts.StorageClass != "" {
+ header[amzStorageClass] = []string{opts.StorageClass}
+ }
+ if opts.WebsiteRedirectLocation != "" {
+ header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation}
+ }
+ for k, v := range opts.UserMetadata {
+ if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) {
+ header["X-Amz-Meta-"+k] = []string{v}
+ } else {
+ header[k] = []string{v}
+ }
+ }
+ return
+}
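+
+// For illustration (the values are placeholders), a minimal sketch of the
+// headers this produces:
+//
+//	opts := PutObjectOptions{
+//		ContentType:  "application/json",
+//		UserMetadata: map[string]string{"color": "red"},
+//	}
+//	h := opts.Header()
+//	// h["Content-Type"]     -> []string{"application/json"}
+//	// h["X-Amz-Meta-color"] -> []string{"red"}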
+
+// validate() checks if the UserMetadata map has standard headers and raises an error if so.
+func (opts PutObjectOptions) validate() (err error) {
+ for k, v := range opts.UserMetadata {
+ if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) {
+ return ErrInvalidArgument(k + " unsupported user defined metadata name")
+ }
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return ErrInvalidArgument(v + " unsupported user defined metadata value")
+ }
+ }
+ return nil
+}
+
+// completedParts is a collection of parts sortable by their part numbers,
+// used for sorting the uploaded parts before completing the multipart request.
+type completedParts []CompletePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than 64MiB PutObject automatically does a
+// single atomic Put operation.
+// - For size larger than 64MiB PutObject automatically does a
+// multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation
+// until input stream reaches EOF. Maximum object size that can
+// be uploaded through this operation will be 5TiB.
+func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts)
+}
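+
+// A minimal usage sketch (the client value, bucket, object and file names
+// are illustrative placeholders):
+//
+//	file, err := os.Open("my-file")
+//	if err != nil {
+//		return err
+//	}
+//	defer file.Close()
+//	st, err := file.Stat()
+//	if err != nil {
+//		return err
+//	}
+//	n, err := c.PutObject("mybucket", "myobject", file, st.Size(),
+//		PutObjectOptions{ContentType: "application/octet-stream"})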
+
+func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+ // Check for largest object size allowed.
+ if size > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
+ }
+
+ // NOTE: Streaming signature is not supported by GCS.
+ if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+ // Do not compute MD5 for Google Cloud Storage.
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+
+ if c.overrideSignerType.IsV2() {
+ if size >= 0 && size < minPartSize {
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
+ }
+ if size < 0 {
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
+ }
+
+ if size < minPartSize {
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+ }
+ // For all sizes 64MiB and larger do multipart.
+ return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
+}
+
+func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to the server.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
+ if err != nil {
+ return 0, err
+ }
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return 0, err
+ }
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
+ for partNumber <= totalPartsCount {
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF && partNumber > 1 {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF {
+ return 0, rErr
+ }
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ "", "", int64(length), opts.ServerSideEncryption)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+ // For an unknown size we break away on read EOF;
+ // we do not have to upload till totalPartsCount.
+ if rErr == io.EOF {
+ break
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
new file mode 100755
index 0000000..f33df4d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -0,0 +1,303 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// RemoveBucket deletes the named bucket.
+//
+// All objects (including all object versions and delete markers)
+// in the bucket must be deleted before this request can succeed.
+func (c Client) RemoveBucket(bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Execute DELETE on bucket.
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Remove the location from cache on a successful delete.
+ c.bucketLocCache.Delete(bucketName)
+
+ return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ // if some unexpected error happened and max retry is reached, we want to let client know
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+ ObjectName string
+ Err error
+}
+
+// generateRemoveMultiObjectsRequest - generates the XML body for a Multi-Object Delete request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+ rmObjects := []deleteObject{}
+ for _, obj := range objects {
+ rmObjects = append(rmObjects, deleteObject{Key: obj})
+ }
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+ return xmlBytes
+}
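+
+// For illustration, two keys "a" and "b" marshal roughly to the following
+// body (whitespace added for readability):
+//
+//	<Delete>
+//	  <Quiet>true</Quiet>
+//	  <Object><Key>a</Key></Object>
+//	  <Object><Key>b</Key></Object>
+//	</Delete>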
+
+// processRemoveMultiObjectsResponse - parse the remove multi objects web service response
+// and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ err := xmlDecoder(body, rmResult)
+ if err != nil {
+ errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+ return
+ }
+
+ // Report deletions that returned an error.
+ for _, obj := range rmResult.UnDeletedObjects {
+ errorCh <- RemoveObjectError{
+ ObjectName: obj.Key,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }
+ }
+}
+
+// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
+func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ errorCh := make(chan RemoveObjectError, 1)
+
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: err,
+ }
+ return errorCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: ErrInvalidArgument("Objects channel cannot be nil"),
+ }
+ return errorCh
+ }
+
+ // Generate and call MultiDelete S3 requests based on entries received from objectsCh
+ go func(errorCh chan<- RemoveObjectError) {
+ maxEntries := 1000
+ finish := false
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Close error channel when Multi delete finishes.
+ defer close(errorCh)
+
+ // Loop over entries by 1000 and call MultiDelete requests
+ for {
+ if finish {
+ break
+ }
+ count := 0
+ var batch []string
+
+ // Try to gather 1000 entries
+ for object := range objectsCh {
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count == 0 {
+ // Multi Objects Delete API doesn't accept empty object list, quit immediately
+ break
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+ // Execute POST on bucket to remove objects.
+ resp, err := c.executeMethod(ctx, "POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
+ })
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ e := httpRespToErrorResponse(resp, bucketName, "")
+ errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+ }
+ }
+ if err != nil {
+ for _, b := range batch {
+ errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+ closeResponse(resp)
+ }
+ }(errorCh)
+ return errorCh
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove are received from objectsCh.
+// Remove failures are sent back via error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh)
+}
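+
+// A minimal usage sketch (bucket name and keys are placeholders); drain the
+// returned channel to observe any per-object failures:
+//
+//	objectsCh := make(chan string)
+//	go func() {
+//		defer close(objectsCh)
+//		for _, key := range []string{"obj1", "obj2"} {
+//			objectsCh <- key
+//		}
+//	}()
+//	for rErr := range c.RemoveObjects("mybucket", objectsCh) {
+//		fmt.Println("failed to remove", rErr.ObjectName, rErr.Err)
+//	}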
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Find multipart upload ids of the object to be aborted.
+ uploadIDs, err := c.findUploadIDs(bucketName, objectName)
+ if err != nil {
+ return err
+ }
+
+ for _, uploadID := range uploadIDs {
+ // abort incomplete multipart upload, based on the upload id passed.
+ err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Execute DELETE on multipart upload.
+ resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ // Abort has no response body, handle it for any errors.
+ var errorResponse ErrorResponse
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ // This is needed specifically for abort and it cannot
+ // be converged into default case.
+ errorResponse = ErrorResponse{
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ return errorResponse
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
new file mode 100755
index 0000000..8d8880c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -0,0 +1,245 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "time"
+)
+
+// listAllMyBucketsResult container for listBuckets response.
+type listAllMyBucketsResult struct {
+ // Container for one or more buckets.
+ Buckets struct {
+ Bucket []BucketInfo
+ }
+ Owner owner
+}
+
+// owner container for bucket owner information.
+type owner struct {
+ DisplayName string
+ ID string
+}
+
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
+ Prefix string
+}
+
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ MaxKeys int64
+ Name string
+
+ // Hold the token that will be sent in the next request to fetch the next group of keys
+ NextContinuationToken string
+
+ ContinuationToken string
+ Prefix string
+
+ // FetchOwner and StartAfter are currently not used
+ FetchOwner string
+ StartAfter string
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ Marker string
+ MaxKeys int64
+ Name string
+
+ // When the response is truncated (the IsTruncated element value in
+ // the response is true), you can use the key name in this field
+ // as the marker in the subsequent request to get the next set of
+ // objects. Object storage lists objects in alphabetical order.
+ // Note: this element is returned only if you have the delimiter
+ // request parameter specified. If the response does not include
+ // NextMarker and it is truncated, you can use the value of the
+ // last Key in the response as the marker in the subsequent request
+ // to get the next set of object keys.
+ NextMarker string
+ Prefix string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+ Bucket string
+ KeyMarker string
+ UploadIDMarker string `xml:"UploadIdMarker"`
+ NextKeyMarker string
+ NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+ EncodingType string
+ MaxUploads int64
+ IsTruncated bool
+ Uploads []ObjectMultipartInfo `xml:"Upload"`
+ Prefix string
+ Delimiter string
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+ ID string
+ DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ ETag string
+ LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
+ // Part number identifies the part.
+ PartNumber int
+
+ // Date and time the part was uploaded.
+ LastModified time.Time
+
+ // Entity tag returned when the part was uploaded, usually md5sum
+ // of the part.
+ ETag string
+
+ // Size of the uploaded part data.
+ Size int64
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+
+ Initiator initiator
+ Owner owner
+
+ StorageClass string
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated bool
+ ObjectParts []ObjectPart `xml:"Part"`
+
+ EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+ Location string
+ Bucket string
+ Key string
+ ETag string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+ // Part number identifies the part.
+ PartNumber int
+ ETag string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+ Parts []CompletePart `xml:"Part"`
+}
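+
+// For illustration, completing an upload with two parts marshals roughly to:
+//
+//	<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+//	  <Part><PartNumber>1</PartNumber><ETag>etag1</ETag></Part>
+//	  <Part><PartNumber>2</PartNumber><ETag>etag2</ETag></Part>
+//	</CompleteMultipartUpload>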
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+ Location string `xml:"LocationConstraint"`
+}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+ // These fields are ignored.
+ DeleteMarker bool
+ DeleteMarkerVersionID string
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
new file mode 100755
index 0000000..3b054c3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -0,0 +1,181 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// BucketExists verifies if the bucket exists and you have permission to access it.
+func (c Client) BucketExists(bucketName string) (bool, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return false, err
+ }
+
+ // Execute HEAD on bucketName.
+ resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ if ToErrorResponse(err).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ return false, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return false, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return true, nil
+}
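+
+// A minimal usage sketch (the bucket name is a placeholder):
+//
+//	found, err := c.BucketExists("mybucket")
+//	if err != nil {
+//		return err
+//	}
+//	if found {
+//		fmt.Println("bucket exists and is accessible")
+//	}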
+
+// List of header keys to be filtered, usually
+// from all S3 API http responses.
+var defaultFilterKeys = []string{
+ "Connection",
+ "Transfer-Encoding",
+ "Accept-Ranges",
+ "Date",
+ "Server",
+ "Vary",
+ "x-amz-bucket-region",
+ "x-amz-request-id",
+ "x-amz-id-2",
+ "Content-Security-Policy",
+ "X-Xss-Protection",
+
+ // Add new headers to be ignored.
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+ filterKeys := append([]string{
+ "ETag",
+ "Content-Length",
+ "Last-Modified",
+ "Content-Type",
+ }, defaultFilterKeys...)
+ return filterHeader(header, filterKeys)
+}
+
+// StatObject verifies if the object exists and you have permission to access it.
+func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+ return c.statObject(context.Background(), bucketName, objectName, opts)
+}
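+
+// A minimal usage sketch (bucket and object names are placeholders):
+//
+//	info, err := c.StatObject("mybucket", "myobject", StatObjectOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(info.Key, info.Size, info.ContentType)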
+
+// Lower level API for statObject supporting pre-conditions and range headers.
+func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Execute HEAD on objectName.
+ resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: opts.Header(),
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ // Trim the surrounding double quotes from the ETag.
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ md5sum = strings.TrimSuffix(md5sum, "\"")
+
+ // Parse Content-Length if it exists.
+ var size int64 = -1
+ contentLengthStr := resp.Header.Get("Content-Length")
+ if contentLengthStr != "" {
+ size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+ if err != nil {
+ // Content-Length is not valid
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+ }
+
+ // Parse Last-Modified, which has the HTTP time format.
+ date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Last-Modified time format is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ }
+
+ // Fetch content type if any present.
+ contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+
+ // Save object metadata info.
+ return ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: size,
+ LastModified: date,
+ ContentType: contentType,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
new file mode 100755
index 0000000..d8c47aa
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -0,0 +1,900 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+ /// Standard options.
+
+ // Parsed endpoint url provided by the user.
+ endpointURL *url.URL
+
+ // Holds various credential providers.
+ credsProvider *credentials.Credentials
+
+ // Custom signerType value overrides all credentials.
+ overrideSignerType credentials.SignatureType
+
+ // User supplied.
+ appInfo struct {
+ appName string
+ appVersion string
+ }
+
+ // Indicate whether we are using https or not
+ secure bool
+
+ // Needs allocation.
+ httpClient *http.Client
+ bucketLocCache *bucketLocationCache
+
+ // Advanced functionality.
+ isTraceEnabled bool
+ traceOutput io.Writer
+
+ // S3 specific accelerated endpoint.
+ s3AccelerateEndpoint string
+
+ // Region endpoint
+ region string
+
+ // Random seed.
+ random *rand.Rand
+
+ // lookup indicates type of url lookup supported by server. If not specified,
+ // default to Auto.
+ lookup BucketLookupType
+}
+
+// Options for New method
+type Options struct {
+ Creds *credentials.Credentials
+ Secure bool
+ Region string
+ BucketLookup BucketLookupType
+ // Add future fields here
+}
+
+// Global constants.
+const (
+ libraryName = "minio-go"
+ libraryVersion = "v6.0.6"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// Minio (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// BucketLookupType is type of url lookup supported by server.
+type BucketLookupType int
+
+// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
+const (
+ BucketLookupAuto BucketLookupType = iota
+ BucketLookupDNS
+ BucketLookupPath
+)
+
+// NewV2 - instantiate minio client with Amazon S3 signature version
+// '2' compatibility.
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
+ if err != nil {
+ return nil, err
+ }
+ clnt.overrideSignerType = credentials.SignatureV2
+ return clnt, nil
+}
+
+// NewV4 - instantiate minio client with Amazon S3 signature version
+// '4' compatibility.
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
+ if err != nil {
+ return nil, err
+ }
+ clnt.overrideSignerType = credentials.SignatureV4
+ return clnt, nil
+}
+
+// New - instantiate minio client, adds automatic verification of signature.
+func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto)
+ if err != nil {
+ return nil, err
+ }
+ // Google cloud storage should be set to signature V2, force it if not.
+ if s3utils.IsGoogleEndpoint(*clnt.endpointURL) {
+ clnt.overrideSignerType = credentials.SignatureV2
+ }
+ // If Amazon S3 set to signature v4.
+ if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
+ clnt.overrideSignerType = credentials.SignatureV4
+ }
+ return clnt, nil
+}
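+
+// A minimal usage sketch (the endpoint and credentials are placeholders):
+//
+//	c, err := New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
+//	if err != nil {
+//		return err
+//	}
+//	// c is ready for bucket and object operations.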
+
+// NewWithCredentials - instantiate minio client with credentials provider
+// for retrieving credentials from various credentials providers such as
+// IAM, File, Env etc.
+func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
+ return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
+}
+
+// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
+// NewWithRegion avoids bucket-location lookup operations and is slightly faster.
+// Use this function if your application deals with a single region.
+func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
+}
+
+// NewWithOptions - instantiate minio client with options
+func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
+ return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
+}
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *lockedRandSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// Redirect requests by re-signing the request.
+func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) >= 5 {
+ return errors.New("stopped after 5 redirects")
+ }
+ if len(via) == 0 {
+ return nil
+ }
+ lastRequest := via[len(via)-1]
+ var reAuth bool
+ for attr, val := range lastRequest.Header {
+ // if hosts do not match do not copy Authorization header
+ if attr == "Authorization" && req.Host != lastRequest.Host {
+ reAuth = true
+ continue
+ }
+ if _, ok := req.Header[attr]; !ok {
+ req.Header[attr] = val
+ }
+ }
+
+ *c.endpointURL = *req.URL
+
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return err
+ }
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ region = c.region
+ )
+
+ // If a custom signer is set, override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
+ if reAuth {
+ // Check if there is no region override, if not get it from the URL if possible.
+ if region == "" {
+ region = s3utils.GetRegionFromURL(*c.endpointURL)
+ }
+ switch {
+ case signerType.IsV2():
+ return errors.New("signature V2 cannot support redirection")
+ case signerType.IsV4():
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
+ }
+ }
+ return nil
+}
+
+func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) {
+ // construct endpoint.
+ endpointURL, err := getEndpointURL(endpoint, secure)
+ if err != nil {
+ return nil, err
+ }
+
+ // instantiate new Client.
+ clnt := new(Client)
+
+ // Save the credentials.
+ clnt.credsProvider = creds
+
+ // Remember whether we are using https or not
+ clnt.secure = secure
+
+ // Save endpoint URL, user agent for future uses.
+ clnt.endpointURL = endpointURL
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{
+ Transport: DefaultTransport,
+ CheckRedirect: clnt.redirectHeaders,
+ }
+
+ // Sets custom region; if region is empty the bucket location cache is used automatically.
+ if region == "" {
+ region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+ }
+ clnt.region = region
+
+ // Instantiate bucket location cache.
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Introduce a new locked random seed.
+ clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+ // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
+ // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
+ clnt.lookup = lookup
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName string, appVersion string) {
+ // if app name and version are not set, we do not set a new user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo = struct {
+ appName string
+ appVersion string
+ }{}
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// SetCustomTransport - set new custom transport.
+func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
+ // Set this to override default transport
+ // ``http.DefaultTransport``.
+ //
+ // This transport is usually needed for debugging OR to add your
+ // own custom TLS certificates on the client transport, for custom
+ // CA's and certs which are not part of standard certificate
+ // authority follow this example :-
+ //
+ // tr := &http.Transport{
+ // TLSClientConfig: &tls.Config{RootCAs: pool},
+ // DisableCompression: true,
+ // }
+ // api.SetCustomTransport(tr)
+ //
+ if c.httpClient != nil {
+ c.httpClient.Transport = customHTTPTransport
+ }
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+ // if outputStream is nil then default to os.Stdout.
+ if outputStream == nil {
+ outputStream = os.Stdout
+ }
+ // Sets a new output stream.
+ c.traceOutput = outputStream
+
+ // Enable tracing.
+ c.isTraceEnabled = true
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+ // Disable tracing.
+ c.isTraceEnabled = false
+}
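+
+// A minimal usage sketch: dump request/response headers to stdout while
+// debugging, then switch tracing back off.
+//
+//	c.TraceOn(os.Stdout)
+//	// ... perform a few API calls ...
+//	c.TraceOff()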
+
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ c.s3AccelerateEndpoint = accelerateEndpoint
+ }
+}
+
+// hashMaterials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For a signature v4 request over an insecure connection, compute only sha256.
+// - For a signature v4 request over a secure connection, compute only md5.
+// - For an anonymous request, compute md5.
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+ hashSums = make(map[string][]byte)
+ hashAlgos = make(map[string]hash.Hash)
+ if c.overrideSignerType.IsV4() {
+ if c.secure {
+ hashAlgos["md5"] = md5.New()
+ } else {
+ hashAlgos["sha256"] = sha256.New()
+ }
+ } else {
+ if c.overrideSignerType.IsAnonymous() {
+ hashAlgos["md5"] = md5.New()
+ }
+ }
+ return hashAlgos, hashSums
+}
+
+// requestMetadata - is a container for all the values needed to make a request.
+type requestMetadata struct {
+ // If set newRequest presigns the URL.
+ presignURL bool
+
+ // User supplied.
+ bucketName string
+ objectName string
+ queryValues url.Values
+ customHeader http.Header
+ expires int64
+
+ // Generated by our internal code.
+ bucketLocation string
+ contentBody io.Reader
+ contentLength int64
+ contentMD5Base64 string // carries base64 encoded md5sum
+ contentSHA256Hex string // carries hex encoded sha256sum
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+ // Starts http dump.
+ _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Filter out Signature field from Authorization header.
+ origAuth := req.Header.Get("Authorization")
+ if origAuth != "" {
+ req.Header.Set("Authorization", redactSignature(origAuth))
+ }
+
+ // Only display request header.
+ reqTrace, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ return err
+ }
+
+ // Write request to trace output.
+ _, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+ if err != nil {
+ return err
+ }
+
+ // Only display response header.
+ var respTrace []byte
+
+ // For errors we make sure to dump response body as well.
+ if resp.StatusCode != http.StatusOK &&
+ resp.StatusCode != http.StatusPartialContent &&
+ resp.StatusCode != http.StatusNoContent {
+ respTrace, err = httputil.DumpResponse(resp, true)
+ if err != nil {
+ return err
+ }
+ } else {
+ // WORKAROUND for https://github.com/golang/go/issues/13942.
+ // httputil.DumpResponse does not print response headers for
+ // all successful calls which have response ContentLength set
+ // to zero. Keep this workaround until the above bug is fixed.
+ if resp.ContentLength == 0 {
+ var buffer bytes.Buffer
+ if err = resp.Header.Write(&buffer); err != nil {
+ return err
+ }
+ respTrace = buffer.Bytes()
+ respTrace = append(respTrace, []byte("\r\n")...)
+ } else {
+ respTrace, err = httputil.DumpResponse(resp, false)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // Write response to trace output.
+ _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+ if err != nil {
+ return err
+ }
+
+ // Ends the http dump.
+ _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+ if err != nil {
+ return err
+ }
+
+ // Returns success.
+ return nil
+}
+
+// do - execute http request.
+func (c Client) do(req *http.Request) (*http.Response, error) {
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang versions fix this issue properly.
+ if urlErr, ok := err.(*url.Error); ok {
+ if strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+ }
+ }
+ }
+ return nil, err
+ }
+
+ // Response cannot be nil; report an error if that's the case.
+ if resp == nil {
+ msg := "Response is empty. " + reportIssue
+ return nil, ErrInvalidArgument(msg)
+ }
+
+ // If trace is enabled, dump http request and response.
+ if c.isTraceEnabled {
+ err = c.dumpHTTP(req, resp)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return resp, nil
+}
+
+// List of success status.
+var successStatus = []int{
+ http.StatusOK,
+ http.StatusNoContent,
+ http.StatusPartialContent,
+}
+
+// executeMethod - instantiates a given method, and retries the
+// request upon any error up to maxRetries attempts in a binomially
+// delayed manner using a standard backoff algorithm.
+func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
+ var isRetryable bool // Indicates if request can be retried.
+ var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ var reqRetry = MaxRetry // Indicates how many times we can retry the request
+
+ if metadata.contentBody != nil {
+ // Check if body is seekable then it is retryable.
+ bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+ switch bodySeeker {
+ case os.Stdin, os.Stdout, os.Stderr:
+ isRetryable = false
+ }
+ // Retry only when reader is seekable
+ if !isRetryable {
+ reqRetry = 1
+ }
+
+ // Figure out if the body can be closed - if yes
+ // we will definitely close it upon the function
+ // return.
+ bodyCloser, ok := metadata.contentBody.(io.Closer)
+ if ok {
+ defer bodyCloser.Close()
+ }
+ }
+
+ // Create a done channel to control 'newRetryTimer' go routine.
+ doneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+ // Blank identifier is kept here on purpose since 'range' without
+ // blank identifiers is only supported since go1.4
+ // https://golang.org/doc/go1.4#forrange.
+ for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+ // Retry executes the following function body if request has an
+ // error until maxRetries have been exhausted, retry attempts are
+ // performed after waiting for a given period of time in a
+ // binomial fashion.
+ if isRetryable {
+ // Seek back to beginning for each attempt.
+ if _, err = bodySeeker.Seek(0, 0); err != nil {
+ // If seek failed, no need to retry.
+ return nil, err
+ }
+ }
+
+ // Instantiate a new request.
+ var req *http.Request
+ req, err = c.newRequest(method, metadata)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+ return nil, err
+ }
+
+ // Add context to request
+ req = req.WithContext(ctx)
+
+ // Initiate the request.
+ res, err = c.do(req)
+ if err != nil {
+ // For supported http requests errors verify.
+ if isHTTPReqErrorRetryable(err) {
+ continue // Retry.
+ }
+ // For other errors, return here no need to retry.
+ return nil, err
+ }
+
+ // For any known successful http status, return quickly.
+ for _, httpStatus := range successStatus {
+ if httpStatus == res.StatusCode {
+ return res, nil
+ }
+ }
+
+ // Read the body to be saved later.
+ errBodyBytes, err := ioutil.ReadAll(res.Body)
+ // res.Body should be closed
+ closeResponse(res)
+ if err != nil {
+ return nil, err
+ }
+
+ // Save the body.
+ errBodySeeker := bytes.NewReader(errBodyBytes)
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // For errors verify if its retryable otherwise fail quickly.
+ errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
+
+ // Save the body back again.
+ errBodySeeker.Seek(0, 0) // Seek back to starting point.
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // If the bucket region is set in the error response and the error
+ // code indicates an invalid region, we can retry the request
+ // with the new region.
+ //
+ // Additionally we should only retry if bucketLocation and custom
+ // region is empty.
+ if metadata.bucketLocation == "" && c.region == "" {
+ if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+ if metadata.bucketName != "" && errResponse.Region != "" {
+ // Gather Cached location only if bucketName is present.
+ if _, ok := c.bucketLocCache.Get(metadata.bucketName); ok {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ }
+ }
+ }
+
+ // Verify if error response code is retryable.
+ if isS3CodeRetryable(errResponse.Code) {
+ continue // Retry.
+ }
+
+ // Verify if http status code is retryable.
+ if isHTTPStatusRetryable(res.StatusCode) {
+ continue // Retry.
+ }
+
+ // For all other cases break out of the retry loop.
+ break
+ }
+ return res, err
+}
+
+// newRequest - instantiate a new HTTP request for a given method.
+func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
+ // If no method is supplied default to 'POST'.
+ if method == "" {
+ method = "POST"
+ }
+
+ location := metadata.bucketLocation
+ if location == "" {
+ if metadata.bucketName != "" {
+ // Gather location only if bucketName is present.
+ location, err = c.getBucketLocation(metadata.bucketName)
+ if err != nil {
+ if ToErrorResponse(err).Code != "AccessDenied" {
+ return nil, err
+ }
+ }
+ // Upon AccessDenied error on fetching bucket location, default
+ // to possible locations based on endpoint URL. This can usually
+ // happen when GetBucketLocation() is disabled using IAM policies.
+ }
+ if location == "" {
+ location = getDefaultLocation(*c.endpointURL, c.region)
+ }
+ }
+
+ // Look if target url supports virtual host.
+ isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)
+
+ // Construct a new target URL.
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize a new HTTP request for the method.
+ req, err = http.NewRequest(method, targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // If a custom signer is set, override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
+ // Generate presign url if needed, return right here.
+ if metadata.expires != 0 && metadata.presignURL {
+ if signerType.IsAnonymous() {
+ return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
+ }
+ if signerType.IsV2() {
+ // Presign URL with signature v2.
+ req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
+ } else if signerType.IsV4() {
+ // Presign URL with signature v4.
+ req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
+ }
+ return req, nil
+ }
+
+ // Set 'User-Agent' header for the request.
+ c.setUserAgent(req)
+
+ // Set all headers.
+ for k, v := range metadata.customHeader {
+ req.Header.Set(k, v[0])
+ }
+
+ // Go net/http notoriously closes the request body.
+ // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
+ // This can cause underlying *os.File seekers to fail; avoid that
+ // by making sure to wrap the closer as a nop.
+ if metadata.contentLength == 0 {
+ req.Body = nil
+ } else {
+ req.Body = ioutil.NopCloser(metadata.contentBody)
+ }
+
+ // Set incoming content-length.
+ req.ContentLength = metadata.contentLength
+ if req.ContentLength <= -1 {
+ // For unknown content length, we upload using transfer-encoding: chunked.
+ req.TransferEncoding = []string{"chunked"}
+ }
+
+ // set md5Sum for content protection.
+ if len(metadata.contentMD5Base64) > 0 {
+ req.Header.Set("Content-Md5", metadata.contentMD5Base64)
+ }
+
+ // For anonymous requests just return.
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ switch {
+ case signerType.IsV2():
+ // Add signature version '2' authorization header.
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+ case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
+ // Streaming signature is used by default for a PUT object request. Additionally we also
+ // look if the initialized client is secure, if yes then we don't need to perform
+ // streaming signature.
+ req = s3signer.StreamingSignV4(req, accessKeyID,
+ secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
+ default:
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ shaHeader := unsignedPayload
+ if metadata.contentSHA256Hex != "" {
+ shaHeader = metadata.contentSHA256Hex
+ }
+ req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+
+ // Add signature version '4' authorization header.
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
+ }
+
+ // Return request.
+ return req, nil
+}
+
+// setUserAgent - sets the 'User-Agent' header on the request.
+func (c Client) setUserAgent(req *http.Request) {
+ req.Header.Set("User-Agent", libraryUserAgent)
+ if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
+ req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
+ }
+}
+
+// makeTargetURL makes a new target url.
+func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
+ host := c.endpointURL.Host
+ // For Amazon S3 endpoint, try to fetch location based endpoint.
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ if c.s3AccelerateEndpoint != "" && bucketName != "" {
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ // Disable transfer acceleration for non-compliant bucket names.
+ if strings.Contains(bucketName, ".") {
+ return nil, ErrTransferAccelerationBucket(bucketName)
+ }
+ // If transfer acceleration is requested set new host.
+ // For more details about enabling transfer acceleration read here.
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ host = c.s3AccelerateEndpoint
+ } else {
+ // Do not change the host if the endpoint URL is a FIPS S3 endpoint.
+ if !s3utils.IsAmazonFIPSGovCloudEndpoint(*c.endpointURL) {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation)
+ }
+ }
+ }
+
+ // Save scheme.
+ scheme := c.endpointURL.Scheme
+
+ // Strip port 80 and 443 so we won't send these ports in Host header.
+ // The reason is that browsers and curl automatically remove :80 and :443
+ // from generated presigned urls, which would otherwise cause a signature mismatch.
+ if h, p, err := net.SplitHostPort(host); err == nil {
+ if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
+ host = h
+ }
+ }
+
+ urlStr := scheme + "://" + host + "/"
+ // Make URL only if bucketName is available, otherwise use the
+ // endpoint URL.
+ if bucketName != "" {
+ // If endpoint supports virtual host style use that always.
+ // Currently only S3 and Google Cloud Storage would support
+ // virtual host style.
+ if isVirtualHostStyle {
+ urlStr = scheme + "://" + bucketName + "." + host + "/"
+ if objectName != "" {
+ urlStr = urlStr + s3utils.EncodePath(objectName)
+ }
+ } else {
+ // If not fall back to using path style.
+ urlStr = urlStr + bucketName + "/"
+ if objectName != "" {
+ urlStr = urlStr + s3utils.EncodePath(objectName)
+ }
+ }
+ }
+
+ // If there are any query values, add them to the end.
+ if len(queryValues) > 0 {
+ urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
+ }
+
+ return url.Parse(urlStr)
+}
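+
+// For illustration (the host is a placeholder), bucket "mybucket" and
+// object "a/b.txt" yield:
+//
+//	virtual-host style: https://mybucket.example.com/a/b.txt
+//	path style:         https://example.com/mybucket/a/b.txt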
+
+// returns true if virtual hosted style requests are to be used.
+func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
+ if bucketName == "" {
+ return false
+ }
+
+ if c.lookup == BucketLookupDNS {
+ return true
+ }
+ if c.lookup == BucketLookupPath {
+ return false
+ }
+
+	// Default to virtual-host style only for Amazon/Google storage. In all
+	// other cases use path style requests.
+ return s3utils.IsVirtualHostSupported(url, bucketName)
+}
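
To make the virtual-host versus path-style split in makeTargetURL concrete,
here is a small sketch of the two URL shapes it produces (illustrative
values only; path encoding via s3utils.EncodePath is omitted):

package main

import "fmt"

func main() {
	const scheme, host, bucket, object = "https", "s3.amazonaws.com", "photos", "2017/pic.png"
	virtualHost := scheme + "://" + bucket + "." + host + "/" + object
	pathStyle := scheme + "://" + host + "/" + bucket + "/" + object
	fmt.Println(virtualHost) // https://photos.s3.amazonaws.com/2017/pic.png
	fmt.Println(pathStyle)   // https://s3.amazonaws.com/photos/2017/pic.png
}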
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
new file mode 100755
index 0000000..aa9f840
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -0,0 +1,39 @@
+# version format
+version: "{build}"
+
+# Operating system (build VM template)
+os: Windows Server 2012 R2
+
+clone_folder: c:\gopath\src\github.com\minio\minio-go
+
+# environment variables
+environment:
+ GOPATH: c:\gopath
+ GO15VENDOREXPERIMENT: 1
+
+# scripts that run after cloning repository
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+ - go env
+ - go get -u github.com/golang/lint/golint
+ - go get -u github.com/remyoudompheng/go-misc/deadcode
+ - go get -u github.com/gordonklaus/ineffassign
+ - go get -u golang.org/x/crypto/argon2
+ - go get -t ./...
+
+# to run your custom scripts instead of automatic MSBuild
+build_script:
+ - go vet ./...
+ - gofmt -s -l .
+ - golint -set_exit_status github.com/minio/minio-go...
+ - deadcode
+ - ineffassign .
+ - go test -short -v
+ - go test -short -race -v
+
+# to disable automatic tests
+test: off
+
+# to disable deployment
+deploy: off
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
new file mode 100755
index 0000000..cac7ad7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -0,0 +1,221 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "net/url"
+ "path"
+ "sync"
+
+ "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// bucketLocationCache - Provides simple mechanism to hold bucket
+// locations in memory.
+type bucketLocationCache struct {
+ // mutex is used for handling the concurrent
+ // read/write requests for cache.
+ sync.RWMutex
+
+ // items holds the cached bucket locations.
+ items map[string]string
+}
+
+// newBucketLocationCache - Provides a new bucket location cache to be
+// used internally with the client object.
+func newBucketLocationCache() *bucketLocationCache {
+ return &bucketLocationCache{
+ items: make(map[string]string),
+ }
+}
+
+// Get - Returns a value of a given key if it exists.
+func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
+ r.RLock()
+ defer r.RUnlock()
+ location, ok = r.items[bucketName]
+ return
+}
+
+// Set - Will persist a value into cache.
+func (r *bucketLocationCache) Set(bucketName string, location string) {
+ r.Lock()
+ defer r.Unlock()
+ r.items[bucketName] = location
+}
+
+// Delete - Deletes a bucket name from cache.
+func (r *bucketLocationCache) Delete(bucketName string) {
+ r.Lock()
+ defer r.Unlock()
+ delete(r.items, bucketName)
+}
+
+// GetBucketLocation - gets the location for the bucket name from the location
+// cache; if not cached, fetches it freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ return c.getBucketLocation(bucketName)
+}
+
+// getBucketLocation - gets the location for the bucketName from the location
+// map cache; if not cached, fetches it freshly by making a new request.
+func (c Client) getBucketLocation(bucketName string) (string, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+
+	// If a region is set, there is no need to fetch the bucket location.
+ if c.region != "" {
+ return c.region, nil
+ }
+
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ return location, nil
+ }
+
+ // Initialize a new request.
+ req, err := c.getBucketLocationRequest(bucketName)
+ if err != nil {
+ return "", err
+ }
+
+ // Initiate the request.
+ resp, err := c.do(req)
+ defer closeResponse(resp)
+ if err != nil {
+ return "", err
+ }
+ location, err := processBucketLocationResponse(resp, bucketName)
+ if err != nil {
+ return "", err
+ }
+ c.bucketLocCache.Set(bucketName, location)
+ return location, nil
+}
+
+// processes the getBucketLocation http response from the server.
+func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ err = httpRespToErrorResponse(resp, bucketName, "")
+ errResp := ToErrorResponse(err)
+ // For access denied error, it could be an anonymous
+ // request. Move forward and let the top level callers
+ // succeed if possible based on their policy.
+ if errResp.Code == "AccessDenied" {
+ return "us-east-1", nil
+ }
+ return "", err
+ }
+ }
+
+ // Extract location.
+ var locationConstraint string
+ err = xmlDecoder(resp.Body, &locationConstraint)
+ if err != nil {
+ return "", err
+ }
+
+ location := locationConstraint
+	// An empty location defaults to 'us-east-1'.
+ if location == "" {
+ location = "us-east-1"
+ }
+
+	// A location of 'EU' is converted to the meaningful 'eu-west-1'.
+ if location == "EU" {
+ location = "eu-west-1"
+ }
+
+	// Note: the caller (getBucketLocation) saves the location into the cache.
+	return location, nil
+}
+
+// getBucketLocationRequest - Wrapper creates a new getBucketLocation request.
+func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
+ // Set location query.
+ urlValues := make(url.Values)
+ urlValues.Set("location", "")
+
+	// GetBucketLocation requests are always made path style.
+ targetURL := c.endpointURL
+ targetURL.Path = path.Join(bucketName, "") + "/"
+ targetURL.RawQuery = urlValues.Encode()
+
+ // Get a new HTTP request for the method.
+ req, err := http.NewRequest("GET", targetURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set UserAgent for the request.
+ c.setUserAgent(req)
+
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+	// If a custom signer type is set, it overrides the default behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ if signerType.IsV2() {
+		// GetBucketLocation calls should always be path style.
+ isVirtualHost := false
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
+ return req, nil
+ }
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ contentSha256 := emptySHA256Hex
+ if c.secure {
+ contentSha256 = unsignedPayload
+ }
+
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
+ return req, nil
+}
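
bucketLocationCache above is a plain map guarded by an RWMutex. A
self-contained sketch of the same read-through pattern (re-implemented
here because the vendored type is unexported):

package main

import (
	"fmt"
	"sync"
)

// locCache mirrors bucketLocationCache: read locks for Get, write locks for Set.
type locCache struct {
	sync.RWMutex
	items map[string]string
}

func (c *locCache) Get(bucket string) (string, bool) {
	c.RLock()
	defer c.RUnlock()
	loc, ok := c.items[bucket]
	return loc, ok
}

func (c *locCache) Set(bucket, loc string) {
	c.Lock()
	defer c.Unlock()
	c.items[bucket] = loc
}

func main() {
	c := &locCache{items: make(map[string]string)}
	c.Set("photos", "eu-west-1")
	if loc, ok := c.Get("photos"); ok {
		fmt.Println(loc) // eu-west-1
	}
}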
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
new file mode 100755
index 0000000..ea303dd
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -0,0 +1,273 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// NotificationEventType is an S3 notification event associated with the bucket notification configuration.
+type NotificationEventType string
+
+// The role of each event type is described at:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
+ ObjectCreatedPut = "s3:ObjectCreated:Put"
+ ObjectCreatedPost = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+ ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectAccessedGet = "s3:ObjectAccessed:Get"
+ ObjectAccessedHead = "s3:ObjectAccessed:Head"
+ ObjectAccessedAll = "s3:ObjectAccessed:*"
+ ObjectRemovedAll = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service.
+// ARNs are described at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource}
+}
+
+// String returns the string format of the ARN.
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+ ID string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []NotificationEventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+ return NotificationConfig{Arn: arn, Filter: &Filter{}}
+}
+
+// AddEvents adds one or more events to the current notification config.
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix configuration to the current notification config
+func (t *NotificationConfig) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+	// Replace any existing suffix rule; otherwise append a new one to the list.
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix configuration to the current notification config
+func (t *NotificationConfig) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+	// Replace any existing prefix rule; otherwise append a new one to the list.
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ NotificationConfig
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ NotificationConfig
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ NotificationConfig
+ Lambda string `xml:"CloudFunction"`
+}
+
+// BucketNotification - the struct that represents the whole XML to be sent to the web service
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool {
+ newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+		// Check if the new config matches an existing one (same ARN and filter).
+ if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range topicConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+ return true
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool {
+ newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range queueConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+ return true
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool {
+ newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
+
+ existingConfig := set.NewStringSet()
+ for _, v := range n.Events {
+ existingConfig.Add(string(v))
+ }
+
+ newConfig := set.NewStringSet()
+ for _, v := range lambdaConfig.Events {
+ newConfig.Add(string(v))
+ }
+
+ if !newConfig.Intersection(existingConfig).IsEmpty() {
+ return false
+ }
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+ return true
+}
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
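
Putting the exported helpers above together, a sketch of building a queue
notification for *.jpg uploads (the ARN values are placeholders):

package main

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

func main() {
	arn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "uploads")
	cfg := minio.NewNotificationConfig(arn)
	cfg.AddEvents(minio.ObjectCreatedAll)
	cfg.AddFilterSuffix(".jpg")

	var bn minio.BucketNotification
	if ok := bn.AddQueue(cfg); !ok {
		fmt.Println("overlapping configuration, not added")
		return
	}
	fmt.Printf("%+v\n", bn.QueueConfigs)
}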
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
new file mode 100755
index 0000000..7377423
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+/// Multipart upload defaults.
+
+// absMinPartSize - absolute minimum part size (5 MiB) below which
+// a part in a multipart upload may not be uploaded.
+const absMinPartSize = 1024 * 1024 * 5
+
+// minPartSize - minimum part size (64 MiB) per object; above this size
+// putObject behaves internally as multipart.
+const minPartSize = 1024 * 1024 * 64
+
+// maxPartsCount - maximum number of parts for a single multipart session.
+const maxPartsCount = 10000
+
+// maxPartSize - maximum part size 5GiB for a single multipart upload
+// operation.
+const maxPartSize = 1024 * 1024 * 1024 * 5
+
+// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
+// operation.
+const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
+
+// maxMultipartPutObjectSize - maximum size 5TiB of object for
+// Multipart operation.
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Total number of parallel workers used for multipart operation.
+const totalWorkers = 4
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
+
+// Storage class header constant.
+const amzStorageClass = "X-Amz-Storage-Class"
+
+// Website redirect location header constant
+const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location"
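
These constants jointly bound multipart uploads: an upload may have at most
maxPartsCount parts, so larger objects force proportionally larger parts,
never smaller than minPartSize. A sketch of that arithmetic (this mirrors
the intent of the limits above, not the library's exact partitioning
routine):

package main

import "fmt"

// partSizeFor doubles the part size until the object fits in 10000 parts.
func partSizeFor(objectSize int64) int64 {
	const (
		minPartSize   = 1024 * 1024 * 64
		maxPartsCount = 10000
	)
	partSize := int64(minPartSize)
	for objectSize/partSize > maxPartsCount {
		partSize *= 2
	}
	return partSize
}

func main() {
	fmt.Println(partSizeFor(5 * 1024 * 1024 * 1024 * 1024)) // 5 TiB -> 1 GiB parts
}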
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
new file mode 100755
index 0000000..a5017d8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -0,0 +1,150 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+ "strings"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+ *Client
+}
+
+// NewCore - Returns a new initialized Core client. This Core client should
+// only be used under special conditions, such as needing access to lower
+// level primitives to write your own wrappers.
+func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
+ var s3Client Core
+ client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
+ if err != nil {
+ return nil, err
+ }
+ s3Client.Client = client
+ return &s3Client, nil
+}
+
+// ListObjects - Lists all the objects at a prefix; optionally, marker and
+// delimiter can be used to further filter the results.
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+ return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to support iteration over the results.
+func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
+ return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter)
+}
+
+// CopyObject - copies an object from source object to destination object on server side.
+func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
+ return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
+ partID, startOffset, length, metadata)
+}
+
+// PutObject - Upload object. Uploads using single PUT call.
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
+ opts := PutObjectOptions{}
+ m := make(map[string]string)
+	for k, v := range metadata {
+		switch strings.ToLower(k) {
+		case "content-encoding":
+			opts.ContentEncoding = v
+		case "content-disposition":
+			opts.ContentDisposition = v
+		case "content-language":
+			opts.ContentLanguage = v
+		case "content-type":
+			opts.ContentType = v
+		case "cache-control":
+			opts.CacheControl = v
+		case strings.ToLower(amzWebsiteRedirectLocation):
+			opts.WebsiteRedirectLocation = v
+		default:
+			m[k] = v
+		}
+	}
+ opts.UserMetadata = m
+ return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+ result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
+ return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+ return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
+ return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+ return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
+ _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
+ Parts: parts,
+ })
+ return err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
+ return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(bucket string) (string, error) {
+ return c.getBucketPolicy(bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error {
+ return c.putBucketPolicy(bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+ return c.getObject(context.Background(), bucketName, objectName, opts)
+}
+
+// StatObject is a lower level API implemented to support special
+// conditions matching etag, modtime on a request.
+func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ return c.statObject(context.Background(), bucketName, objectName, opts)
+}
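
A sketch of driving a multipart upload by hand with Core, using only the
methods above (the endpoint and keys are placeholders, and the CompletePart
and ObjectPart field names are assumed from the library's types):

package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS", "SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}

	uploadID, err := core.NewMultipartUpload("mybucket", "myobject", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}

	data := bytes.Repeat([]byte("x"), 5*1024*1024) // a single 5 MiB part
	part, err := core.PutObjectPart("mybucket", "myobject", uploadID, 1,
		bytes.NewReader(data), int64(len(data)), "", "")
	if err != nil {
		log.Fatalln(err)
	}

	if err := core.CompleteMultipartUpload("mybucket", "myobject", uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: part.ETag}}); err != nil {
		log.Fatalln(err)
	}
}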
diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go
new file mode 100755
index 0000000..8f32291
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/hook-reader.go
@@ -0,0 +1,71 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "io"
+
+// hookReader hooks an additional reader into the source stream. It is
+// useful for making progress bars. The second reader is notified about
+// the exact number of bytes read from the primary source on each Read
+// operation.
+type hookReader struct {
+ source io.Reader
+ hook io.Reader
+}
+
+// Seek implements io.Seeker. It seeks the source if the source supports
+// seeking; otherwise it seeks the hook if that supports seeking.
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
+	// If the source implements io.Seeker, use it.
+ sourceSeeker, ok := hr.source.(io.Seeker)
+ if ok {
+ return sourceSeeker.Seek(offset, whence)
+ }
+	// Otherwise, if the hook implements io.Seeker, use it.
+ hookSeeker, ok := hr.hook.(io.Seeker)
+ if ok {
+ return hookSeeker.Seek(offset, whence)
+ }
+ return n, nil
+}
+
+// Read implements io.Reader. It always reads from the source; the 'n'
+// bytes read are then reported through the hook. Returns an error for
+// all non-io.EOF conditions.
+func (hr *hookReader) Read(b []byte) (n int, err error) {
+ n, err = hr.source.Read(b)
+ if err != nil && err != io.EOF {
+ return n, err
+ }
+ // Progress the hook with the total read bytes from the source.
+ if _, herr := hr.hook.Read(b[:n]); herr != nil {
+ if herr != io.EOF {
+ return n, herr
+ }
+ }
+ return n, err
+}
+
+// newHook returns an io.Reader which wraps the source in a hookReader
+// that reports the data read from the source to the hook.
+func newHook(source, hook io.Reader) io.Reader {
+ if hook == nil {
+ return source
+ }
+ return &hookReader{source, hook}
+}
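
The hook's Read is handed exactly the bytes just read from the source,
which is all a progress meter needs. A self-contained sketch of the same
pattern (re-implemented because hookReader is unexported):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// progressReader mirrors hookReader: every Read from source is echoed to hook.
type progressReader struct {
	source io.Reader
	hook   io.Reader
}

func (r *progressReader) Read(b []byte) (int, error) {
	n, err := r.source.Read(b)
	if n > 0 {
		r.hook.Read(b[:n]) // notify the hook; hook errors ignored in this sketch
	}
	return n, err
}

// byteCounter is a trivial hook that only tallies the bytes it sees.
type byteCounter struct{ total int64 }

func (c *byteCounter) Read(b []byte) (int, error) {
	c.total += int64(len(b))
	return len(b), nil
}

func main() {
	counter := &byteCounter{}
	r := &progressReader{source: strings.NewReader("hello, world"), hook: counter}
	_, _ = ioutil.ReadAll(r)
	fmt.Println(counter.total) // 12
}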
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
new file mode 100755
index 0000000..e29826f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, Chain's
+// Retrieve() will return the no-credentials (anonymous) value.
+//
+// If a Provider is found which returns a valid credentials Value, Chain
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvAWSS3{},
+// &credentials.EnvMinio{},
+// })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+// if err != nil {
+// log.Fatalln(err)
+// }
+//
+type Chain struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return New(&Chain{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value; it returns anonymous (no
+// credentials) if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+ for _, p := range c.Providers {
+ creds, _ := p.Retrieve()
+ // Always prioritize non-anonymous providers, if any.
+ if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+ continue
+ }
+ c.curr = p
+ return creds, nil
+ }
+	// At this point we have exhausted all the providers and are left
+	// without any credentials; return anonymous.
+ return Value{
+ SignerType: SignatureAnonymous,
+ }, nil
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
new file mode 100755
index 0000000..130746f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+ "version": "8",
+ "hosts": {
+ "play": {
+ "url": "https://play.minio.io:9000",
+ "accessKey": "Q3AM3UQ867SPQQA43P2F",
+ "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ "api": "S3v2"
+ },
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "accessKey",
+ "secretKey": "secret",
+ "api": "S3v4"
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
new file mode 100755
index 0000000..4bfdad4
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
@@ -0,0 +1,175 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Signature Type.
+ SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or was empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type IAMCredentialProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+	// The date/time at which the credentials expire.
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for synchronous safe retrieval of credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sync.Mutex
+
+ creds Value
+ forceRefresh bool
+ provider Provider
+}
+
+// New returns a pointer to a new Credentials with the provider set.
+func New(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or an error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be refreshed.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
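
Tying Provider, Expiry and Credentials together: a toy provider that embeds
Expiry as the comment above recommends, handing out a fixed key that
"expires" after one minute (all values are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/pkg/credentials"
)

// tokenProvider embeds Expiry so IsExpired comes for free.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Valid for one minute, refreshed 10s early via the expiry window.
	p.SetExpiration(time.Now().Add(time.Minute), 10*time.Second)
	return credentials.Value{
		AccessKeyID:     "TOYKEY",
		SecretAccessKey: "TOYSECRET",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&tokenProvider{})
	v, err := creds.Get()
	fmt.Println(v.AccessKeyID, err) // TOYKEY <nil>
}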
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
new file mode 100755
index 0000000..7fc91d9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
new file mode 100755
index 0000000..c48784b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package credentials provides credential retrieval and management
+// for S3 compatible object storage.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := NewEnvAWS()
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := NewIAM("")
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// New function.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := New(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
new file mode 100755
index 0000000..f9b2cc3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not been retrieved yet.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
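
A short usage sketch: seed the environment, then retrieve through the
provider (toy values):

package main

import (
	"fmt"
	"os"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "TOYKEY")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "TOYSECRET")

	creds := credentials.NewEnvAWS()
	v, err := creds.Get()
	fmt.Println(v.AccessKeyID, err) // TOYKEY <nil>
}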
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
new file mode 100755
index 0000000..d72e771
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("MINIO_ACCESS_KEY")
+ secret := os.Getenv("MINIO_SECRET_KEY")
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not been retrieved yet.
+func (e *EnvMinio) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
new file mode 100755
index 0000000..5ad6830
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,120 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+ homedir "github.com/mitchellh/go-homedir"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+ return New(&FileAWSCredentials{
+ filename: filename,
+ profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+ if p.filename == "" {
+ p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if p.filename == "" {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+ }
+ if p.profile == "" {
+ p.profile = os.Getenv("AWS_PROFILE")
+ if p.profile == "" {
+ p.profile = "default"
+ }
+ }
+
+ p.retrieved = false
+
+ iniProfile, err := loadProfile(p.filename, p.profile)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Default to empty string if not found.
+ id := iniProfile.Key("aws_access_key_id")
+ // Default to empty string if not found.
+ secret := iniProfile.Key("aws_secret_access_key")
+ // Default to empty string if not found.
+ token := iniProfile.Key("aws_session_token")
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileAWSCredentials) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the named profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile are
+// returned, or an error if it fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return nil, err
+ }
+ return iniProfile, nil
+}
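
Usage sketch: an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE
and then ~/.aws/credentials, and "no_token" names a section like the one in
credentials.sample shown earlier:

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewFileAWSCredentials("", "no_token")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no shared credentials:", err)
		return
	}
	fmt.Println(v.AccessKeyID)
}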
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
new file mode 100755
index 0000000..6a6827e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
@@ -0,0 +1,133 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ homedir "github.com/mitchellh/go-homedir"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.mc/config.json"
+	//    Windows:   "%USERPROFILE%\mc\config.json"
+ filename string
+
+ // Minio Alias to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "MINIO_ALIAS" or "s3" if the
+	// environment variable is also not set.
+ alias string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename string, alias string) *Credentials {
+ return New(&FileMinioClient{
+ filename: filename,
+ alias: alias,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+ if p.filename == "" {
+ if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
+ p.filename = value
+ } else {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".mc", "config.json")
+ if runtime.GOOS == "windows" {
+ p.filename = filepath.Join(homeDir, "mc", "config.json")
+ }
+ }
+ }
+
+ if p.alias == "" {
+ p.alias = os.Getenv("MINIO_ALIAS")
+ if p.alias == "" {
+ p.alias = "s3"
+ }
+ }
+
+ p.retrieved = false
+
+ hostCfg, err := loadAlias(p.filename, p.alias)
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: hostCfg.AccessKey,
+ SecretAccessKey: hostCfg.SecretKey,
+ SignerType: parseSignatureType(hostCfg.API),
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+ return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+ URL string `json:"url"`
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ API string `json:"api"`
+}
+
+// config config version.
+type config struct {
+ Version string `json:"version"`
+ Hosts map[string]hostConfig `json:"hosts"`
+}
+
+// loadAlias loads the named alias from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the alias are
+// returned, or an error if it fails to read from the file.
+func loadAlias(filename, alias string) (hostConfig, error) {
+ cfg := &config{}
+ configBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if err = json.Unmarshal(configBytes, cfg); err != nil {
+ return hostConfig{}, err
+ }
+ return cfg.Hosts[alias], nil
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
new file mode 100755
index 0000000..637df74
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
@@ -0,0 +1,214 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// requests to fail unexpectedly due to ExpiredTokenException errors.
+const DefaultExpiryWindow = time.Second * 10 // 10 secs
+
+// An IAM retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+type IAM struct {
+ Expiry
+
+ // Required http Client to use when connecting to IAM metadata service.
+ Client *http.Client
+
+ // Custom endpoint to fetch IAM role credentials.
+ endpoint string
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+ defaultIAMRoleEndpoint = "http://169.254.169.254"
+ defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM
+// provider. The endpoint argument overrides the address of the instance
+// metadata service; if empty, the default EC2 endpoint is used.
+func NewIAM(endpoint string) *Credentials {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ p := &IAM{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ endpoint: endpoint,
+ }
+ return New(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if it is unable to
+// extract the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+ roleCreds, err := getCredentials(m.Client, m.endpoint)
+ if err != nil {
+ return Value{}, err
+ }
+
+	// The expiry window is set to 10 seconds (DefaultExpiryWindow).
+ m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+
+ // Unused params.
+ LastUpdated time.Time
+ Type string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = defaultIAMSecurityCredsPath
+ return u, nil
+}
+
+// listRoleNames lists the credential role names associated with the
+// current EC2 service. An error is returned if the request cannot be
+// made or the response cannot be read.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(resp.Status)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return credsList, nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+//
+// If the credentials cannot be found, or there is an error
+// reading the response, an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ u, err := getIAMRoleURL(endpoint)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ roleNames, err := listRoleNames(client, u)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if len(roleNames) == 0 {
+ return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // - An instance profile can contain only one IAM role. This limit cannot be increased.
+ roleName := roleNames[0]
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // The following command retrieves the security credentials for an
+ // IAM role named `s3access`.
+ //
+ // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+ //
+ u.Path = path.Join(u.Path, roleName)
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+ }
+
+ return respCreds, nil
+}
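
A minimal usage sketch of the IAM provider. It assumes the package's Credentials.Get accessor (defined elsewhere in this package, not shown in this diff), and it only succeeds on an EC2 instance with an attached role.

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// An empty endpoint falls back to the default EC2 metadata
	// endpoint (http://169.254.169.254).
	creds := credentials.NewIAM("")

	// Get triggers Retrieve on first use and caches the value until
	// roughly 10 seconds (DefaultExpiryWindow) before it expires.
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err) // fails outside EC2 or without an attached role
	}
	fmt.Println(v.AccessKeyID, v.SignerType)
}
```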
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
new file mode 100755
index 0000000..1b768e8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
@@ -0,0 +1,77 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is the type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+ // SignatureDefault is always set to v4.
+ SignatureDefault SignatureType = iota
+ SignatureV4
+ SignatureV2
+ SignatureV4Streaming
+	SignatureAnonymous // Anonymous signature signifies no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+ return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+ return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+ return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+ return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// the strings returned here are matched case-insensitively when parsing.
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
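
A small sketch of the exported predicates and String (parseSignatureType itself is unexported):

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	s := credentials.SignatureDefault
	// SignatureDefault resolves to V4.
	fmt.Println(s.IsV4(), s.String()) // true S3v4

	fmt.Println(credentials.SignatureV2.String())        // S3v2
	fmt.Println(credentials.SignatureAnonymous.String()) // Anonymous
}
```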
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
new file mode 100755
index 0000000..8b0ba71
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider with the signature
+// type set to v2. If the access and secret keys are not specified,
+// the returned Value is anonymous regardless of the signature type.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2 with similar considerations.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns whether the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
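
A short sketch of the static provider, again assuming the package's Credentials.Get accessor from elsewhere in the package; the key values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Fixed keys, V4 signing.
	creds := credentials.NewStaticV4("AKEXAMPLE", "SKEXAMPLE", "")
	v, _ := creds.Get()
	fmt.Println(v.AccessKeyID, v.SignerType) // AKEXAMPLE S3v4

	// Empty keys degrade to anonymous access rather than an error.
	anon, _ := credentials.NewStaticV4("", "", "").Get()
	fmt.Println(anon.SignerType == credentials.SignatureAnonymous) // true
}
```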
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
new file mode 100755
index 0000000..2d3c70f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
@@ -0,0 +1,195 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "net/http"
+
+ "golang.org/x/crypto/argon2"
+)
+
+const (
+ // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
+ sseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // sseKmsKeyID is the AWS SSE-KMS key id.
+ sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
+ // sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ sseEncryptionContext = sseGenericHeader + "-Encryption-Context"
+
+ // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
+ // sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ sseCustomerKey = sseGenericHeader + "-Customer-Key"
+ // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
+
+ // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+)
+
+// PBKDF creates a SSE-C key from the provided password and salt.
+// PBKDF is a password-based key derivation function
+// which can be used to derive a high-entropy cryptographic
+// key from a low-entropy password and a salt.
+type PBKDF func(password, salt []byte) ServerSide
+
+// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
+// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
+ sse := ssec{}
+ copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
+ return sse
+}
+
+// Type is the server-side-encryption method. It represents one of
+// the following encryption methods:
+// - SSE-C: server-side-encryption with customer provided keys
+// - KMS: server-side-encryption with managed keys
+// - S3: server-side-encryption using S3 storage encryption
+type Type string
+
+const (
+ // SSEC represents server-side-encryption with customer provided keys
+ SSEC Type = "SSE-C"
+ // KMS represents server-side-encryption with managed keys
+ KMS Type = "KMS"
+ // S3 represents server-side-encryption using S3 storage encryption
+ S3 Type = "S3"
+)
+
+// ServerSide is a form of S3 server-side-encryption.
+type ServerSide interface {
+ // Type returns the server-side-encryption method.
+ Type() Type
+
+ // Marshal adds encryption headers to the provided HTTP headers.
+ // It marks an HTTP request as server-side-encryption request
+ // and inserts the required data into the headers.
+ Marshal(h http.Header)
+}
+
+// NewSSE returns a server-side-encryption using S3 storage encryption.
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+ if len(key) != 32 {
+ return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+ }
+ sse := ssec{}
+ copy(sse[:], key)
+ return sse, nil
+}
+
+// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssecCopy); ok {
+ return ssec(sse)
+ }
+ return sse
+}
+
+// SSECopy transforms a SSE-C encryption into a SSE-C copy
+// encryption. This is required for SSE-C key rotation or a SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+ if sse == nil || sse.Type() != SSEC {
+ return sse
+ }
+ if sse, ok := sse.(ssec); ok {
+ return ssecCopy(sse)
+ }
+ return sse
+}
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCustomerAlgorithm, "AES256")
+ h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+ keyMD5 := md5.Sum(s[:])
+ h.Set(sseCopyCustomerAlgorithm, "AES256")
+ h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+ h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
+
+type kms struct {
+ key string
+ context []byte
+ hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+ h.Set(sseGenericHeader, "aws:kms")
+ h.Set(sseKmsKeyID, s.key)
+ if s.hasContext {
+ h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+ }
+}
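
A minimal sketch of marshaling SSE-C headers with the API above; the all-zero key is for illustration only and must never be used in practice.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	key := make([]byte, 32) // SSE-C requires exactly 32 bytes
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		panic(err)
	}

	h := http.Header{}
	sse.Marshal(h)
	fmt.Println(h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm")) // AES256

	// For CopyObject sources, SSECopy switches to the Copy-Source headers.
	hc := http.Header{}
	encrypt.SSECopy(sse).Marshal(hc)
	fmt.Println(hc.Get("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm")) // AES256
}
```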
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
new file mode 100755
index 0000000..156a6d6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
@@ -0,0 +1,306 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Reference for constants used below -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+const (
+ streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ payloadChunkSize = 64 * 1024
+ chunkSigConstLen = 17 // ";chunk-signature="
+ signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
+ crlfLen = 2 // CRLF
+)
+
+// Request headers to be ignored while calculating seed signature for
+// a request.
+var ignoredStreamingHeaders = map[string]bool{
+ "Authorization": true,
+ "User-Agent": true,
+ "Content-Type": true,
+}
+
+// getSignedChunkLength - calculates the length of chunk metadata
+func getSignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ chunkSigConstLen +
+ signatureStrLen +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
+
+// getStreamLength - calculates the length of the overall stream (data + metadata)
+func getStreamLength(dataLen, chunkSize int64) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getSignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getSignedChunkLength(remainingBytes)
+ }
+ streamLen += getSignedChunkLength(0)
+ return streamLen
+}
+
+// buildChunkStringToSign - returns the string to sign given chunk data
+// and previous signature.
+func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+ stringToSignParts := []string{
+ streamingPayloadHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t),
+ previousSig,
+ emptySHA256,
+ hex.EncodeToString(sum256(chunkData)),
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
+
+// prepareStreamingRequest - prepares a request with appropriate
+// headers before computing the seed signature.
+func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ // Set x-amz-content-sha256 header.
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ // Set content length with streaming signature for each chunk included.
+ req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
+}
+
+// buildChunkHeader - returns the chunk header.
+// e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
+func buildChunkHeader(chunkLen int64, signature string) []byte {
+ return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
+}
+
+// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
+func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+ previousSignature, secretAccessKey string) string {
+
+ chunkStringToSign := buildChunkStringToSign(reqTime, region,
+ previousSignature, chunkData)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// setSeedSignature - computes and stores the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+ // Get canonical request
+ canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
+
+ signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
+
+ // Calculate signature.
+ s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements the chunked upload signature as a reader on
+// top of req.Body's io.ReadCloser, emitting signed chunks
+// (header;data;... repeated) until the stream ends.
+type StreamingReader struct {
+ accessKeyID string
+ secretAccessKey string
+ sessionToken string
+ region string
+ prevSignature string
+ seedSignature string
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ reqTime time.Time
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+}
+
+// signChunk - signs a chunk of chunkLen bytes read from s.baseReadCloser.
+func (s *StreamingReader) signChunk(chunkLen int) {
+ // Compute chunk signature for next header
+ signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ // Write chunk header into streaming buffer
+ chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+ s.buf.Write(chunkHdr)
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ s.buf.Write([]byte("\r\n"))
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+ credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
+ authParts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+ "Signature=" + s.seedSignature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(authParts, ",")
+ req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+ region string, dataLen int64, reqTime time.Time) *http.Request {
+
+ // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingReader{
+ baseReadCloser: req.Body,
+ accessKeyID: accessKeyID,
+ secretAccessKey: secretAccessKey,
+ sessionToken: sessionToken,
+ region: region,
+ reqTime: reqTime,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ }
+
+ // Add the request headers required for chunk upload signing.
+
+ // Compute the seed signature.
+ stReader.setSeedSignature(req)
+
+ // Set the authorization header with the seed signature.
+ stReader.setStreamingAuthHeader(req)
+
+ // Set seed signature as prevSignature for subsequent
+ // streaming signing process.
+ stReader.prevSignature = stReader.seedSignature
+ req.Body = stReader
+
+ return req
+}
+
+// Read - this method performs chunked upload signing, providing an
+// io.Reader interface.
+func (s *StreamingReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+	// s.buf will be (re-)filled with the next chunk when it holds
+	// fewer bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(s.chunkBufLen)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // bytes read from baseReader different than
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, io.ErrUnexpectedEOF
+ }
+
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(0)
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingReader) Close() error {
+ return s.baseReadCloser.Close()
+}
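
To make the chunk-metadata arithmetic concrete, here is a standalone sketch that mirrors getSignedChunkLength and getStreamLength (which are unexported) for a hypothetical 100 KiB payload:

```go
package main

import "fmt"

// signedChunkLen mirrors getSignedChunkLength above: a hex length prefix,
// ";chunk-signature=" (17 bytes), a 64-byte hex signature, two CRLFs, and
// the chunk data itself.
func signedChunkLen(n int64) int64 {
	hexLen := int64(len(fmt.Sprintf("%x", n)))
	return hexLen + 17 + 64 + 2 + n + 2
}

func main() {
	const chunkSize = 64 * 1024
	dataLen := int64(100 * 1024) // one full chunk + one 36 KiB remainder

	total := signedChunkLen(chunkSize) + // full chunk
		signedChunkLen(dataLen-chunkSize) + // remainder chunk
		signedChunkLen(0) // terminating zero-length chunk
	fmt.Println(total) // the Content-Length that prepareStreamingRequest sets
}
```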
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
new file mode 100755
index 0000000..b407093
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -0,0 +1,316 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// encodeURL2Path encodes the input URL path into a URL-encoded path.
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+ if virtualHost {
+ reqHost := getHostAddr(req)
+ dotPos := strings.Index(reqHost, ".")
+ if dotPos > -1 {
+ bucketName := reqHost[:dotPos]
+ path = "/" + bucketName
+ path += req.URL.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ }
+ path = s3utils.EncodePath(req.URL.Path)
+ return
+}
+
+// PreSignV2 - presigns the request in the following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ d := time.Now().UTC()
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+ // Encode query and save.
+ req.URL.RawQuery = s3utils.QueryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+ // Return.
+ return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(policyBase64))
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+ return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+//
+// CanonicalizedProtocolHeaders = <described below>
+
+// SignV2 signs the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ d := time.Now().UTC()
+
+ // Add date if not present.
+ if date := req.Header.Get("Date"); date == "" {
+ req.Header.Set("Date", d.Format(http.TimeFormat))
+ }
+
+ // Calculate HMAC for secretAccessKey.
+ stringToSign := stringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Prepare auth header.
+ authHeader := new(bytes.Buffer)
+ authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
+ encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
+ encoder.Write(hm.Sum(nil))
+ encoder.Close()
+
+ // Set Authorization header.
+ req.Header.Set("Authorization", authHeader.String())
+
+ return &req
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Expires + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func preStringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writePreSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func stringToSignV2(req http.Request, virtualHost bool) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writeSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ writeCanonicalizedResource(buf, req, virtualHost)
+ return buf.String()
+}
+
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
+}
+
+// writeCanonicalizedHeaders - write canonicalized headers.
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+			// TODO: "Unfold" long headers that span
+			// multiple lines (as allowed by RFC 2616,
+			// section 4.2) by replacing the folding
+			// white-space (including new-line) with a
+			// single space; for now the value is written
+			// as-is in either case.
+			buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist of resources that are included in the query string for
+// signature-V2 calculation. The list must be kept alphabetically sorted.
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+//	<HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ buf.WriteString(encodeURL2Path(&req, virtualHost))
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+		// Check whether any sub-resource queries are present and,
+		// if so, canonicalize them.
+ for _, resource := range resourceList {
+ if vv, ok := vals[resource]; ok && len(vv) > 0 {
+ n++
+ // First element
+ switch n {
+ case 1:
+ buf.WriteByte('?')
+ // The rest
+ default:
+ buf.WriteByte('&')
+ }
+ buf.WriteString(resource)
+ // Request parameters
+ if len(vv[0]) > 0 {
+ buf.WriteByte('=')
+ buf.WriteString(vv[0])
+ }
+ }
+ }
+ }
+}
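
A standalone sketch of the V2 signing primitive used by SignV2 and PreSignV2: Signature = Base64(HMAC-SHA1(secret, StringToSign)). The StringToSign and keys below are made up, laid out per the documented format.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	// HTTP-Verb, Content-Md5, Content-Type, Date, CanonicalizedResource.
	stringToSign := "GET\n\n\nTue, 27 Mar 2007 19:36:42 +0000\n/examplebucket/photo.jpg"

	mac := hmac.New(sha1.New, []byte("SKEXAMPLE"))
	mac.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))

	// SignV2 sets this as the Authorization header; PreSignV2 appends
	// the same signature to the query string instead.
	fmt.Println("AWS AKEXAMPLE:" + signature)
}
```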
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
new file mode 100755
index 0000000..daf02fe
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -0,0 +1,315 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+ yyyymmdd = "20060102"
+)
+
+///
+/// Excerpts from @lsegal -
+/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
+///
+/// User-Agent:
+///
+/// This is ignored from signing because signing this causes
+/// problems with generating pre-signed URLs (that are executed
+/// by other agents) or when customers pass requests through
+/// proxies, which may modify the user-agent.
+///
+/// Content-Length:
+///
+/// This is ignored from signing because generating a pre-signed
+/// URL should not provide a content-length constraint,
+/// specifically when vending a S3 pre-signed PUT URL. The
+/// corollary to this is that when sending regular requests
+/// (non-pre-signed), the signature contains a checksum of the
+/// body, which implicitly validates the payload length (since
+/// changing the number of bytes would change the checksum)
+/// and therefore this header is not valuable in the signature.
+///
+/// Content-Type:
+///
+/// Signing this header causes quite a number of problems in
+/// browser environments, where browsers like to modify and
+/// normalize the content-type header in different ways. There is
+/// more information on this in https://goo.gl/2E9gyy. Avoiding
+/// this field simplifies logic and reduces the possibility of
+/// future bugs.
+///
+/// Authorization:
+///
+/// Is skipped for obvious reasons
+///
+var v4IgnoredHeaders = map[string]bool{
+ "Authorization": true,
+ "Content-Type": true,
+ "Content-Length": true,
+ "User-Agent": true,
+}
+
+// getSigningKey derives the hmac seed key used to calculate the final signature.
+func getSigningKey(secret, loc string, t time.Time) []byte {
+ date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
+ location := sumHMAC(date, []byte(loc))
+ service := sumHMAC(location, []byte("s3"))
+ signingKey := sumHMAC(service, []byte("aws4_request"))
+ return signingKey
+}
+
+// getSignature returns the final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string {
+ return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
+}
+
+// getScope generates a scope string from a specific date, an AWS region,
+// and a service.
+func getScope(location string, t time.Time) string {
+ scope := strings.Join([]string{
+ t.Format(yyyymmdd),
+ location,
+ "s3",
+ "aws4_request",
+ }, "/")
+ return scope
+}
+
+// GetCredential generates a credential string.
+func GetCredential(accessKeyID, location string, t time.Time) string {
+ scope := getScope(location, t)
+ return accessKeyID + "/" + scope
+}
+
+// getHashedPayload gets the hexadecimal value of the SHA256 hash of
+// the request payload.
+func getHashedPayload(req http.Request) string {
+ hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
+ if hashedPayload == "" {
+ // Presign does not have a payload, use S3 recommended value.
+ hashedPayload = unsignedPayload
+ }
+ return hashedPayload
+}
+
+// getCanonicalHeaders generates a list of request headers for
+// signature.
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+	// Save all the headers in canonical form: newline-separated
+	// for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(getHostAddr(&req))
+ fallthrough
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+// getSignedHeaders generates all signed request headers,
+// i.e. a lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request of the following style.
+//
+// canonicalRequest =
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+//
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ s3utils.EncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req, ignoredHeaders),
+ getSignedHeaders(req, ignoredHeaders),
+ getHashedPayload(req),
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 returns the string to sign from the scope and the canonical request.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t) + "\n"
+ stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+	// Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+	// Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4 signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return &req
+}
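
A standalone sketch of the SigV4 key-derivation chain implemented by getSigningKey and getSignature above; the secret, region, date, and string to sign are placeholders.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 mirrors sumHMAC in this package.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	t := time.Date(2017, 1, 2, 0, 0, 0, 0, time.UTC)

	// The four-step key derivation performed by getSigningKey:
	// date -> region -> service -> terminator.
	k := hmacSHA256([]byte("AWS4"+"SKEXAMPLE"), []byte(t.Format("20060102")))
	k = hmacSHA256(k, []byte("us-east-1"))
	k = hmacSHA256(k, []byte("s3"))
	k = hmacSHA256(k, []byte("aws4_request"))

	// getSignature then hex-encodes the HMAC of the string to sign.
	fmt.Println(hex.EncodeToString(hmacSHA256(k, []byte("example-string-to-sign"))))
}
```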
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
new file mode 100755
index 0000000..33b1752
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -0,0 +1,49 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "net/http"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the sha256 sum of an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// sumHMAC calculates the hmac-sha256 of data keyed with key.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getHostAddr returns the host header if available, otherwise the host from the URL.
+func getHostAddr(req *http.Request) string {
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100755
index 0000000..bfeb73e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,302 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// sentinelURL is the default (zero-value) URL, which is treated as invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+	// Host cannot contain any of the special characters listed below.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
+ return false
+ }
+	// No need to regexp match, since the list is non-exhaustive.
+	// We let it pass here and fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// a virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// For region naming styles, refer to - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+
+// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+ if endpointURL == sentinelURL {
+ return ""
+ }
+ if endpointURL.Host == "s3-external-1.amazonaws.com" {
+ return ""
+ }
+ if IsAmazonGovCloudEndpoint(endpointURL) {
+ return "us-gov-west-1"
+ }
+ parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "storage.googleapis.com"
+}
+
+// Expects ASCII-encoded strings - from the output of EncodePath.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by EncodePath() used
+// here, it also percent encodes '/' (forward slash).
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// if the object name matches this reserved pattern, there is no need to encode it
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes strings from their UTF-8 byte representations to percent-encoded hex escape sequences
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
+// non-English characters cannot be parsed due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
+// pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+			runeLen := utf8.RuneLen(s)
+			if runeLen < 0 {
+				// if utf8 cannot encode the rune, return the string as-is
+				return pathName
+			}
+			u := make([]byte, runeLen)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
+
+// We support '.' in bucket names, but we fall back to using
+// path-style requests instead for such buckets.
+var (
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
+ validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+ ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket name cannot be empty")
+ }
+ if len(bucketName) < 3 {
+ return errors.New("Bucket name cannot be smaller than 3 characters")
+ }
+ if len(bucketName) > 63 {
+ return errors.New("Bucket name cannot be greater than 63 characters")
+ }
+ if ipAddress.MatchString(bucketName) {
+ return errors.New("Bucket name cannot be an ip address")
+ }
+ if strings.Contains(bucketName, "..") {
+ return errors.New("Bucket name contains invalid characters")
+ }
+ if strict {
+ if !validBucketNameStrict.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+ }
+ if !validBucketName.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+func CheckValidBucketName(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+ if len(objectName) > 1024 {
+ return errors.New("Object name cannot be greater than 1024 characters")
+ }
+ if !utf8.ValidString(objectName) {
+ return errors.New("Object name with non UTF-8 strings are not supported")
+ }
+ return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return errors.New("Object name cannot be empty")
+ }
+ return CheckValidObjectNamePrefix(objectName)
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
new file mode 100755
index 0000000..efd0262
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -0,0 +1,197 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes a string from the set. It does nothing if the string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// A 'matchFn' should accept an element of the set as its first argument and
+// 'matchString' as its second argument. The function can apply any logic to
+// compare the two arguments, and should return true to include the element in
+// the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// An 'applyFn' should accept an element of the set as its argument and return
+// a processed string. The function can apply any logic to produce the
+// processed string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check each elements are equal.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.ToSlice())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+// If 'data' contains JSON string array, the set contains each string.
+// If 'data' contains JSON string, the set contains the string as one element.
+// If 'data' contains any other JSON type, a JSON parse error is returned.
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []string{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(s)
+ }
+ } else {
+ var s string
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(s)
+ }
+ }
+
+ return err
+}
+
+// String - returns printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.ToSlice())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
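+
+// Illustrative usage (a sketch, not part of the upstream file; ToSlice
+// returns elements in sorted order):
+//
+//	a := set.CreateStringSet("x", "y")
+//	b := set.CreateStringSet("y", "z")
+//	a.Union(b).ToSlice()        // [x y z]
+//	a.Intersection(b).ToSlice() // [y]
+//	a.Difference(b).ToSlice()   // [x]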
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
new file mode 100755
index 0000000..b3ae705
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -0,0 +1,248 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+// policyCondition {
+// matchType: "eq",
+// condition: "$Content-Type",
+// value: "image/png",
+// }
+//
+type policyCondition struct {
+ matchType string
+ condition string
+ value string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+ // Expiration date and time of the POST policy.
+ expiration time.Time
+ // Collection of different policy conditions.
+ conditions []policyCondition
+ // ContentLengthRange minimum and maximum allowable size for the
+ // uploaded content.
+ contentLengthRange struct {
+ min int64
+ max int64
+ }
+
+ // Post form data.
+ formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+ p := &PostPolicy{}
+ p.conditions = make([]policyCondition, 0)
+ p.formData = make(map[string]string)
+ return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+ if t.IsZero() {
+ return ErrInvalidArgument("No expiry time set.")
+ }
+ p.expiration = t
+ return nil
+}
+
+// SetKey - Sets an object name for the policy based upload.
+func (p *PostPolicy) SetKey(key string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return ErrInvalidArgument("Object name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$key",
+ value: key,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = key
+ return nil
+}
+
+// SetKeyStartsWith - Sets an object name prefix that a policy based
+// upload can start with.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+ if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
+ return ErrInvalidArgument("Object prefix is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: "$key",
+ value: keyStartsWith,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["key"] = keyStartsWith
+ return nil
+}
+
+// SetBucket - Sets the bucket into which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+ if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+ return ErrInvalidArgument("Bucket name is empty.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$bucket",
+ value: bucketName,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["bucket"] = bucketName
+ return nil
+}
+
+// SetContentType - Sets content-type of the object for this policy
+// based upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+ if strings.TrimSpace(contentType) == "" || contentType == "" {
+ return ErrInvalidArgument("No content type specified.")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$Content-Type",
+ value: contentType,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["Content-Type"] = contentType
+ return nil
+}
+
+// SetContentLengthRange - Set new min and max content length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+ if min > max {
+ return ErrInvalidArgument("Minimum limit is larger than maximum limit.")
+ }
+ if min < 0 {
+ return ErrInvalidArgument("Minimum limit cannot be negative.")
+ }
+ if max < 0 {
+ return ErrInvalidArgument("Maximum limit cannot be negative.")
+ }
+ p.contentLengthRange.min = min
+ p.contentLengthRange.max = max
+ return nil
+}
+
+// SetSuccessStatusAction - Sets the success status code of the object for this
+// policy based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+ if strings.TrimSpace(status) == "" || status == "" {
+ return ErrInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
+// SetUserMetadata - Set user metadata as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return ErrInvalidArgument("Key is empty")
+ }
+ if strings.TrimSpace(value) == "" || value == "" {
+ return ErrInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+ if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
+ return ErrInvalidArgument("Policy fields are empty.")
+ }
+ p.conditions = append(p.conditions, policyCond)
+ return nil
+}
+
+// String - implements the Stringer interface, printing the policy as a JSON formatted string.
+func (p PostPolicy) String() string {
+ return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides Marshalled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+ expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+ var conditionsStr string
+ conditions := []string{}
+ for _, po := range p.conditions {
+ conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+ }
+ if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+ conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+ p.contentLengthRange.min, p.contentLengthRange.max))
+ }
+ if len(conditions) > 0 {
+ conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+ }
+ retStr := "{"
+ retStr = retStr + expirationStr + ","
+ retStr = retStr + conditionsStr
+ retStr = retStr + "}"
+ return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshalled json.
+func (p PostPolicy) base64() string {
+ return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
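+
+// Illustrative usage (a sketch, not part of the upstream file): build a
+// policy for browser-based POST uploads. The bucket and object names are
+// placeholders.
+//
+//	policy := minio.NewPostPolicy()
+//	policy.SetBucket("mybucket")
+//	policy.SetKey("uploads/photo.jpg")
+//	policy.SetExpires(time.Now().UTC().Add(10 * time.Minute))
+//	policy.SetContentLengthRange(1024, 10*1024*1024)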
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100755
index 0000000..f31dfa6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // 1<<uint(attempt) below could overflow, so limit the value of attempt
+ maxAttempt := 30
+ if attempt > maxAttempt {
+ attempt = maxAttempt
+ }
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ var nextBackoff int
+ for {
+ select {
+ // Attempts starts.
+ case attemptCh <- nextBackoff:
+ nextBackoff++
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(nextBackoff))
+ }
+ }()
+ return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
new file mode 100755
index 0000000..2a76707
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -0,0 +1,153 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 1 second.
+const DefaultRetryUnit = time.Second
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+const DefaultRetryCap = time.Second * 30
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1<<uint(attempt))
+ if sleep > cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ for i := 0; i < maxRetry; i++ {
+ select {
+ // Attempts start from 1.
+ case attemptCh <- i + 1:
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(i))
+ }
+ }()
+ return attemptCh
+}
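+
+// Illustrative usage (a sketch, not part of the upstream file): drain the
+// attempt channel until the operation succeeds; doRequest is a hypothetical
+// helper, not an upstream function.
+//
+//	doneCh := make(chan struct{})
+//	defer close(doneCh)
+//	for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+//		if err := doRequest(); err == nil {
+//			break
+//		}
+//	}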
+
+// isHTTPReqErrorRetryable - is an http request error retryable, such
+// as an i/o timeout, a broken connection, etc.
+func isHTTPReqErrorRetryable(err error) bool {
+ if err == nil {
+ return false
+ }
+ switch e := err.(type) {
+ case *url.Error:
+ switch e.Err.(type) {
+ case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+ return true
+ }
+ if strings.Contains(err.Error(), "Connection closed by foreign host") {
+ return true
+ } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+ // If error is - tlsHandshakeTimeoutError, retry.
+ return true
+ } else if strings.Contains(err.Error(), "i/o timeout") {
+ // If error is - tcp timeoutError, retry.
+ return true
+ } else if strings.Contains(err.Error(), "connection timed out") {
+ // If err is a net.Dial timeout, retry.
+ return true
+ } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
+ // If error is transport connection broken, retry.
+ return true
+ }
+ }
+ return false
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "InternalError": {},
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "SlowDown": {},
+ // Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+ _, ok = retryableS3Codes[s3Code]
+ return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+ 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+ http.StatusInternalServerError: {},
+ http.StatusBadGateway: {},
+ http.StatusServiceUnavailable: {},
+ // Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+ _, ok = retryableHTTPStatusCodes[httpStatusCode]
+ return ok
+}
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
new file mode 100755
index 0000000..0589295
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -0,0 +1,50 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// awsS3EndpointMap Amazon S3 endpoint map.
+var awsS3EndpointMap = map[string]string{
+ "us-east-1": "s3.amazonaws.com",
+ "us-east-2": "s3-us-east-2.amazonaws.com",
+ "us-west-2": "s3-us-west-2.amazonaws.com",
+ "us-west-1": "s3-us-west-1.amazonaws.com",
+ "ca-central-1": "s3-ca-central-1.amazonaws.com",
+ "eu-west-1": "s3-eu-west-1.amazonaws.com",
+ "eu-west-2": "s3-eu-west-2.amazonaws.com",
+ "eu-west-3": "s3-eu-west-3.amazonaws.com",
+ "eu-central-1": "s3-eu-central-1.amazonaws.com",
+ "ap-south-1": "s3-ap-south-1.amazonaws.com",
+ "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
+ "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
+ "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
+ "sa-east-1": "s3-sa-east-1.amazonaws.com",
+ "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
+ "cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
+ "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn",
+}
+
+// getS3Endpoint gets the Amazon S3 endpoint based on the bucket location.
+func getS3Endpoint(bucketLocation string) (s3Endpoint string) {
+ s3Endpoint, ok := awsS3EndpointMap[bucketLocation]
+ if !ok {
+ // Default to 's3.amazonaws.com' endpoint.
+ s3Endpoint = "s3.amazonaws.com"
+ }
+ return s3Endpoint
+}
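+
+// Illustrative results (a sketch, not part of the upstream file):
+//
+//	getS3Endpoint("eu-west-1") // "s3-eu-west-1.amazonaws.com"
+//	getS3Endpoint("mars-1")    // "s3.amazonaws.com" (unknown locations fall back to the default)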
diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go
new file mode 100755
index 0000000..f9e8233
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/s3-error.go
@@ -0,0 +1,61 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// Non exhaustive list of AWS S3 standard error responses -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+var s3ErrorResponseMap = map[string]string{
+ "AccessDenied": "Access Denied.",
+ "BadDigest": "The Content-Md5 you specified did not match what we received.",
+ "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
+ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
+ "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+ "InternalError": "We encountered an internal error, please try again.",
+ "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
+ "InvalidBucketName": "The specified bucket is not valid.",
+ "InvalidDigest": "The Content-Md5 you specified is not valid.",
+ "InvalidRange": "The requested range is not satisfiable",
+ "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
+ "MissingContentLength": "You must provide the Content-Length HTTP header.",
+ "MissingContentMD5": "Missing required header for this request: Content-Md5.",
+ "MissingRequestBodyError": "Request body is empty.",
+ "NoSuchBucket": "The specified bucket does not exist",
+ "NoSuchBucketPolicy": "The bucket policy does not exist",
+ "NoSuchKey": "The specified key does not exist.",
+ "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ "NotImplemented": "A header you provided implies functionality that is not implemented",
+ "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
+ "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
+ "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ "MethodNotAllowed": "The specified method is not allowed against this resource.",
+ "InvalidPart": "One or more of the specified parts could not be found.",
+ "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+ "InvalidObjectState": "The operation is not valid for the current state of the object.",
+ "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
+ "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
+ "BucketNotEmpty": "The bucket you tried to delete is not empty",
+ "AllAccessDisabled": "All access to this bucket has been disabled.",
+ "MalformedPolicy": "Policy has invalid resource.",
+ "MissingFields": "Missing fields in request.",
+ "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
+ "InvalidDuration": "Duration provided in the request is invalid.",
+ "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ // Add new API errors here.
+}
diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go
new file mode 100755
index 0000000..88700cf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/transport.go
@@ -0,0 +1,50 @@
+// +build go1.7 go1.8
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with the additional DisableCompression
+// param set to true to avoid decompressing content with 'gzip' encoding.
+var DefaultTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+}
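+
+// Illustrative usage (a sketch, not part of the upstream file): the transport
+// can back a plain http.Client for callers that want the same dial and
+// keep-alive behavior.
+//
+//	client := &http.Client{Transport: minio.DefaultTransport}
+//	resp, err := client.Get("https://s3.amazonaws.com")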
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
new file mode 100755
index 0000000..be4c8b2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -0,0 +1,272 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "crypto/md5"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// xmlDecoder provides a decoded value from xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+ d := xml.NewDecoder(body)
+ return d.Decode(v)
+}
+
+// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
+func sum256Hex(data []byte) string {
+ hash := sha256.New()
+ hash.Write(data)
+ return hex.EncodeToString(hash.Sum(nil))
+}
+
+// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
+func sumMD5Base64(data []byte) string {
+ hash := md5.New()
+ hash.Write(data)
+ return base64.StdEncoding.EncodeToString(hash.Sum(nil))
+}
+
+// getEndpointURL - constructs a new endpoint URL.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+ if strings.Contains(endpoint, ":") {
+ host, _, err := net.SplitHostPort(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ } else {
+ if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
+ msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
+ return nil, ErrInvalidArgument(msg)
+ }
+ }
+ // If secure is false, use 'http' scheme.
+ scheme := "https"
+ if !secure {
+ scheme = "http"
+ }
+
+ // Construct a secured endpoint URL.
+ endpointURLStr := scheme + "://" + endpoint
+ endpointURL, err := url.Parse(endpointURLStr)
+ if err != nil {
+ return nil, err
+ }
+
+ // Validate incoming endpoint URL.
+ if err := isValidEndpointURL(*endpointURL); err != nil {
+ return nil, err
+ }
+ return endpointURL, nil
+}
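+
+// Illustrative results (a sketch, not part of the upstream file; the hosts
+// are assumed to pass the IP/domain validation above):
+//
+//	getEndpointURL("s3.amazonaws.com", true) // https://s3.amazonaws.com
+//	getEndpointURL("localhost:9000", false)  // http://localhost:9000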
+
+// closeResponse closes a non-nil response along with any response body.
+// It is a convenient wrapper to drain any remaining data in the response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+ // Callers should close resp.Body when done reading from it.
+ // If resp.Body is not closed, the Client's underlying RoundTripper
+ // (typically Transport) may not be able to re-use a persistent TCP
+ // connection to the server for a subsequent "keep-alive" request.
+ if resp != nil && resp.Body != nil {
+ // Drain any remaining Body and then close the connection.
+ // Without this closing connection would disallow re-using
+ // the same connection for future uses.
+ // - http://stackoverflow.com/a/17961593/4465767
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
+}
+
+var (
+ // Hex encoded string of nil sha256sum bytes.
+ emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+
+ // Sentinel URL is the default url value which is invalid.
+ sentinelURL = url.URL{}
+)
+
+// Verify if input endpoint URL is valid.
+func isValidEndpointURL(endpointURL url.URL) error {
+ if endpointURL == sentinelURL {
+ return ErrInvalidArgument("Endpoint url cannot be empty.")
+ }
+ if endpointURL.Path != "/" && endpointURL.Path != "" {
+ return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+ }
+ if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+ if !s3utils.IsAmazonEndpoint(endpointURL) {
+ return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+ }
+ }
+ if strings.Contains(endpointURL.Host, ".googleapis.com") {
+ if !s3utils.IsGoogleEndpoint(endpointURL) {
+ return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+ }
+ }
+ return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+ expireSeconds := int64(expires / time.Second)
+ if expireSeconds < 1 {
+ return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+ }
+ if expireSeconds > 604800 {
+ return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+ }
+ return nil
+}
+
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+ filteredHeader = cloneHeader(header)
+ for _, key := range filterKeys {
+ filteredHeader.Del(key)
+ }
+ return filteredHeader
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+ if !strings.HasPrefix(origAuth, signV4Algorithm) {
+ // Set a temporary redacted auth
+ return "AWS **REDACTED**:**REDACTED**"
+ }
+
+ // Signature V4 authorization header.
+
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+ return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
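+
+// Illustrative example (a sketch, not part of the upstream file; the access
+// key and signature are the documented AWS example values):
+//
+//	redactSignature("AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024")
+//	// => "AWS4-HMAC-SHA256 Credential=**REDACTED**/20130524/us-east-1/s3/aws4_request, SignedHeaders=host, Signature=**REDACTED**"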
+
+// Get default location returns the location based on the input
+// URL `u`. If a region override is provided, the location always
+// defaults to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+ if regionOverride != "" {
+ return regionOverride
+ }
+ region := s3utils.GetRegionFromURL(u)
+ if region == "" {
+ region = "us-east-1"
+ }
+ return region
+}
+
+var supportedHeaders = []string{
+ "content-type",
+ "cache-control",
+ "content-encoding",
+ "content-disposition",
+ "content-language",
+ "x-amz-website-redirect-location",
+ "expires",
+ // Add more supported headers here.
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+ return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+ for _, header := range supportedHeaders {
+ if strings.ToLower(header) == key {
+ return true
+ }
+ }
+ return false
+}
+
+// sseHeaders is list of server side encryption headers
+var sseHeaders = []string{
+ "x-amz-server-side-encryption",
+ "x-amz-server-side-encryption-aws-kms-key-id",
+ "x-amz-server-side-encryption-context",
+ "x-amz-server-side-encryption-customer-algorithm",
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-server-side-encryption-customer-key-MD5",
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+ for _, h := range sseHeaders {
+ if strings.ToLower(h) == key {
+ return true
+ }
+ }
+ return false
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-*, x-amz-grant-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+ key := strings.ToLower(headerKey)
+
+ return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl"
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100755
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100755
index 0000000..d70706d
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
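+
+A minimal sketch of typical usage (the printed paths are illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func main() {
+	// Detect the current user's home directory without cgo.
+	dir, err := homedir.Dir()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(dir) // e.g. /home/alice
+
+	// Expand a leading ~ into the home directory.
+	expanded, _ := homedir.Expand("~/.config/app")
+	fmt.Println(expanded) // e.g. /home/alice/.config/app
+}
+```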
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100755
index 0000000..fb87bef
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,157 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+ homeEnv := "HOME"
+ if runtime.GOOS == "plan9" {
+ // On plan9, env vars are lowercase.
+ homeEnv = "home"
+ }
+
+ // First prefer the HOME environment variable
+ if home := os.Getenv(homeEnv); home != "" {
+ return home, nil
+ }
+
+ var stdout bytes.Buffer
+
+ // If that fails, try OS specific commands
+ if runtime.GOOS == "darwin" {
+ cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err == nil {
+ result := strings.TrimSpace(stdout.String())
+ if result != "" {
+ return result, nil
+ }
+ }
+ } else {
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd := exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environment variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // Prefer standard environment variable USERPROFILE
+ if home := os.Getenv("USERPROFILE"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/mmcdole/gofeed/LICENSE b/vendor/github.com/mmcdole/gofeed/LICENSE
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/gofeed/README.md b/vendor/github.com/mmcdole/gofeed/README.md
old mode 100644
new mode 100755
index 7fb1ce9..1606160
--- a/vendor/github.com/mmcdole/gofeed/README.md
+++ b/vendor/github.com/mmcdole/gofeed/README.md
@@ -2,9 +2,23 @@
[](https://travis-ci.org/mmcdole/gofeed) [](https://coveralls.io/github/mmcdole/gofeed?branch=master) [](https://goreportcard.com/report/github.com/mmcdole/gofeed) [](http://godoc.org/github.com/mmcdole/gofeed) [](http://doge.mit-license.org)
-The `gofeed` library is a robust feed parser that supports parsing both [RSS](https://en.wikipedia.org/wiki/RSS) and [Atom](https://en.wikipedia.org/wiki/Atom_(standard)) feeds. The universal `gofeed.Parser` will parse and convert all feed types into a hybrid `gofeed.Feed` model. You also have the option of parsing them into their respective `atom.Feed` and `rss.Feed` models using the feed specific `atom.Parser` or `rss.Parser`.
+The `gofeed` library is a robust feed parser that supports parsing both [RSS](https://en.wikipedia.org/wiki/RSS) and [Atom](https://en.wikipedia.org/wiki/Atom_(standard)) feeds. The library provides a universal `gofeed.Parser` that will parse and convert all feed types into a hybrid `gofeed.Feed` model. You also have the option of utilizing the feed specific `atom.Parser` or `rss.Parser` parsers which generate `atom.Feed` and `rss.Feed` respectively.
-##### Supported feed types:
+## Table of Contents
+- [Features](#features)
+- [Overview](#overview)
+- [Basic Usage](#basic-usage)
+- [Advanced Usage](#advanced-usage)
+- [Extensions](#extensions)
+- [Invalid Feeds](#invalid-feeds)
+- [Default Mappings](#default-mappings)
+- [Dependencies](#dependencies)
+- [License](#license)
+- [Credits](#credits)
+
+## Features
+
+#### Supported feed types:
* RSS 0.90
* Netscape RSS 0.91
* Userland RSS 0.91
@@ -16,21 +30,26 @@ The `gofeed` library is a robust feed parser that supports parsing both [RSS](ht
* Atom 0.3
* Atom 1.0
-It also provides support for parsing several popular predefined extension modules, including [Dublin Core](http://dublincore.org/documents/dces/) and [Apple’s iTunes](https://help.apple.com/itc/podcasts_connect/#/itcb54353390), as well as arbitrary extensions. See the [Extensions](#extensions) section for more details.
+#### Extension Support
-## Table of Contents
-- [Overview](#overview)
-- [Basic Usage](#basic-usage)
-- [Advanced Usage](#advanced-usage)
-- [Extensions](#extensions)
-- [Invalid Feeds](#invalid-feeds)
-- [Default Mappings](#default-mappings)
-- [Dependencies](#dependencies)
-- [License](#license)
-- [Credits](#credits)
+The `gofeed` library provides support for parsing several popular predefined extensions into ready-made structs, including [Dublin Core](http://dublincore.org/documents/dces/) and [Apple’s iTunes](https://help.apple.com/itc/podcasts_connect/#/itcb54353390).
+
+It parses all other feed extensions in a generic way (see the [Extensions](#extensions) section for more details).
+
+#### Invalid Feeds
+
+A best-effort attempt is made at parsing broken and invalid XML feeds. Currently, `gofeed` can successfully parse feeds with the following issues:
+- Unescaped/Naked Markup in feed elements
+- Undeclared namespace prefixes
+- Missing closing tags on certain elements
+- Illegal tags within feed elements without namespace prefixes
+- Missing "required" elements as specified by the respective feed specs.
+- Incorrect date formats
## Overview
+The `gofeed` library is composed of a universal feed parser and several feed specific parsers. Which one you choose depends entirely on your use case. If you will be handling both RSS and Atom feeds then it makes sense to use the `gofeed.Parser`. If you know ahead of time that you will only be parsing one feed type then it would make sense to use `rss.Parser` or `atom.Parser`.
+
#### Universal Feed Parser
The universal `gofeed.Parser` works in 3 stages: detection, parsing and translation. It first detects the feed type that it is currently parsing. Then it uses a feed specific parser to parse the feed into its true representation which will be either a `rss.Feed` or `atom.Feed`. These models cover every field possible for their respective feed types. Finally, they are *translated* into a `gofeed.Feed` model that is a hybrid of both feed types. Performing the universal feed parsing in these 3 stages allows for more flexibility and keeps the code base more maintainable by separating RSS and Atom parsing into separate packages.
@@ -43,8 +62,6 @@ The translation step is done by anything which adheres to the `gofeed.Translator
The `gofeed` library provides two feed specific parsers: `atom.Parser` and `rss.Parser`. If the hybrid `gofeed.Feed` model that the universal `gofeed.Parser` produces does not contain a field from the `atom.Feed` or `rss.Feed` model that you require, it might be beneficial to use the feed specific parsers. When using the `atom.Parser` or `rss.Parser` directly, you can access all of the fields found in the `atom.Feed` and `rss.Feed` models. It is also marginally faster because you are able to skip the translation step.
-However, for the *vast* majority of users, the universal `gofeed.Parser` is the best way to parse feeds. This allows the user of `gofeed` library to not care about the differences between RSS or Atom feeds.
-
## Basic Usage
#### Universal Feed Parser
@@ -184,16 +201,6 @@ Every element which does not belong to the feed's default namespace is considere
In addition to the generic handling of extensions, `gofeed` also has built in support for parsing certain popular extensions into their own structs for convenience. It currently supports the [Dublin Core](http://dublincore.org/documents/dces/) and [Apple iTunes](https://help.apple.com/itc/podcasts_connect/#/itcb54353390) extensions, which you can access at `Feed.ITunesExt`, `Feed.DublinCoreExt`, `Item.ITunesExt` and `Item.DublinCoreExt`.
-## Invalid Feeds
-
-A best-effort attempt is made at parsing broken and invalid XML feeds. Currently, `gofeed` can succesfully parse feeds with the following issues:
-- Unescaped/Naked Markup in feed elements
-- Undeclared namespace prefixes
-- Missing closing tags on certain elements
-- Illegal tags within feed elements without namespace prefixes
-- Missing "required" elements as specified by the respective feed specs.
-- Incorrect date formats
-
## Default Mappings
The ```DefaultRSSTranslator``` and the ```DefaultAtomTranslator``` map the following ```rss.Feed``` and ```atom.Feed``` fields to their respective ```gofeed.Feed``` fields. They are listed in order of precedence (highest to lowest):
diff --git a/vendor/github.com/mmcdole/gofeed/atom/feed.go b/vendor/github.com/mmcdole/gofeed/atom/feed.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/gofeed/atom/parser.go b/vendor/github.com/mmcdole/gofeed/atom/parser.go
old mode 100644
new mode 100755
index 2309cb8..000ace6
--- a/vendor/github.com/mmcdole/gofeed/atom/parser.go
+++ b/vendor/github.com/mmcdole/gofeed/atom/parser.go
@@ -6,9 +6,9 @@ import (
"strings"
"github.com/PuerkitoBio/goquery"
- "github.com/mmcdole/gofeed/extensions"
+ ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
- "github.com/mmcdole/goxpp"
+ xpp "github.com/mmcdole/goxpp"
)
var (
@@ -681,10 +681,8 @@ func (ap *Parser) parseAtomText(p *xpp.XMLPullParser) (string, error) {
lowerType := strings.ToLower(text.Type)
lowerMode := strings.ToLower(text.Mode)
- if strings.HasPrefix(result, "") {
- result = strings.TrimPrefix(result, "")
+ if strings.Contains(result, ""
+
// ParseText is a helper function for parsing the text
// from the current element of the XMLPullParser.
// This function can handle parsing naked XML text from
@@ -39,16 +42,46 @@ func ParseText(p *xpp.XMLPullParser) (string, error) {
result := text.InnerXML
result = strings.TrimSpace(result)
- if strings.HasPrefix(result, "") {
- result = strings.TrimPrefix(result, "")
- return result, nil
+ if strings.Contains(result, CDATA_START) {
+ return StripCDATA(result), nil
}
return DecodeEntities(result)
}
+// StripCDATA removes CDATA tags from the string;
+// content outside of CDATA tags is passed through DecodeEntities.
+func StripCDATA(str string) string {
+ buf := bytes.NewBuffer([]byte{})
+
+ curr := 0
+
+ for curr < len(str) {
+
+ start := indexAt(str, CDATA_START, curr)
+
+ if start == -1 {
+ dec, _ := DecodeEntities(str[curr:])
+ buf.Write([]byte(dec))
+ return buf.String()
+ }
+
+ end := indexAt(str, CDATA_END, start)
+
+ if end == -1 {
+ dec, _ := DecodeEntities(str[curr:])
+ buf.Write([]byte(dec))
+ return buf.String()
+ }
+
+ // Decode any entities in the text before the CDATA section, then
+ // copy the CDATA content through verbatim. Note that `end` is an
+ // absolute index, so advance `curr` directly past the closing tag.
+ dec, _ := DecodeEntities(str[curr:start])
+ buf.Write([]byte(dec))
+ buf.Write([]byte(str[start+len(CDATA_START) : end]))
+
+ curr = end + len(CDATA_END)
+ }
+
+ return buf.String()
+}
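+
+// Illustrative example (a sketch, not part of the upstream file):
+//
+//	StripCDATA("a <![CDATA[<b>bold</b>]]> c")
+//	// => "a <b>bold</b> c"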
+
// DecodeEntities decodes escaped XML entities
// in a string and returns the unescaped string
func DecodeEntities(str string) (string, error) {
@@ -151,3 +184,11 @@ func ParseNameAddress(nameAddressText string) (name string, address string) {
}
return
}
+
+func indexAt(str, substr string, start int) int {
+ idx := strings.Index(str[start:], substr)
+ if idx > -1 {
+ idx += start
+ }
+ return idx
+}
diff --git a/vendor/github.com/mmcdole/gofeed/internal/shared/xmlbase.go b/vendor/github.com/mmcdole/gofeed/internal/shared/xmlbase.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/gofeed/internal/shared/xmlsanitizer.go b/vendor/github.com/mmcdole/gofeed/internal/shared/xmlsanitizer.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/gofeed/parser.go b/vendor/github.com/mmcdole/gofeed/parser.go
old mode 100644
new mode 100755
index 1c5a243..273b466
--- a/vendor/github.com/mmcdole/gofeed/parser.go
+++ b/vendor/github.com/mmcdole/gofeed/parser.go
@@ -2,6 +2,7 @@ package gofeed
import (
"bytes"
+ "context"
"errors"
"fmt"
"io"
@@ -76,9 +77,20 @@ func (f *Parser) Parse(feed io.Reader) (*Feed, error) {
// ParseURL fetches the contents of a given url and
// attempts to parse the response into the universal feed type.
func (f *Parser) ParseURL(feedURL string) (feed *Feed, err error) {
+ return f.ParseURLWithContext(feedURL, context.Background())
+}
+
+// ParseURLWithContext fetches the contents of a given url and
+// attempts to parse the response into the universal feed type.
+// The request can be canceled or timed out via the given context.
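+//
+// Illustrative usage (a sketch; the feed URL is a placeholder):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	feed, err := gofeed.NewParser().ParseURLWithContext("https://example.com/feed.xml", ctx)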
+func (f *Parser) ParseURLWithContext(feedURL string, ctx context.Context) (feed *Feed, err error) {
client := f.httpClient()
- req, _ := http.NewRequest("GET", feedURL, nil)
+ req, err := http.NewRequest("GET", feedURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
req.Header.Set("User-Agent", "Gofeed/1.0")
resp, err := client.Do(req)
diff --git a/vendor/github.com/mmcdole/gofeed/rss/feed.go b/vendor/github.com/mmcdole/gofeed/rss/feed.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/gofeed/rss/parser.go b/vendor/github.com/mmcdole/gofeed/rss/parser.go
old mode 100644
new mode 100755
index 9fe9029..684d160
--- a/vendor/github.com/mmcdole/gofeed/rss/parser.go
+++ b/vendor/github.com/mmcdole/gofeed/rss/parser.go
@@ -5,9 +5,9 @@ import (
"io"
"strings"
- "github.com/mmcdole/gofeed/extensions"
+ ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
- "github.com/mmcdole/goxpp"
+ xpp "github.com/mmcdole/goxpp"
)
// Parser is a RSS Parser
diff --git a/vendor/github.com/mmcdole/gofeed/translator.go b/vendor/github.com/mmcdole/gofeed/translator.go
old mode 100644
new mode 100755
index 244ce75..4756b4b
--- a/vendor/github.com/mmcdole/gofeed/translator.go
+++ b/vendor/github.com/mmcdole/gofeed/translator.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/mmcdole/gofeed/atom"
- "github.com/mmcdole/gofeed/extensions"
+ ext "github.com/mmcdole/gofeed/extensions"
"github.com/mmcdole/gofeed/internal/shared"
"github.com/mmcdole/gofeed/rss"
)
diff --git a/vendor/github.com/mmcdole/goxpp/LICENSE b/vendor/github.com/mmcdole/goxpp/LICENSE
old mode 100644
new mode 100755
diff --git a/vendor/github.com/mmcdole/goxpp/README.md b/vendor/github.com/mmcdole/goxpp/README.md
old mode 100644
new mode 100755
index 1f38b65..ef23b5b
--- a/vendor/github.com/mmcdole/goxpp/README.md
+++ b/vendor/github.com/mmcdole/goxpp/README.md
@@ -1,6 +1,7 @@
# goxpp
[](https://travis-ci.org/mmcdole/goxpp) [](https://coveralls.io/github/mmcdole/goxpp?branch=master) [](http://doge.mit-license.org)
+[![GoDoc](https://godoc.org/github.com/mmcdole/goxpp?status.svg)](https://godoc.org/github.com/mmcdole/goxpp)
The `goxpp` library is an XML parser library that is loosely based on the [Java XMLPullParser](http://www.xmlpull.org/v1/download/unpacked/doc/quick_intro.html). This library allows you to easily parse arbitrary XML content using a pull parser. You can think of `goxpp` as a lightweight wrapper around Go's XML `Decoder` that provides a set of functions that make it easier to parse XML content than using the raw decoder itself.
diff --git a/vendor/github.com/mmcdole/goxpp/xpp.go b/vendor/github.com/mmcdole/goxpp/xpp.go
old mode 100644
new mode 100755
diff --git a/vendor/github.com/ncw/rclone/COPYING b/vendor/github.com/ncw/rclone/COPYING
new file mode 100755
index 0000000..8c27c67
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/COPYING
@@ -0,0 +1,20 @@
+Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/ncw/rclone/backend/drive/drive.go b/vendor/github.com/ncw/rclone/backend/drive/drive.go
new file mode 100755
index 0000000..c818de8
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/drive/drive.go
@@ -0,0 +1,2088 @@
+// Package drive interfaces with the Google Drive object storage system
+package drive
+
+// FIXME need to deal with some corner cases
+// * multiple files with the same name
+// * files can be in multiple directories
+// * can have directory loops
+// * files with / in name
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/config"
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/ncw/rclone/fs/config/configstruct"
+ "github.com/ncw/rclone/fs/config/obscure"
+ "github.com/ncw/rclone/fs/fserrors"
+ "github.com/ncw/rclone/fs/fshttp"
+ "github.com/ncw/rclone/fs/hash"
+ "github.com/ncw/rclone/fs/walk"
+ "github.com/ncw/rclone/lib/dircache"
+ "github.com/ncw/rclone/lib/oauthutil"
+ "github.com/ncw/rclone/lib/pacer"
+ "github.com/pkg/errors"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ drive "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+)
+
+// Constants
+const (
+ rcloneClientID = "202264815644.apps.googleusercontent.com"
+ rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
+ driveFolderType = "application/vnd.google-apps.folder"
+ timeFormatIn = time.RFC3339
+ timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
+ minSleep = 10 * time.Millisecond
+ defaultExtensions = "docx,xlsx,pptx,svg"
+ scopePrefix = "https://www.googleapis.com/auth/"
+ defaultScope = "drive"
+ // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
+ // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
+ defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
+)
+
+// Globals
+var (
+ // Description of how to auth for this app
+ driveConfig = &oauth2.Config{
+ Scopes: []string{scopePrefix + "drive"},
+ Endpoint: google.Endpoint,
+ ClientID: rcloneClientID,
+ ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+ RedirectURL: oauthutil.TitleBarRedirectURL,
+ }
+ mimeTypeToExtension = map[string]string{
+ "application/epub+zip": "epub",
+ "application/msword": "doc",
+ "application/pdf": "pdf",
+ "application/rtf": "rtf",
+ "application/vnd.ms-excel": "xls",
+ "application/vnd.oasis.opendocument.presentation": "odp",
+ "application/vnd.oasis.opendocument.spreadsheet": "ods",
+ "application/vnd.oasis.opendocument.text": "odt",
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
+ "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
+ "application/x-vnd.oasis.opendocument.spreadsheet": "ods",
+ "application/zip": "zip",
+ "image/jpeg": "jpg",
+ "image/png": "png",
+ "image/svg+xml": "svg",
+ "text/csv": "csv",
+ "text/html": "html",
+ "text/plain": "txt",
+ "text/tab-separated-values": "tsv",
+ }
+ extensionToMimeType map[string]string
+ partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents"
+ exportFormatsOnce sync.Once // make sure we fetch the export formats only once
+ _exportFormats map[string][]string // allowed export mime-type conversions
+)
+
+// Register with Fs
+func init() {
+ fs.Register(&fs.RegInfo{
+ Name: "drive",
+ Description: "Google Drive",
+ NewFs: NewFs,
+ Config: func(name string, m configmap.Mapper) {
+ // Parse config into Options struct
+ opt := new(Options)
+ err := configstruct.Set(m, opt)
+ if err != nil {
+ fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
+ return
+ }
+ // Fill in the scopes
+ if opt.Scope == "" {
+ opt.Scope = defaultScope
+ }
+ driveConfig.Scopes = nil
+ for _, scope := range strings.Split(opt.Scope, ",") {
+ driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
+ // Set the root_folder_id if using drive.appfolder
+ if scope == "drive.appfolder" {
+ m.Set("root_folder_id", "appDataFolder")
+ }
+ }
+ if opt.ServiceAccountFile == "" {
+ err = oauthutil.Config("drive", name, m, driveConfig)
+ if err != nil {
+ log.Fatalf("Failed to configure token: %v", err)
+ }
+ }
+ err = configTeamDrive(opt, m, name)
+ if err != nil {
+ log.Fatalf("Failed to configure team drive: %v", err)
+ }
+ },
+ Options: []fs.Option{{
+ Name: config.ConfigClientID,
+ Help: "Google Application Client Id\nLeave blank normally.",
+ }, {
+ Name: config.ConfigClientSecret,
+ Help: "Google Application Client Secret\nLeave blank normally.",
+ }, {
+ Name: "scope",
+ Help: "Scope that rclone should use when requesting access from drive.",
+ Examples: []fs.OptionExample{{
+ Value: "drive",
+ Help: "Full access all files, excluding Application Data Folder.",
+ }, {
+ Value: "drive.readonly",
+ Help: "Read-only access to file metadata and file contents.",
+ }, {
+ Value: "drive.file",
+ Help: "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
+ }, {
+ Value: "drive.appfolder",
+ Help: "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
+ }, {
+ Value: "drive.metadata.readonly",
+ Help: "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
+ }},
+ }, {
+ Name: "root_folder_id",
+ Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
+ }, {
+ Name: "service_account_file",
+			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
+ }, {
+ Name: "service_account_credentials",
+			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
+ Hide: fs.OptionHideBoth,
+ Advanced: true,
+ }, {
+ Name: "team_drive",
+ Help: "ID of the Team Drive",
+ Hide: fs.OptionHideBoth,
+ Advanced: true,
+ }, {
+ Name: "auth_owner_only",
+ Default: false,
+ Help: "Only consider files owned by the authenticated user.",
+ Advanced: true,
+ }, {
+ Name: "use_trash",
+ Default: true,
+ Help: "Send files to the trash instead of deleting permanently.",
+ Advanced: true,
+ }, {
+ Name: "skip_gdocs",
+ Default: false,
+ Help: "Skip google documents in all listings.",
+ Advanced: true,
+ }, {
+ Name: "shared_with_me",
+ Default: false,
+ Help: "Only show files that are shared with me",
+ Advanced: true,
+ }, {
+ Name: "trashed_only",
+ Default: false,
+ Help: "Only show files that are in the trash",
+ Advanced: true,
+ }, {
+ Name: "formats",
+ Default: defaultExtensions,
+ Help: "Comma separated list of preferred formats for downloading Google docs.",
+ Advanced: true,
+ }, {
+ Name: "use_created_date",
+ Default: false,
+ Help: "Use created date instead of modified date.",
+ Advanced: true,
+ }, {
+ Name: "list_chunk",
+ Default: 1000,
+ Help: "Size of listing chunk 100-1000. 0 to disable.",
+ Advanced: true,
+ }, {
+ Name: "impersonate",
+ Default: "",
+ Help: "Impersonate this user when using a service account.",
+ Advanced: true,
+ }, {
+ Name: "alternate_export",
+ Default: false,
+ Help: "Use alternate export URLs for google documents export.",
+ Advanced: true,
+ }, {
+ Name: "upload_cutoff",
+ Default: defaultChunkSize,
+ Help: "Cutoff for switching to chunked upload",
+ Advanced: true,
+ }, {
+ Name: "chunk_size",
+ Default: defaultChunkSize,
+			Help: "Upload chunk size. Must be a power of 2 >= 256k.",
+ Advanced: true,
+ }, {
+ Name: "acknowledge_abuse",
+ Default: false,
+ Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.",
+ Advanced: true,
+ }, {
+ Name: "keep_revision_forever",
+ Default: false,
+ Help: "Keep new head revision forever.",
+ Advanced: true,
+ }},
+ })
+
+ // Invert mimeTypeToExtension
+ extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
+ for mimeType, extension := range mimeTypeToExtension {
+ extensionToMimeType[extension] = mimeType
+ }
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+ Scope string `config:"scope"`
+ RootFolderID string `config:"root_folder_id"`
+ ServiceAccountFile string `config:"service_account_file"`
+ ServiceAccountCredentials string `config:"service_account_credentials"`
+ TeamDriveID string `config:"team_drive"`
+ AuthOwnerOnly bool `config:"auth_owner_only"`
+ UseTrash bool `config:"use_trash"`
+ SkipGdocs bool `config:"skip_gdocs"`
+ SharedWithMe bool `config:"shared_with_me"`
+ TrashedOnly bool `config:"trashed_only"`
+ Extensions string `config:"formats"`
+ UseCreatedDate bool `config:"use_created_date"`
+ ListChunk int64 `config:"list_chunk"`
+ Impersonate string `config:"impersonate"`
+ AlternateExport bool `config:"alternate_export"`
+ UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
+ ChunkSize fs.SizeSuffix `config:"chunk_size"`
+ AcknowledgeAbuse bool `config:"acknowledge_abuse"`
+ KeepRevisionForever bool `config:"keep_revision_forever"`
+}
+
+// Fs represents a remote drive server
+type Fs struct {
+ name string // name of this remote
+ root string // the path we are working on
+ opt Options // parsed options
+ features *fs.Features // optional features
+ svc *drive.Service // the connection to the drive server
+ client *http.Client // authorized client
+ rootFolderID string // the id of the root folder
+ dirCache *dircache.DirCache // Map of directory path to directory id
+ pacer *pacer.Pacer // To pace the API calls
+ extensions []string // preferred extensions to download docs
+ isTeamDrive bool // true if this is a team drive
+}
+
+// Object describes a drive object
+type Object struct {
+ fs *Fs // what this object is part of
+ remote string // The remote path
+ id string // Drive Id of this object
+ url string // Download URL of this object
+ md5sum string // md5sum of the object
+ bytes int64 // size of the object
+ modifiedDate string // RFC3339 time it was last modified
+ isDocument bool // if set this is a Google doc
+ mimeType string
+}
+
+// ------------------------------------------------------------
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+ return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+ return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+ return fmt.Sprintf("Google drive root '%s'", f.root)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+ return f.features
+}
+
+// shouldRetry determines whether a given err should be retried
+func shouldRetry(err error) (again bool, errOut error) {
+ again = false
+ if err != nil {
+ if fserrors.ShouldRetry(err) {
+ again = true
+ } else {
+ switch gerr := err.(type) {
+ case *googleapi.Error:
+ if gerr.Code >= 500 && gerr.Code < 600 {
+ // All 5xx errors should be retried
+ again = true
+ } else if len(gerr.Errors) > 0 {
+ reason := gerr.Errors[0].Reason
+ if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
+ again = true
+ }
+ }
+ }
+ }
+ }
+ return again, err
+}
+
+// parseDrivePath parses a drive 'url'
+func parseDrivePath(path string) (root string, err error) {
+ root = strings.Trim(path, "/")
+ return
+}
+
+// User function to process a File item from list
+//
+// Should return true to finish processing
+type listFn func(*drive.File) bool
+
+func containsString(slice []string, s string) bool {
+ for _, e := range slice {
+ if e == s {
+ return true
+ }
+ }
+ return false
+}
+
+// Lists the directories required, calling the user function on each item found
+//
+// If the user fn ever returns true then it early exits with found = true
+//
+// Search params: https://developers.google.com/drive/search-parameters
+func (f *Fs) list(dirIDs []string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
+ var query []string
+ if !includeAll {
+ q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
+ if f.opt.TrashedOnly {
+ q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
+ }
+ query = append(query, q)
+ }
+	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
+	// We must not filter by parent when we try to list "ROOT" with drive-shared-with-me
+	// If we need to list files inside those shared folders, we must search without sharedWithMe
+ parentsQuery := bytes.NewBufferString("(")
+ for _, dirID := range dirIDs {
+ if dirID == "" {
+ continue
+ }
+ if parentsQuery.Len() > 1 {
+ _, _ = parentsQuery.WriteString(" or ")
+ }
+ if f.opt.SharedWithMe && dirID == f.rootFolderID {
+ _, _ = parentsQuery.WriteString("sharedWithMe=true")
+ } else {
+ _, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
+ }
+ }
+ if parentsQuery.Len() > 1 {
+ _ = parentsQuery.WriteByte(')')
+ query = append(query, parentsQuery.String())
+ }
+ stem := ""
+ if title != "" {
+ // Escaping the backslash isn't documented but seems to work
+ searchTitle := strings.Replace(title, `\`, `\\`, -1)
+ searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
+		// Convert ／ to / for search
+		searchTitle = strings.Replace(searchTitle, "／", "/", -1)
+
+ handleGdocs := !directoriesOnly && !f.opt.SkipGdocs
+ // if the search title contains an extension and the extension is in the export extensions add a search
+ // for the filename without the extension.
+ // assume that export extensions don't contain escape sequences and only have one part (not .tar.gz)
+ if ext := path.Ext(searchTitle); handleGdocs && len(ext) > 0 && containsString(f.extensions, ext[1:]) {
+ stem = title[:len(title)-len(ext)]
+ query = append(query, fmt.Sprintf("(name='%s' or name='%s')", searchTitle, searchTitle[:len(title)-len(ext)]))
+ } else {
+ query = append(query, fmt.Sprintf("name='%s'", searchTitle))
+ }
+ }
+ if directoriesOnly {
+ query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
+ }
+ if filesOnly {
+ query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
+ }
+ list := f.svc.Files.List()
+ if len(query) > 0 {
+ list.Q(strings.Join(query, " and "))
+ // fmt.Printf("list Query = %q\n", query)
+ }
+ if f.opt.ListChunk > 0 {
+ list.PageSize(f.opt.ListChunk)
+ }
+ if f.isTeamDrive {
+ list.TeamDriveId(f.opt.TeamDriveID)
+ list.SupportsTeamDrives(true)
+ list.IncludeTeamDriveItems(true)
+ list.Corpora("teamDrive")
+ }
+ // If using appDataFolder then need to add Spaces
+ if f.rootFolderID == "appDataFolder" {
+ list.Spaces("appDataFolder")
+ }
+
+ var fields = partialFields
+
+ if f.opt.AuthOwnerOnly {
+ fields += ",owners"
+ }
+
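+	// request only the partial fields for each file, plus the token for the next page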
+ fields = fmt.Sprintf("files(%s),nextPageToken", fields)
+
+OUTER:
+ for {
+ var files *drive.FileList
+ err = f.pacer.Call(func() (bool, error) {
+ files, err = list.Fields(googleapi.Field(fields)).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return false, errors.Wrap(err, "couldn't list directory")
+ }
+ for _, item := range files.Files {
+			// Convert / to ／ for listing purposes
+			item.Name = strings.Replace(item.Name, "/", "／", -1)
+ // Check the case of items is correct since
+ // the `=` operator is case insensitive.
+
+ if title != "" && title != item.Name {
+ if stem == "" || stem != item.Name {
+ continue
+ }
+ _, exportName, _, _ := f.findExportFormat(item)
+ if exportName == "" || exportName != title {
+ continue
+ }
+ }
+ if fn(item) {
+ found = true
+ break OUTER
+ }
+ }
+ if files.NextPageToken == "" {
+ break
+ }
+ list.PageToken(files.NextPageToken)
+ }
+ return
+}
+
+// Returns true if x is a power of 2 or zero
+func isPowerOfTwo(x int64) bool {
+ switch {
+ case x == 0:
+ return true
+ case x < 0:
+ return false
+ default:
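+		// x & (x-1) clears the lowest set bit, so the result is zero only when exactly one bit is set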
+ return (x & (x - 1)) == 0
+ }
+}
+
+// parseExtensions parses drive export extensions from a string
+func (f *Fs) parseExtensions(extensions string) error {
+ for _, extension := range strings.Split(extensions, ",") {
+ extension = strings.ToLower(strings.TrimSpace(extension))
+ if _, found := extensionToMimeType[extension]; !found {
+ return errors.Errorf("couldn't find mime type for extension %q", extension)
+ }
+ found := false
+ for _, existingExtension := range f.extensions {
+ if extension == existingExtension {
+ found = true
+ break
+ }
+ }
+ if !found {
+ f.extensions = append(f.extensions, extension)
+ }
+ }
+ return nil
+}
+
+// Figure out if the user wants to use a team drive
+func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
+ if opt.TeamDriveID == "" {
+ fmt.Printf("Configure this as a team drive?\n")
+ } else {
+ fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
+ }
+ if !config.ConfirmWithDefault(false) {
+ return nil
+ }
+ client, err := createOAuthClient(opt, name, m)
+ if err != nil {
+ return errors.Wrap(err, "config team drive failed to create oauth client")
+ }
+ svc, err := drive.New(client)
+ if err != nil {
+ return errors.Wrap(err, "config team drive failed to make drive client")
+ }
+ fmt.Printf("Fetching team drive list...\n")
+ var driveIDs, driveNames []string
+ listTeamDrives := svc.Teamdrives.List().PageSize(100)
+ for {
+ var teamDrives *drive.TeamDriveList
+ err = newPacer().Call(func() (bool, error) {
+ teamDrives, err = listTeamDrives.Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return errors.Wrap(err, "list team drives failed")
+ }
+ for _, drive := range teamDrives.TeamDrives {
+ driveIDs = append(driveIDs, drive.Id)
+ driveNames = append(driveNames, drive.Name)
+ }
+ if teamDrives.NextPageToken == "" {
+ break
+ }
+ listTeamDrives.PageToken(teamDrives.NextPageToken)
+ }
+ var driveID string
+ if len(driveIDs) == 0 {
+		fmt.Printf("No team drives found in your account\n")
+ } else {
+ driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
+ }
+ m.Set("team_drive", driveID)
+ opt.TeamDriveID = driveID
+ return nil
+}
+
+// newPacer makes a pacer configured for drive
+func newPacer() *pacer.Pacer {
+ return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
+}
+
+func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
+ conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
+ if err != nil {
+ return nil, errors.Wrap(err, "error processing credentials")
+ }
+ if opt.Impersonate != "" {
+ conf.Subject = opt.Impersonate
+ }
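+	// carry rclone's own HTTP client in the context so the oauth2 token source uses it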
+ ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
+ return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
+}
+
+func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
+ var oAuthClient *http.Client
+ var err error
+
+ // try loading service account credentials from env variable, then from a file
+ if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
+ loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
+ if err != nil {
+ return nil, errors.Wrap(err, "error opening service account credentials file")
+ }
+ opt.ServiceAccountCredentials = string(loadedCreds)
+ }
+ if opt.ServiceAccountCredentials != "" {
+ oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create oauth client from service account")
+ }
+ } else {
+ oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create oauth client")
+ }
+ }
+
+ return oAuthClient, nil
+}
+
+// NewFs constructs an Fs from the path, container:path
+func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
+ // Parse config into Options struct
+ opt := new(Options)
+ err := configstruct.Set(m, opt)
+ if err != nil {
+ return nil, err
+ }
+ if !isPowerOfTwo(int64(opt.ChunkSize)) {
+ return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
+ }
+ if opt.ChunkSize < 256*1024 {
+ return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
+ }
+
+ oAuthClient, err := createOAuthClient(opt, name, m)
+ if err != nil {
+ return nil, errors.Wrap(err, "drive: failed when making oauth client")
+ }
+
+ root, err := parseDrivePath(path)
+ if err != nil {
+ return nil, err
+ }
+
+ f := &Fs{
+ name: name,
+ root: root,
+ opt: *opt,
+ pacer: newPacer(),
+ }
+ f.isTeamDrive = opt.TeamDriveID != ""
+ f.features = (&fs.Features{
+ DuplicateFiles: true,
+ ReadMimeType: true,
+ WriteMimeType: true,
+ CanHaveEmptyDirectories: true,
+ }).Fill(f)
+
+ // Create a new authorized Drive client.
+ f.client = oAuthClient
+ f.svc, err = drive.New(f.client)
+ if err != nil {
+ return nil, errors.Wrap(err, "couldn't create Drive client")
+ }
+
+ // set root folder for a team drive or query the user root folder
+ if f.isTeamDrive {
+ f.rootFolderID = f.opt.TeamDriveID
+ } else {
+ f.rootFolderID = "root"
+ }
+
+ // override root folder if set in the config
+ if opt.RootFolderID != "" {
+ f.rootFolderID = opt.RootFolderID
+ }
+
+ f.dirCache = dircache.New(root, f.rootFolderID, f)
+
+ // Parse extensions
+ err = f.parseExtensions(opt.Extensions)
+ if err != nil {
+ return nil, err
+ }
+ err = f.parseExtensions(defaultExtensions) // make sure there are some sensible ones on there
+ if err != nil {
+ return nil, err
+ }
+
+ // Find the current root
+ err = f.dirCache.FindRoot(false)
+ if err != nil {
+ // Assume it is a file
+ newRoot, remote := dircache.SplitPath(root)
+ tempF := *f
+ tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
+ tempF.root = newRoot
+ // Make new Fs which is the parent
+ err = tempF.dirCache.FindRoot(false)
+ if err != nil {
+ // No root so return old f
+ return f, nil
+ }
+ _, err := tempF.NewObject(remote)
+ if err != nil {
+ // unable to list folder so return old f
+ return f, nil
+ }
+ // XXX: update the old f here instead of returning tempF, since
+ // `features` were already filled with functions having *f as a receiver.
+ // See https://github.com/ncw/rclone/issues/2182
+ f.dirCache = tempF.dirCache
+ f.root = tempF.root
+ return f, fs.ErrorIsFile
+ }
+ // fmt.Printf("Root id %s", f.dirCache.RootID())
+ return f, nil
+}
+
+// Return an Object from a path
+//
+// If it can't be found it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
+ o := &Object{
+ fs: f,
+ remote: remote,
+ }
+ if info != nil {
+ o.setMetaData(info)
+ } else {
+ err := o.readMetaData() // reads info and meta, returning an error
+ if err != nil {
+ return nil, err
+ }
+ }
+ return o, nil
+}
+
+// NewObject finds the Object at remote. If it can't be found
+// it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+ return f.newObjectWithInfo(remote, nil)
+}
+
+// FindLeaf finds a directory of name leaf in the folder with ID pathID
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+ // Find the leaf in pathID
+ found, err = f.list([]string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
+ if item.Name == leaf {
+ pathIDOut = item.Id
+ return true
+ }
+ if !f.opt.SkipGdocs {
+ _, exportName, _, _ := f.findExportFormat(item)
+ if exportName == leaf {
+ pathIDOut = item.Id
+ return true
+ }
+ }
+ return false
+ })
+ return pathIDOut, found, err
+}
+
+// CreateDir makes a directory with pathID as parent and name leaf
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+ // fmt.Println("Making", path)
+ // Define the metadata for the directory we are going to create.
+ createInfo := &drive.File{
+ Name: leaf,
+ Description: leaf,
+ MimeType: driveFolderType,
+ Parents: []string{pathID},
+ }
+ var info *drive.File
+ err = f.pacer.Call(func() (bool, error) {
+ info, err = f.svc.Files.Create(createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return "", err
+ }
+ return info.Id, nil
+}
+
+// isAuthOwned checks if any of the item owners is the authenticated owner
+func isAuthOwned(item *drive.File) bool {
+ for _, owner := range item.Owners {
+ if owner.Me {
+ return true
+ }
+ }
+ return false
+}
+
+// exportFormats returns the export formats from drive, fetching them
+// if necessary.
+//
+// if the fetch fails then it will not export any drive formats
+func (f *Fs) exportFormats() map[string][]string {
+ exportFormatsOnce.Do(func() {
+ var about *drive.About
+ var err error
+ err = f.pacer.Call(func() (bool, error) {
+ about, err = f.svc.About.Get().Fields("exportFormats").Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ fs.Errorf(f, "Failed to get Drive exportFormats: %v", err)
+ _exportFormats = map[string][]string{}
+ return
+ }
+ _exportFormats = about.ExportFormats
+ })
+ return _exportFormats
+}
+
+// findExportFormat works out the optimum extension and mime-type
+// for this item.
+//
+// Look through the extensions and find the first format that can be
+// converted. If none is found then it returns empty strings.
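+// For example, with preferred extensions ["docx", "pdf"], a Google Doc is
+// exported as "<name>.docx" when Drive offers a docx conversion.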
+func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
+ exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
+ if isDocument {
+ for _, extension := range f.extensions {
+ mimeType := extensionToMimeType[extension]
+ for _, emt := range exportMimeTypes {
+ if emt == mimeType {
+ return extension, fmt.Sprintf("%s.%s", item.Name, extension), mimeType, true
+ }
+ }
+ }
+ }
+
+ // else return empty
+ return "", "", "", isDocument
+}
+
+// List the objects and directories in dir into entries. The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+ err = f.dirCache.FindRoot(false)
+ if err != nil {
+ return nil, err
+ }
+ directoryID, err := f.dirCache.FindDir(dir, false)
+ if err != nil {
+ return nil, err
+ }
+
+ var iErr error
+ _, err = f.list([]string{directoryID}, "", false, false, false, func(item *drive.File) bool {
+ entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
+		if err != nil {
+			iErr = err
+			return true
+		}
+ if entry != nil {
+ entries = append(entries, entry)
+ }
+ return false
+ })
+ if err != nil {
+ return nil, err
+ }
+ if iErr != nil {
+ return nil, iErr
+ }
+ return entries, nil
+}
+
+// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
+//
+// In each cycle, it will wait up to 10ms to read up to grouping entries from the in channel.
+// If an error occurs it will be sent to the out channel and the function returns. Once the in channel is closed,
+// nil is sent to the out channel and the function returns.
+func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan string, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
+ var dirs []string
+
+ for dir := range in {
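+		// start a fresh batch with this dir, reusing the backing array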
+ dirs = append(dirs[:0], dir)
+ wait := time.After(10 * time.Millisecond)
+ waitloop:
+ for i := 1; i < grouping; i++ {
+ select {
+ case d, ok := <-in:
+ if !ok {
+ break waitloop
+ }
+ dirs = append(dirs, d)
+ case <-wait:
+ break waitloop
+ }
+ }
+ var iErr error
+ _, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
+ parentPath := ""
+ if len(item.Parents) > 0 {
+ p, ok := f.dirCache.GetInv(item.Parents[0])
+ if ok {
+ parentPath = p
+ }
+ }
+ remote := path.Join(parentPath, item.Name)
+ entry, err := f.itemToDirEntry(remote, item)
+ if err != nil {
+ iErr = err
+ return true
+ }
+
+ err = cb(entry)
+ if err != nil {
+ iErr = err
+ return true
+ }
+ return false
+ })
+ for range dirs {
+ wg.Done()
+ }
+
+ if iErr != nil {
+ out <- iErr
+ return
+ }
+
+ if err != nil {
+ out <- err
+ return
+ }
+ }
+ out <- nil
+}
+
+// ListR lists the objects and directories of the Fs starting
+// from dir recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+//
+// Don't implement this unless you have a more efficient way
+// of listing recursively than doing a directory traversal.
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+ const (
+ grouping = 50
+ inputBuffer = 1000
+ )
+
+ err = f.dirCache.FindRoot(false)
+ if err != nil {
+ return err
+ }
+ directoryID, err := f.dirCache.FindDir(dir, false)
+ if err != nil {
+ return err
+ }
+
+ mu := sync.Mutex{} // protects in and overflow
+ wg := sync.WaitGroup{}
+ in := make(chan string, inputBuffer)
+ out := make(chan error, fs.Config.Checkers)
+ list := walk.NewListRHelper(callback)
+	overflow := []string{}
+
+ cb := func(entry fs.DirEntry) error {
+ mu.Lock()
+ defer mu.Unlock()
+ if d, isDir := entry.(*fs.Dir); isDir && in != nil {
+ select {
+ case in <- d.ID():
+ wg.Add(1)
+ default:
+				overflow = append(overflow, d.ID())
+ }
+ }
+ return list.Add(entry)
+ }
+
+ wg.Add(1)
+ in <- directoryID
+
+ for i := 0; i < fs.Config.Checkers; i++ {
+ go f.listRRunner(&wg, in, out, cb, grouping)
+ }
+ go func() {
+		// wait until all directories are processed
+ wg.Wait()
+ // if the input channel overflowed add the collected entries to the channel now
+		for len(overflow) > 0 {
+			mu.Lock()
+			l := len(overflow)
+			// only fill half of the channel to prevent entries being put into overflow again
+			if l > inputBuffer/2 {
+				l = inputBuffer / 2
+			}
+			wg.Add(l)
+			for _, d := range overflow[:l] {
+				in <- d
+			}
+			overflow = overflow[l:]
+ mu.Unlock()
+
+ // wait again for the completion of all directories
+ wg.Wait()
+ }
+ mu.Lock()
+ if in != nil {
+ // notify all workers to exit
+ close(in)
+ in = nil
+ }
+ mu.Unlock()
+ }()
+	// wait until all the workers finish
+ for i := 0; i < fs.Config.Checkers; i++ {
+ e := <-out
+ mu.Lock()
+ // if one worker returns an error early, close the input so all other workers exit
+ if e != nil && in != nil {
+ err = e
+ close(in)
+ in = nil
+ }
+ mu.Unlock()
+ }
+
+ close(out)
+ if err != nil {
+ return err
+ }
+
+ return list.Flush()
+}
+
+func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
+ switch {
+ case item.MimeType == driveFolderType:
+ // cache the directory ID for later lookups
+ f.dirCache.Put(remote, item.Id)
+ when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
+ d := fs.NewDir(remote, when).SetID(item.Id)
+ return d, nil
+ case f.opt.AuthOwnerOnly && !isAuthOwned(item):
+ // ignore object
+ case item.Md5Checksum != "" || item.Size > 0:
+ // If item has MD5 sum or a length it is a file stored on drive
+ o, err := f.newObjectWithInfo(remote, item)
+ if err != nil {
+ return nil, err
+ }
+ return o, nil
+ case f.opt.SkipGdocs:
+ fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
+ default:
+ // If item MimeType is in the ExportFormats then it is a google doc
+ extension, _, exportMimeType, isDocument := f.findExportFormat(item)
+ if !isDocument {
+ fs.Debugf(remote, "Ignoring unknown document type %q", item.MimeType)
+ break
+ }
+ if extension == "" {
+ fs.Debugf(remote, "No export formats found for %q", item.MimeType)
+ break
+ }
+ o, err := f.newObjectWithInfo(remote+"."+extension, item)
+ if err != nil {
+ return nil, err
+ }
+ obj := o.(*Object)
+ obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
+ if f.opt.AlternateExport {
+ switch item.MimeType {
+ case "application/vnd.google-apps.drawing":
+ obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
+ case "application/vnd.google-apps.document":
+ obj.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", item.Id, extension)
+ case "application/vnd.google-apps.spreadsheet":
+ obj.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", item.Id, extension)
+ case "application/vnd.google-apps.presentation":
+ obj.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", item.Id, extension)
+ }
+ }
+ obj.isDocument = true
+ obj.mimeType = exportMimeType
+ obj.bytes = -1
+ return o, nil
+ }
+ return nil, nil
+}
+
+// Creates a drive.File info from the parameters passed in, and a
+// half-finished Object which must have setMetaData called on it
+//
+// Used to create new objects
+func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Object, *drive.File, error) {
+ // Temporary Object under construction
+ o := &Object{
+ fs: f,
+ remote: remote,
+ bytes: size,
+ }
+
+ leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Define the metadata for the file we are going to create.
+ createInfo := &drive.File{
+ Name: leaf,
+ Description: leaf,
+ Parents: []string{directoryID},
+ MimeType: fs.MimeTypeFromName(remote),
+ ModifiedTime: modTime.Format(timeFormatOut),
+ }
+ return o, createInfo, nil
+}
+
+// Put the object
+//
+// Copy the reader in to the new object which is returned
+//
+// The new object may have been created if an error is returned
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+	switch err {
+	case nil:
+		return existingObj, existingObj.Update(in, src, options...)
+ case fs.ErrorObjectNotFound:
+ // Not found so create it
+ return f.PutUnchecked(in, src, options...)
+ default:
+ return nil, err
+ }
+}
+
+// PutStream uploads to the remote path with the modTime given of indeterminate size
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ return f.Put(in, src, options...)
+}
+
+// PutUnchecked uploads the object
+//
+// This will create a duplicate if we upload a new file without
+// checking to see if there is one already - use Put() for that.
+func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ remote := src.Remote()
+ size := src.Size()
+ modTime := src.ModTime()
+
+ o, createInfo, err := f.createFileInfo(remote, modTime, size)
+ if err != nil {
+ return nil, err
+ }
+
+ var info *drive.File
+ if size == 0 || size < int64(f.opt.UploadCutoff) {
+ // Make the API request to upload metadata and file data.
+ // Don't retry, return a retry error instead
+ err = f.pacer.CallNoRetry(func() (bool, error) {
+ info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return o, err
+ }
+ } else {
+ // Upload the file in chunks
+ info, err = f.Upload(in, size, createInfo.MimeType, "", createInfo, remote)
+ if err != nil {
+ return o, err
+ }
+ }
+ o.setMetaData(info)
+ return o, nil
+}
+
+// MergeDirs merges the contents of all the directories passed
+// in into the first one and rmdirs the other directories.
+func (f *Fs) MergeDirs(dirs []fs.Directory) error {
+ if len(dirs) < 2 {
+ return nil
+ }
+ dstDir := dirs[0]
+ for _, srcDir := range dirs[1:] {
+		// list the objects
+ infos := []*drive.File{}
+ _, err := f.list([]string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
+ infos = append(infos, info)
+ return false
+ })
+ if err != nil {
+ return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
+ }
+ // move them into place
+ for _, info := range infos {
+ fs.Infof(srcDir, "merging %q", info.Name)
+ // Move the file into the destination
+ err = f.pacer.Call(func() (bool, error) {
+ _, err = f.svc.Files.Update(info.Id, nil).RemoveParents(srcDir.ID()).AddParents(dstDir.ID()).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
+ }
+ }
+ // rmdir (into trash) the now empty source directory
+ fs.Infof(srcDir, "removing empty directory")
+ err = f.rmdir(srcDir.ID(), true)
+ if err != nil {
+			return errors.Wrapf(err, "MergeDirs failed to rmdir %q", srcDir)
+ }
+ }
+ return nil
+}
+
+// Mkdir creates the container if it doesn't exist
+func (f *Fs) Mkdir(dir string) error {
+ err := f.dirCache.FindRoot(true)
+ if err != nil {
+ return err
+ }
+ if dir != "" {
+ _, err = f.dirCache.FindDir(dir, true)
+ }
+ return err
+}
+
+// Rmdir deletes a directory unconditionally by ID
+func (f *Fs) rmdir(directoryID string, useTrash bool) error {
+ return f.pacer.Call(func() (bool, error) {
+ var err error
+ if useTrash {
+ info := drive.File{
+ Trashed: true,
+ }
+ _, err = f.svc.Files.Update(directoryID, &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ } else {
+ err = f.svc.Files.Delete(directoryID).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ }
+ return shouldRetry(err)
+ })
+}
+
+// Rmdir deletes a directory
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(dir string) error {
+ root := path.Join(f.root, dir)
+ dc := f.dirCache
+ directoryID, err := dc.FindDir(dir, false)
+ if err != nil {
+ return err
+ }
+ var trashedFiles = false
+ found, err := f.list([]string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+ if !item.Trashed {
+ fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
+ return true
+ }
+ fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
+ trashedFiles = true
+ return false
+ })
+ if err != nil {
+ return err
+ }
+ if found {
+ return errors.Errorf("directory not empty")
+ }
+ if root != "" {
+ // trash the directory if it had trashed files
+ // in or the user wants to trash, otherwise
+ // delete it.
+ err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
+ if err != nil {
+ return err
+ }
+ }
+ f.dirCache.FlushDir(dir)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Precision of the object storage system
+func (f *Fs) Precision() time.Duration {
+ return time.Millisecond
+}
+
+// Copy src to this remote using server side copy operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+ srcObj, ok := src.(*Object)
+ if !ok {
+ fs.Debugf(src, "Can't copy - not same remote type")
+ return nil, fs.ErrorCantCopy
+ }
+ if srcObj.isDocument {
+ return nil, errors.New("can't copy a Google document")
+ }
+
+ o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ var info *drive.File
+ err = o.fs.pacer.Call(func() (bool, error) {
+ info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ o.setMetaData(info)
+ return o, nil
+}
+
+// Purge deletes all the files and the container
+//
+// Optional interface: Only implement this if you have a way of
+// deleting all the files quicker than just running Remove() on the
+// result of List()
+func (f *Fs) Purge() error {
+ if f.root == "" {
+ return errors.New("can't purge root directory")
+ }
+ err := f.dirCache.FindRoot(false)
+ if err != nil {
+ return err
+ }
+ err = f.pacer.Call(func() (bool, error) {
+ if f.opt.UseTrash {
+ info := drive.File{
+ Trashed: true,
+ }
+ _, err = f.svc.Files.Update(f.dirCache.RootID(), &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ } else {
+ err = f.svc.Files.Delete(f.dirCache.RootID()).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ }
+ return shouldRetry(err)
+ })
+ f.dirCache.ResetRoot()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CleanUp empties the trash
+func (f *Fs) CleanUp() error {
+ err := f.pacer.Call(func() (bool, error) {
+ err := f.svc.Files.EmptyTrash().Do()
+ return shouldRetry(err)
+ })
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// About gets quota information
+func (f *Fs) About() (*fs.Usage, error) {
+ if f.isTeamDrive {
+ // Teamdrives don't appear to have a usage API so just return empty
+ return &fs.Usage{}, nil
+ }
+ var about *drive.About
+ var err error
+ err = f.pacer.Call(func() (bool, error) {
+ about, err = f.svc.About.Get().Fields("storageQuota").Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to get Drive storageQuota")
+ }
+ q := about.StorageQuota
+ usage := &fs.Usage{
+ Used: fs.NewUsageValue(q.UsageInDrive), // bytes in use
+ Trashed: fs.NewUsageValue(q.UsageInDriveTrash), // bytes in trash
+ Other: fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage eg gmail in drive
+ }
+ if q.Limit > 0 {
+ usage.Total = fs.NewUsageValue(q.Limit) // quota of bytes that can be used
+ usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
+ }
+ return usage, nil
+}
+
+// Move src to this remote using server side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+ srcObj, ok := src.(*Object)
+ if !ok {
+ fs.Debugf(src, "Can't move - not same remote type")
+ return nil, fs.ErrorCantMove
+ }
+ if srcObj.isDocument {
+ return nil, errors.New("can't move a Google document")
+ }
+ _, srcParentID, err := srcObj.fs.dirCache.FindPath(src.Remote(), false)
+ if err != nil {
+ return nil, err
+ }
+
+ // Temporary Object under construction
+ dstObj, dstInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
+ if err != nil {
+ return nil, err
+ }
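+	// the Update call sets parents via AddParents/RemoveParents, so strip them from the patch body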
+ dstParents := strings.Join(dstInfo.Parents, ",")
+ dstInfo.Parents = nil
+
+ // Do the move
+ var info *drive.File
+ err = f.pacer.Call(func() (bool, error) {
+ info, err = f.svc.Files.Update(srcObj.id, dstInfo).RemoveParents(srcParentID).AddParents(dstParents).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ dstObj.setMetaData(info)
+ return dstObj, nil
+}
+
+// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
+func (f *Fs) PublicLink(remote string) (link string, err error) {
+ id, err := f.dirCache.FindDir(remote, false)
+ if err == nil {
+ fs.Debugf(f, "attempting to share directory '%s'", remote)
+ } else {
+ fs.Debugf(f, "attempting to share single file '%s'", remote)
+ o := &Object{
+ fs: f,
+ remote: remote,
+ }
+ if err = o.readMetaData(); err != nil {
+ return
+ }
+ id = o.id
+ }
+
+ permission := &drive.Permission{
+ AllowFileDiscovery: false,
+ Role: "reader",
+ Type: "anyone",
+ }
+
+ err = f.pacer.Call(func() (bool, error) {
+ // TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
+ // Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
+ _, err = f.svc.Permissions.Create(id, permission).Fields(googleapi.Field("id")).SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+ srcFs, ok := src.(*Fs)
+ if !ok {
+ fs.Debugf(srcFs, "Can't move directory - not same remote type")
+ return fs.ErrorCantDirMove
+ }
+ srcPath := path.Join(srcFs.root, srcRemote)
+ dstPath := path.Join(f.root, dstRemote)
+
+ // Refuse to move to or from the root
+ if srcPath == "" || dstPath == "" {
+ fs.Debugf(src, "DirMove error: Can't move root")
+ return errors.New("can't move root directory")
+ }
+
+ // find the root src directory
+ err := srcFs.dirCache.FindRoot(false)
+ if err != nil {
+ return err
+ }
+
+ // find the root dst directory
+ if dstRemote != "" {
+ err = f.dirCache.FindRoot(true)
+ if err != nil {
+ return err
+ }
+ } else {
+ if f.dirCache.FoundRoot() {
+ return fs.ErrorDirExists
+ }
+ }
+
+ // Find ID of dst parent, creating subdirs if necessary
+ var leaf, dstDirectoryID string
+ findPath := dstRemote
+ if dstRemote == "" {
+ findPath = f.root
+ }
+ leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
+ if err != nil {
+ return err
+ }
+
+ // Check destination does not exist
+ if dstRemote != "" {
+ _, err = f.dirCache.FindDir(dstRemote, false)
+ if err == fs.ErrorDirNotFound {
+ // OK
+ } else if err != nil {
+ return err
+ } else {
+ return fs.ErrorDirExists
+ }
+ }
+
+ // Find ID of src parent
+ var srcDirectoryID string
+ if srcRemote == "" {
+ srcDirectoryID, err = srcFs.dirCache.RootParentID()
+ } else {
+ _, srcDirectoryID, err = srcFs.dirCache.FindPath(srcRemote, false)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Find ID of src
+ srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+ if err != nil {
+ return err
+ }
+
+ // Do the move
+ patch := drive.File{
+ Name: leaf,
+ }
+ err = f.pacer.Call(func() (bool, error) {
+ _, err = f.svc.Files.Update(srcID, &patch).RemoveParents(srcDirectoryID).AddParents(dstDirectoryID).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return err
+ }
+ srcFs.dirCache.FlushDir(srcRemote)
+ return nil
+}
+
+// ChangeNotify calls the passed function with a path that has had changes.
+// If the implementation uses polling, it should adhere to the given interval.
+//
+// Automatically restarts itself in case of unexpected behaviour of the remote.
+//
+// Close the returned channel to stop being notified.
+func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+ quit := make(chan bool)
+ go func() {
+ select {
+ case <-quit:
+ return
+ default:
+ for {
+ f.changeNotifyRunner(notifyFunc, pollInterval)
+ fs.Debugf(f, "Notify listener service ran into issues, restarting shortly.")
+ time.Sleep(pollInterval)
+ }
+ }
+ }()
+ return quit
+}
+
+func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) {
+ var err error
+ var startPageToken *drive.StartPageToken
+ err = f.pacer.Call(func() (bool, error) {
+ startPageToken, err = f.svc.Changes.GetStartPageToken().SupportsTeamDrives(f.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ fs.Debugf(f, "Failed to get StartPageToken: %v", err)
+ return
+ }
+ pageToken := startPageToken.StartPageToken
+
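+	// poll the changes feed from this token onwards, sleeping between quiet polls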
+ for {
+ fs.Debugf(f, "Checking for changes on remote")
+ var changeList *drive.ChangeList
+
+ err = f.pacer.Call(func() (bool, error) {
+ changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
+ if f.opt.ListChunk > 0 {
+ changesCall.PageSize(f.opt.ListChunk)
+ }
+ if f.isTeamDrive {
+ changesCall.TeamDriveId(f.opt.TeamDriveID)
+ changesCall.SupportsTeamDrives(true)
+ changesCall.IncludeTeamDriveItems(true)
+ }
+ changeList, err = changesCall.Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ fs.Debugf(f, "Failed to get Changes: %v", err)
+ return
+ }
+
+ type entryType struct {
+ path string
+ entryType fs.EntryType
+ }
+ var pathsToClear []entryType
+ for _, change := range changeList.Changes {
+ if path, ok := f.dirCache.GetInv(change.FileId); ok {
+ if change.File != nil && change.File.MimeType != driveFolderType {
+ pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
+ } else {
+ pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
+ }
+ continue
+ }
+
+ if change.File != nil {
+ changeType := fs.EntryDirectory
+ if change.File.MimeType != driveFolderType {
+ changeType = fs.EntryObject
+ }
+
+ // translate the parent dir of this object
+ if len(change.File.Parents) > 0 {
+ if path, ok := f.dirCache.GetInv(change.File.Parents[0]); ok {
+ // and append the drive file name to compute the full file name
+ if len(path) > 0 {
+ path = path + "/" + change.File.Name
+ } else {
+ path = change.File.Name
+ }
+ // this will now clear the actual file too
+ pathsToClear = append(pathsToClear, entryType{path: path, entryType: changeType})
+ }
+ } else { // a true root object that is changed
+ pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
+ }
+ }
+ }
+
+ visitedPaths := make(map[string]bool)
+ for _, entry := range pathsToClear {
+ if _, ok := visitedPaths[entry.path]; ok {
+ continue
+ }
+ visitedPaths[entry.path] = true
+ notifyFunc(entry.path, entry.entryType)
+ }
+
+ if changeList.NewStartPageToken != "" {
+ pageToken = changeList.NewStartPageToken
+ fs.Debugf(f, "All changes were processed. Waiting for more.")
+ time.Sleep(pollInterval)
+ } else if changeList.NextPageToken != "" {
+ pageToken = changeList.NextPageToken
+ fs.Debugf(f, "There are more changes pending, checking now.")
+ } else {
+ fs.Debugf(f, "Did not get any page token, something went wrong! %+v", changeList)
+ return
+ }
+ }
+}
+
+// DirCacheFlush resets the directory cache - used in testing as an
+// optional interface
+func (f *Fs) DirCacheFlush() {
+ f.dirCache.ResetRoot()
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set {
+ return hash.Set(hash.MD5)
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+ return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+ if o == nil {
+ return ""
+ }
+ return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+ return o.remote
+}
+
+// Hash returns the Md5sum of an object returning a lowercase hex string
+func (o *Object) Hash(t hash.Type) (string, error) {
+ if t != hash.MD5 {
+ return "", hash.ErrUnsupported
+ }
+ return o.md5sum, nil
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+ return o.bytes
+}
+
+// setMetaData sets the fs data from a drive.File
+func (o *Object) setMetaData(info *drive.File) {
+ o.id = info.Id
+ o.url = fmt.Sprintf("%sfiles/%s?alt=media", o.fs.svc.BasePath, info.Id)
+ o.md5sum = strings.ToLower(info.Md5Checksum)
+ o.bytes = info.Size
+ if o.fs.opt.UseCreatedDate {
+ o.modifiedDate = info.CreatedTime
+ } else {
+ o.modifiedDate = info.ModifiedTime
+ }
+ o.mimeType = info.MimeType
+}
+
+// setGdocsMetaData only sets the gdocs related fields
+func (o *Object) setGdocsMetaData(info *drive.File, extension, exportMimeType string) {
+ o.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", o.fs.svc.BasePath, info.Id, url.QueryEscape(exportMimeType))
+ if o.fs.opt.AlternateExport {
+ switch info.MimeType {
+ case "application/vnd.google-apps.drawing":
+ o.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension)
+ case "application/vnd.google-apps.document":
+ o.url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension)
+ case "application/vnd.google-apps.spreadsheet":
+ o.url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension)
+ case "application/vnd.google-apps.presentation":
+ o.url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension)
+ }
+ }
+ o.isDocument = true
+ o.mimeType = exportMimeType
+ o.bytes = -1
+}
+
+// readMetaData gets the info if it hasn't already been fetched
+func (o *Object) readMetaData() (err error) {
+ if o.id != "" {
+ return nil
+ }
+
+ leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
+ if err != nil {
+ if err == fs.ErrorDirNotFound {
+ return fs.ErrorObjectNotFound
+ }
+ return err
+ }
+
+ found, err := o.fs.list([]string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
+ if item.Name == leaf {
+ o.setMetaData(item)
+ return true
+ }
+ if !o.fs.opt.SkipGdocs {
+ extension, exportName, exportMimeType, _ := o.fs.findExportFormat(item)
+ if exportName == leaf {
+ o.setMetaData(item)
+ o.setGdocsMetaData(item, extension, exportMimeType)
+ return true
+ }
+ }
+ return false
+ })
+ if err != nil {
+ return err
+ }
+ if !found {
+ return fs.ErrorObjectNotFound
+ }
+ return nil
+}
+
+// ModTime returns the modification time of the object
+//
+// It attempts to read the object's mtime and, if that isn't present,
+// falls back to the LastModified returned in the http headers
+func (o *Object) ModTime() time.Time {
+ err := o.readMetaData()
+ if err != nil {
+ fs.Debugf(o, "Failed to read metadata: %v", err)
+ return time.Now()
+ }
+ modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
+ if err != nil {
+ fs.Debugf(o, "Failed to read mtime from object: %v", err)
+ return time.Now()
+ }
+ return modTime
+}
+
+// SetModTime sets the modification time of the drive fs object
+func (o *Object) SetModTime(modTime time.Time) error {
+ err := o.readMetaData()
+ if err != nil {
+ return err
+ }
+ // New metadata
+ updateInfo := &drive.File{
+ ModifiedTime: modTime.Format(timeFormatOut),
+ }
+ // Set modified date
+ var info *drive.File
+ err = o.fs.pacer.Call(func() (bool, error) {
+ info, err = o.fs.svc.Files.Update(o.id, updateInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return err
+ }
+ // Update info from read data
+ o.setMetaData(info)
+ return nil
+}
+
+// Storable returns a boolean as to whether this object is storable
+func (o *Object) Storable() bool {
+ return true
+}
+
+// httpResponse gets an http.Response object for the object o.url
+// using the method passed in
+func (o *Object) httpResponse(method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
+ if o.url == "" {
+ return nil, nil, errors.New("forbidden to download - check sharing permission")
+ }
+ if o.isDocument {
+ for _, o := range options {
+ // https://developers.google.com/drive/v3/web/manage-downloads#partial_download
+ if _, ok := o.(*fs.RangeOption); ok {
+ return nil, nil, errors.New("partial downloads are not supported while exporting Google Documents")
+ }
+ }
+ }
+ req, err = http.NewRequest(method, o.url, nil)
+ if err != nil {
+ return req, nil, err
+ }
+ fs.OpenOptionAddHTTPHeaders(req.Header, options)
+ err = o.fs.pacer.Call(func() (bool, error) {
+ res, err = o.fs.client.Do(req)
+ if err == nil {
+ err = googleapi.CheckResponse(res)
+ if err != nil {
+ _ = res.Body.Close() // ignore error
+ }
+ }
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return req, nil, err
+ }
+ return req, res, nil
+}
+
+// openFile represents an Object open for reading
+type openFile struct {
+ o *Object // Object we are reading for
+ in io.ReadCloser // reading from here
+ bytes int64 // number of bytes read on this connection
+ eof bool // whether we have read end of file
+ errored bool // whether we have encountered an error during reading
+}
+
+// Read bytes from the object - see io.Reader
+func (file *openFile) Read(p []byte) (n int, err error) {
+ n, err = file.in.Read(p)
+ file.bytes += int64(n)
+ if err != nil && err != io.EOF {
+ file.errored = true
+ }
+ if err == io.EOF {
+ file.eof = true
+ }
+ return
+}
+
+// Close the object and update bytes read
+func (file *openFile) Close() (err error) {
+ // If end of file, update bytes read
+ if file.eof && !file.errored {
+ fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
+ file.o.bytes = file.bytes
+ }
+ return file.in.Close()
+}
+
+// Check it satisfies the interfaces
+var _ io.ReadCloser = &openFile{}
+
+// isGoogleError checks to see if err is a googleapi.Error of type what
+func isGoogleError(err error, what string) bool {
+ if gerr, ok := err.(*googleapi.Error); ok {
+		for _, e := range gerr.Errors {
+			if e.Reason == what {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// Open an object for read
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+ _, res, err := o.httpResponse("GET", options)
+ if err != nil {
+ if isGoogleError(err, "cannotDownloadAbusiveFile") {
+ if o.fs.opt.AcknowledgeAbuse {
+ // Retry acknowledging abuse
+ if strings.ContainsRune(o.url, '?') {
+ o.url += "&"
+ } else {
+ o.url += "?"
+ }
+ o.url += "acknowledgeAbuse=true"
+ _, res, err = o.httpResponse("GET", options)
+ } else {
+ err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
+ }
+ }
+ if err != nil {
+ return nil, errors.Wrap(err, "open file failed")
+ }
+ }
+ // If it is a document, update the size with what we are
+ // reading as it can change from the HEAD in the listing to
+ // this GET. This stops rclone marking the transfer as
+ // corrupted.
+ if o.isDocument {
+ return &openFile{o: o, in: res.Body}, nil
+ }
+ return res.Body, nil
+}
+
+// Update the already existing object
+//
+// Copy the reader into the object updating modTime and size
+//
+// The new object may have been created if an error is returned
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+ size := src.Size()
+ modTime := src.ModTime()
+ if o.isDocument {
+ return errors.New("can't update a google document")
+ }
+ updateInfo := &drive.File{
+ MimeType: fs.MimeType(src),
+ ModifiedTime: modTime.Format(timeFormatOut),
+ }
+
+ // Make the API request to upload metadata and file data.
+ var err error
+ var info *drive.File
+ if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
+ // Don't retry, return a retry error instead
+ err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+ info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return err
+ }
+ } else {
+ // Upload the file in chunks
+ info, err = o.fs.Upload(in, size, updateInfo.MimeType, o.id, updateInfo, o.remote)
+ if err != nil {
+ return err
+ }
+ }
+ o.setMetaData(info)
+ return nil
+}
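+
+// Note on the size cutoff above: files of zero size or below
+// --drive-upload-cutoff are sent in a single multipart request, while
+// larger files use the resumable protocol in upload.go and are
+// transferred in --drive-chunk-size pieces.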
+
+// Remove an object
+func (o *Object) Remove() error {
+ if o.isDocument {
+ return errors.New("can't delete a google document")
+ }
+ var err error
+ err = o.fs.pacer.Call(func() (bool, error) {
+ if o.fs.opt.UseTrash {
+ info := drive.File{
+ Trashed: true,
+ }
+ _, err = o.fs.svc.Files.Update(o.id, &info).Fields("").SupportsTeamDrives(o.fs.isTeamDrive).Do()
+ } else {
+ err = o.fs.svc.Files.Delete(o.id).Fields("").SupportsTeamDrives(o.fs.isTeamDrive).Do()
+ }
+ return shouldRetry(err)
+ })
+ return err
+}
+
+// MimeType of an Object if known, "" otherwise
+func (o *Object) MimeType() string {
+ err := o.readMetaData()
+ if err != nil {
+ fs.Debugf(o, "Failed to read metadata: %v", err)
+ return ""
+ }
+ return o.mimeType
+}
+
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+ return o.id
+}
+
+// Check the interfaces are satisfied
+var (
+ _ fs.Fs = (*Fs)(nil)
+ _ fs.Purger = (*Fs)(nil)
+ _ fs.CleanUpper = (*Fs)(nil)
+ _ fs.PutStreamer = (*Fs)(nil)
+ _ fs.Copier = (*Fs)(nil)
+ _ fs.Mover = (*Fs)(nil)
+ _ fs.DirMover = (*Fs)(nil)
+ _ fs.DirCacheFlusher = (*Fs)(nil)
+ _ fs.ChangeNotifier = (*Fs)(nil)
+ _ fs.PutUncheckeder = (*Fs)(nil)
+ _ fs.PublicLinker = (*Fs)(nil)
+ _ fs.ListRer = (*Fs)(nil)
+ _ fs.MergeDirser = (*Fs)(nil)
+ _ fs.Abouter = (*Fs)(nil)
+ _ fs.Object = (*Object)(nil)
+ _ fs.MimeTyper = (*Object)(nil)
+ _ fs.IDer = (*Object)(nil)
+)
diff --git a/vendor/github.com/ncw/rclone/backend/drive/upload.go b/vendor/github.com/ncw/rclone/backend/drive/upload.go
new file mode 100755
index 0000000..350637d
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/drive/upload.go
@@ -0,0 +1,265 @@
+// Upload for drive
+//
+// Docs
+// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
+// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
+// Files insert: https://developers.google.com/drive/v2/reference/files/insert
+// Files update: https://developers.google.com/drive/v2/reference/files/update
+//
+// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
+
+package drive
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/fserrors"
+ "github.com/ncw/rclone/lib/readers"
+ "github.com/pkg/errors"
+ "google.golang.org/api/drive/v3"
+ "google.golang.org/api/googleapi"
+)
+
+const (
+ // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
+ statusResumeIncomplete = 308
+)
+
+// resumableUpload is used by the generated APIs to provide resumable uploads.
+// It is not used by developers directly.
+type resumableUpload struct {
+ f *Fs
+ remote string
+ // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
+ URI string
+ // Media is the object being uploaded.
+ Media io.Reader
+ // MediaType defines the media type, e.g. "image/jpeg".
+ MediaType string
+ // ContentLength is the full size of the object being uploaded.
+ ContentLength int64
+ // Return value
+ ret *drive.File
+}
+
+// Upload the io.Reader in of size bytes with contentType and info
+func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
+ params := make(url.Values)
+ params.Set("alt", "json")
+ params.Set("uploadType", "resumable")
+ params.Set("fields", partialFields)
+ if f.isTeamDrive {
+ params.Set("supportsTeamDrives", "true")
+ }
+ if f.opt.KeepRevisionForever {
+ params.Set("keepRevisionForever", "true")
+ }
+ urls := "https://www.googleapis.com/upload/drive/v3/files"
+ method := "POST"
+ if fileID != "" {
+ params.Set("setModifiedDate", "true")
+ urls += "/{fileId}"
+ method = "PATCH"
+ }
+ urls += "?" + params.Encode()
+ var res *http.Response
+ var err error
+ err = f.pacer.Call(func() (bool, error) {
+ var body io.Reader
+ body, err = googleapi.WithoutDataWrapper.JSONReader(info)
+ if err != nil {
+ return false, err
+ }
+ var req *http.Request
+ req, err = http.NewRequest(method, urls, body)
+ if err != nil {
+ return false, err
+ }
+ googleapi.Expand(req.URL, map[string]string{
+ "fileId": fileID,
+ })
+ req.Header.Set("Content-Type", "application/json; charset=UTF-8")
+ req.Header.Set("X-Upload-Content-Type", contentType)
+ req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
+ res, err = f.client.Do(req)
+ if err == nil {
+ defer googleapi.CloseBody(res)
+ err = googleapi.CheckResponse(res)
+ }
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return nil, err
+ }
+ loc := res.Header.Get("Location")
+ rx := &resumableUpload{
+ f: f,
+ remote: remote,
+ URI: loc,
+ Media: in,
+ MediaType: contentType,
+ ContentLength: size,
+ }
+ return rx.Upload()
+}
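+
+// The resumable protocol in outline: the initial POST above carries the
+// file metadata as JSON plus X-Upload-Content-Type and
+// X-Upload-Content-Length headers, and the server answers with a
+// session URI in the Location header. The media is then sent to that
+// URI in one or more requests with Content-Range headers, each
+// answered with status 308 (resume incomplete) until the final 200 or 201.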
+
+// Make an http.Request for the range passed in
+func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+ req, _ := http.NewRequest("POST", rx.URI, body)
+ req.ContentLength = reqSize
+ if reqSize != 0 {
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
+ } else {
+ req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
+ }
+ req.Header.Set("Content-Type", rx.MediaType)
+ return req
+}
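+
+// For example (sizes assumed for illustration), a 10 MiB upload sent
+// in 8 MiB chunks produces two requests with
+//
+//	Content-Range: bytes 0-8388607/10485760
+//	Content-Range: bytes 8388608-10485759/10485760
+//
+// while a status query (reqSize == 0) sends
+// "Content-Range: bytes */10485760" with an empty body.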
+
+// rangeRE matches the transfer status response from the server. $1 is
+// the last byte index uploaded.
+var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
+
+// Query drive for the amount transferred so far
+//
+// If error is nil, then start should be valid
+func (rx *resumableUpload) transferStatus() (start int64, err error) {
+ req := rx.makeRequest(0, nil, 0)
+ res, err := rx.f.client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+ defer googleapi.CloseBody(res)
+ if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
+ return rx.ContentLength, nil
+ }
+ if res.StatusCode != statusResumeIncomplete {
+ err = googleapi.CheckResponse(res)
+ if err != nil {
+ return 0, err
+ }
+ return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
+ }
+ Range := res.Header.Get("Range")
+ if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
+ start, err = strconv.ParseInt(m[1], 10, 64)
+ if err == nil {
+ return start, nil
+ }
+ }
+ return 0, errors.Errorf("unable to parse range %q", Range)
+}
+
+// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
+func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+ _, _ = chunk.Seek(0, io.SeekStart)
+ req := rx.makeRequest(start, chunk, chunkSize)
+ res, err := rx.f.client.Do(req)
+ if err != nil {
+ return 599, err
+ }
+ defer googleapi.CloseBody(res)
+ if res.StatusCode == statusResumeIncomplete {
+ return res.StatusCode, nil
+ }
+ err = googleapi.CheckResponse(res)
+ if err != nil {
+ return res.StatusCode, err
+ }
+
+ // When the entire file upload is complete, the server
+ // responds with an HTTP 201 Created along with any metadata
+ // associated with this resource. If this request had been
+ // updating an existing entity rather than creating a new one,
+ // the HTTP response code for a completed upload would have
+ // been 200 OK.
+ //
+ // So parse the response out of the body. We aren't expecting
+	// any other 2xx codes, so we parse it unconditionally on
+ // StatusCode
+ if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
+ return 598, err
+ }
+
+ return res.StatusCode, nil
+}
+
+// Upload uploads the chunks from the input
+// It retries each chunk using the pacer and --low-level-retries
+func (rx *resumableUpload) Upload() (*drive.File, error) {
+ start := int64(0)
+ var StatusCode int
+ var err error
+ buf := make([]byte, int(rx.f.opt.ChunkSize))
+ for start < rx.ContentLength {
+ reqSize := rx.ContentLength - start
+ if reqSize >= int64(rx.f.opt.ChunkSize) {
+ reqSize = int64(rx.f.opt.ChunkSize)
+ }
+ chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
+
+ // Transfer the chunk
+ err = rx.f.pacer.Call(func() (bool, error) {
+ fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
+ StatusCode, err = rx.transferChunk(start, chunk, reqSize)
+ again, err := shouldRetry(err)
+ if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
+ again = false
+ err = nil
+ }
+ return again, err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ start += reqSize
+ }
+ // Resume or retry uploads that fail due to connection interruptions or
+ // any 5xx errors, including:
+ //
+ // 500 Internal Server Error
+ // 502 Bad Gateway
+ // 503 Service Unavailable
+ // 504 Gateway Timeout
+ //
+ // Use an exponential backoff strategy if any 5xx server error is
+ // returned when resuming or retrying upload requests. These errors can
+ // occur if a server is getting overloaded. Exponential backoff can help
+ // alleviate these kinds of problems during periods of high volume of
+ // requests or heavy network traffic. Other kinds of requests should not
+ // be handled by exponential backoff but you can still retry a number of
+ // them. When retrying these requests, limit the number of times you
+ // retry them. For example your code could limit to ten retries or less
+ // before reporting an error.
+ //
+ // Handle 404 Not Found errors when doing resumable uploads by starting
+ // the entire upload over from the beginning.
+ if rx.ret == nil {
+ return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
+ }
+ return rx.ret, nil
+}
diff --git a/vendor/github.com/ncw/rclone/backend/dropbox/dbhash/dbhash.go b/vendor/github.com/ncw/rclone/backend/dropbox/dbhash/dbhash.go
new file mode 100755
index 0000000..3cdc5d7
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/dropbox/dbhash/dbhash.go
@@ -0,0 +1,145 @@
+// Package dbhash implements the Dropbox hash as described in
+//
+// https://www.dropbox.com/developers/reference/content-hash
+package dbhash
+
+import (
+ "crypto/sha256"
+ "hash"
+)
+
+const (
+ // BlockSize of the checksum in bytes.
+ BlockSize = sha256.BlockSize
+ // Size of the checksum in bytes.
+	Size = sha256.Size
+ bytesPerBlock = 4 * 1024 * 1024
+ hashReturnedError = "hash function returned error"
+)
+
+type digest struct {
+ n int // bytes written into blockHash so far
+ blockHash hash.Hash
+ totalHash hash.Hash
+ sumCalled bool
+ writtenMore bool
+}
+
+// New returns a new hash.Hash computing the Dropbox checksum.
+func New() hash.Hash {
+ d := &digest{}
+ d.Reset()
+ return d
+}
+
+// writeBlockHash writes the current block hash into the total hash
+func (d *digest) writeBlockHash() {
+ blockHash := d.blockHash.Sum(nil)
+ _, err := d.totalHash.Write(blockHash)
+ if err != nil {
+ panic(hashReturnedError)
+ }
+ // reset counters for blockhash
+ d.n = 0
+ d.blockHash.Reset()
+}
+
+// Write writes len(p) bytes from p to the underlying data stream. It returns
+// the number of bytes written from p (0 <= n <= len(p)) and any error
+// encountered that caused the write to stop early. Write must return a non-nil
+// error if it returns n < len(p). Write must not modify the slice data, even
+// temporarily.
+//
+// Implementations must not retain p.
+func (d *digest) Write(p []byte) (n int, err error) {
+ n = len(p)
+ for len(p) > 0 {
+ d.writtenMore = true
+ toWrite := bytesPerBlock - d.n
+ if toWrite > len(p) {
+ toWrite = len(p)
+ }
+ _, err = d.blockHash.Write(p[:toWrite])
+ if err != nil {
+ panic(hashReturnedError)
+ }
+ d.n += toWrite
+ p = p[toWrite:]
+ // Accumulate the total hash
+ if d.n == bytesPerBlock {
+ d.writeBlockHash()
+ }
+ }
+ return n, nil
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+//
+// TODO(ncw) Sum() can only be called once for this type of hash.
+// If you call Sum(), then Write() then Sum() it will result in
+// a panic. Calling Write() then Sum(), then Sum() is OK.
+func (d *digest) Sum(b []byte) []byte {
+ if d.sumCalled && d.writtenMore {
+ panic("digest.Sum() called more than once")
+ }
+ d.sumCalled = true
+ d.writtenMore = false
+ if d.n != 0 {
+ d.writeBlockHash()
+ }
+ return d.totalHash.Sum(b)
+}
+
+// Reset resets the Hash to its initial state.
+func (d *digest) Reset() {
+ d.n = 0
+ d.totalHash = sha256.New()
+ d.blockHash = sha256.New()
+ d.sumCalled = false
+ d.writtenMore = false
+}
+
+// Size returns the number of bytes Sum will return.
+func (d *digest) Size() int {
+ return d.totalHash.Size()
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (d *digest) BlockSize() int {
+ return d.totalHash.BlockSize()
+}
+
+// Sum returns the Dropbox checksum of the data.
+func Sum(data []byte) [Size]byte {
+ var d digest
+ d.Reset()
+ _, _ = d.Write(data)
+ var out [Size]byte
+ d.Sum(out[:0])
+ return out
+}
+
+// must implement this interface
+var _ hash.Hash = (*digest)(nil)
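+
+// Equivalently, for data split into 4 MiB blocks b0..bn, the checksum
+// is sha256(sha256(b0) || sha256(b1) || ... || sha256(bn)). A direct,
+// non-streaming sketch of the same computation:
+//
+//	func naiveSum(data []byte) [Size]byte {
+//		var concat []byte
+//		for len(data) > 0 {
+//			n := bytesPerBlock
+//			if n > len(data) {
+//				n = len(data)
+//			}
+//			s := sha256.Sum256(data[:n])
+//			concat = append(concat, s[:]...)
+//			data = data[n:]
+//		}
+//		return sha256.Sum256(concat)
+//	}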
diff --git a/vendor/github.com/ncw/rclone/backend/local/about_unix.go b/vendor/github.com/ncw/rclone/backend/local/about_unix.go
new file mode 100755
index 0000000..745f2e5
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/about_unix.go
@@ -0,0 +1,29 @@
+// +build darwin dragonfly freebsd linux
+
+package local
+
+import (
+ "syscall"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+// About gets quota information
+func (f *Fs) About() (*fs.Usage, error) {
+ var s syscall.Statfs_t
+ err := syscall.Statfs(f.root, &s)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to read disk usage")
+ }
+ bs := int64(s.Bsize)
+ usage := &fs.Usage{
+ Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
+ Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
+ Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
+ }
+ return usage, nil
+}
+
+// check interface
+var _ fs.Abouter = &Fs{}
diff --git a/vendor/github.com/ncw/rclone/backend/local/about_windows.go b/vendor/github.com/ncw/rclone/backend/local/about_windows.go
new file mode 100755
index 0000000..4c9dcec
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/about_windows.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package local
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+
+// About gets quota information
+func (f *Fs) About() (*fs.Usage, error) {
+ var available, total, free int64
+ _, _, e1 := getFreeDiskSpace.Call(
+ uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
+ uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
+ uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
+ uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
+ )
+ if e1 != syscall.Errno(0) {
+ return nil, errors.Wrap(e1, "failed to read disk usage")
+ }
+ usage := &fs.Usage{
+ Total: fs.NewUsageValue(total), // quota of bytes that can be used
+ Used: fs.NewUsageValue(total - free), // bytes in use
+ Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
+ }
+ return usage, nil
+}
+
+// check interface
+var _ fs.Abouter = &Fs{}
diff --git a/vendor/github.com/ncw/rclone/backend/local/local.go b/vendor/github.com/ncw/rclone/backend/local/local.go
new file mode 100755
index 0000000..26ae8ed
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/local.go
@@ -0,0 +1,1014 @@
+// Package local provides a filesystem interface
+package local
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/ncw/rclone/fs/config/configstruct"
+ "github.com/ncw/rclone/fs/hash"
+ "github.com/ncw/rclone/lib/readers"
+ "github.com/pkg/errors"
+)
+
+// Constants
+const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
+
+// Register with Fs
+func init() {
+ fsi := &fs.RegInfo{
+ Name: "local",
+ Description: "Local Disk",
+ NewFs: NewFs,
+ Options: []fs.Option{{
+ Name: "nounc",
+ Help: "Disable UNC (long path names) conversion on Windows",
+ Examples: []fs.OptionExample{{
+ Value: "true",
+ Help: "Disables long file names",
+ }},
+ }, {
+ Name: "copy_links",
+ Help: "Follow symlinks and copy the pointed to item.",
+ Default: false,
+ NoPrefix: true,
+ ShortOpt: "L",
+ Advanced: true,
+ }, {
+ Name: "skip_links",
+ Help: "Don't warn about skipped symlinks.",
+ Default: false,
+ NoPrefix: true,
+ Advanced: true,
+ }, {
+ Name: "no_unicode_normalization",
+ Help: "Don't apply unicode normalization to paths and filenames",
+ Default: false,
+ Advanced: true,
+ }, {
+ Name: "no_check_updated",
+ Help: "Don't check to see if the files change during upload",
+ Default: false,
+ Advanced: true,
+ }, {
+ Name: "one_file_system",
+ Help: "Don't cross filesystem boundaries (unix/macOS only).",
+ Default: false,
+ NoPrefix: true,
+ ShortOpt: "x",
+ Advanced: true,
+ }},
+ }
+ fs.Register(fsi)
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+ FollowSymlinks bool `config:"copy_links"`
+ SkipSymlinks bool `config:"skip_links"`
+ NoUTFNorm bool `config:"no_unicode_normalization"`
+ NoCheckUpdated bool `config:"no_check_updated"`
+ NoUNC bool `config:"nounc"`
+ OneFileSystem bool `config:"one_file_system"`
+}
+
+// Fs represents a local filesystem rooted at root
+type Fs struct {
+ name string // the name of the remote
+ root string // The root directory (OS path)
+ opt Options // parsed config options
+ features *fs.Features // optional features
+ dev uint64 // device number of root node
+ precisionOk sync.Once // Whether we need to read the precision
+ precision time.Duration // precision of local filesystem
+ wmu sync.Mutex // used for locking access to 'warned'.
+ warned map[string]struct{} // whether we have warned about this string
+ // do os.Lstat or os.Stat
+ lstat func(name string) (os.FileInfo, error)
+ dirNames *mapper // directory name mapping
+ objectHashesMu sync.Mutex // global lock for Object.hashes
+}
+
+// Object represents a local filesystem object
+type Object struct {
+ fs *Fs // The Fs this object is part of
+ remote string // The remote path - properly UTF-8 encoded - for rclone
+ path string // The local path - may not be properly UTF-8 encoded - for OS
+ size int64 // file metadata - always present
+ mode os.FileMode
+ modTime time.Time
+ hashes map[hash.Type]string // Hashes
+}
+
+// ------------------------------------------------------------
+
+// NewFs constructs an Fs from the path
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ // Parse config into Options struct
+ opt := new(Options)
+ err := configstruct.Set(m, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ if opt.NoUTFNorm {
+ fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
+ }
+
+ f := &Fs{
+ name: name,
+ opt: *opt,
+ warned: make(map[string]struct{}),
+ dev: devUnset,
+ lstat: os.Lstat,
+ dirNames: newMapper(),
+ }
+ f.root = f.cleanPath(root)
+ f.features = (&fs.Features{
+ CaseInsensitive: f.caseInsensitive(),
+ CanHaveEmptyDirectories: true,
+ }).Fill(f)
+ if opt.FollowSymlinks {
+ f.lstat = os.Stat
+ }
+
+ // Check to see if this points to a file
+ fi, err := f.lstat(f.root)
+ if err == nil {
+ f.dev = readDevice(fi, f.opt.OneFileSystem)
+ }
+ if err == nil && fi.Mode().IsRegular() {
+ // It is a file, so use the parent as the root
+ f.root = filepath.Dir(f.root)
+ // return an error with an fs which points to the parent
+ return f, fs.ErrorIsFile
+ }
+ return f, nil
+}
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+ return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+ return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+ return fmt.Sprintf("Local file system at %s", f.root)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+ return f.features
+}
+
+// caseInsensitive returns whether the remote is case insensitive or not
+func (f *Fs) caseInsensitive() bool {
+ // FIXME not entirely accurate since you can have case
+	// sensitive Fses on darwin and case insensitive Fses on linux.
+ // Should probably check but that would involve creating a
+ // file in the remote to be most accurate which probably isn't
+ // desirable.
+ return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
+}
+
+// newObject makes a half completed Object
+//
+// if dstPath is empty then it is made from remote
+func (f *Fs) newObject(remote, dstPath string) *Object {
+ if dstPath == "" {
+ dstPath = f.cleanPath(filepath.Join(f.root, remote))
+ }
+ remote = f.cleanRemote(remote)
+ return &Object{
+ fs: f,
+ remote: remote,
+ path: dstPath,
+ }
+}
+
+// Return an Object from a path
+//
+// May return nil if an error occurred
+func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
+ o := f.newObject(remote, dstPath)
+ if info != nil {
+ o.setMetadata(info)
+ } else {
+ err := o.lstat()
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fs.ErrorObjectNotFound
+ }
+ if os.IsPermission(err) {
+ return nil, fs.ErrorPermissionDenied
+ }
+ return nil, err
+ }
+ }
+ if o.mode.IsDir() {
+ return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
+ }
+ return o, nil
+}
+
+// NewObject finds the Object at remote. If it can't be found
+// it returns the error ErrorObjectNotFound.
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+ return f.newObjectWithInfo(remote, "", nil)
+}
+
+// List the objects and directories in dir into entries. The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+ dir = f.dirNames.Load(dir)
+ fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
+ remote := f.cleanRemote(dir)
+ _, err = os.Stat(fsDirPath)
+ if err != nil {
+ return nil, fs.ErrorDirNotFound
+ }
+
+ fd, err := os.Open(fsDirPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to open directory %q", dir)
+ }
+ defer func() {
+ cerr := fd.Close()
+ if cerr != nil && err == nil {
+ err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
+ }
+ }()
+
+ for {
+ fis, err := fd.Readdir(1024)
+ if err == io.EOF && len(fis) == 0 {
+ break
+ }
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to read directory %q", dir)
+ }
+
+ for _, fi := range fis {
+ name := fi.Name()
+ mode := fi.Mode()
+ newRemote := path.Join(remote, name)
+ newPath := filepath.Join(fsDirPath, name)
+ // Follow symlinks if required
+ if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
+ fi, err = os.Stat(newPath)
+ if err != nil {
+ return nil, err
+ }
+ mode = fi.Mode()
+ }
+ if fi.IsDir() {
+ // Ignore directories which are symlinks. These are junction points under windows which
+ // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
+ if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
+ d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
+ entries = append(entries, d)
+ }
+ } else {
+ fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
+ if err != nil {
+ return nil, err
+ }
+ if fso.Storable() {
+ entries = append(entries, fso)
+ }
+ }
+ }
+ }
+ return entries, nil
+}
+
+// cleanRemote makes a string into valid UTF-8 for use in remote names.
+//
+// Any invalid UTF-8 characters will be replaced with utf8.RuneError and
+// the path separators converted to slashes if necessary.
+func (f *Fs) cleanRemote(name string) string {
+ if !utf8.ValidString(name) {
+ f.wmu.Lock()
+ if _, ok := f.warned[name]; !ok {
+ fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
+ f.warned[name] = struct{}{}
+ }
+ f.wmu.Unlock()
+ name = string([]rune(name))
+ }
+ name = filepath.ToSlash(name)
+ return name
+}
+
+// mapper maps raw to cleaned directory names
+type mapper struct {
+ mu sync.RWMutex // mutex to protect the below
+ m map[string]string // map of un-normalised directory names
+}
+
+func newMapper() *mapper {
+ return &mapper{
+ m: make(map[string]string),
+ }
+}
+
+// Load looks up a cleaned directory name and returns the raw local
+// name it was saved from (reverses Save)
+//
+// FIXME this is temporary before we make a proper Directory object
+func (m *mapper) Load(in string) string {
+ m.mu.RLock()
+ out, ok := m.m[in]
+ m.mu.RUnlock()
+ if ok {
+ return out
+ }
+ return in
+}
+
+// Save records a cleaned directory name if it needed to be altered
+//
+// FIXME this is temporary before we make a proper Directory object
+func (m *mapper) Save(in, out string) string {
+ if in != out {
+ m.mu.Lock()
+ m.m[out] = in
+ m.mu.Unlock()
+ }
+ return out
+}
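+
+// For example, a raw OS name "caf\xe9" (invalid UTF-8) is cleaned by
+// cleanRemote to "caf\ufffd"; Save("caf\xe9", "caf\ufffd") records that
+// mapping, and a later Load("caf\ufffd") recovers the original OS name
+// so the directory can be opened again.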
+
+// Put the Object to the local filesystem
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ remote := src.Remote()
+ // Temporary Object under construction - info filled in by Update()
+ o := f.newObject(remote, "")
+ err := o.Update(in, src, options...)
+ if err != nil {
+ return nil, err
+ }
+ return o, nil
+}
+
+// PutStream uploads to the remote path with the modTime given of indeterminate size
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ return f.Put(in, src, options...)
+}
+
+// Mkdir creates the directory if it doesn't exist
+func (f *Fs) Mkdir(dir string) error {
+ // FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
+ root := f.cleanPath(filepath.Join(f.root, dir))
+ err := os.MkdirAll(root, 0777)
+ if err != nil {
+ return err
+ }
+ if dir == "" {
+ fi, err := f.lstat(root)
+ if err != nil {
+ return err
+ }
+ f.dev = readDevice(fi, f.opt.OneFileSystem)
+ }
+ return nil
+}
+
+// Rmdir removes the directory
+//
+// If it isn't empty it will return an error
+func (f *Fs) Rmdir(dir string) error {
+ root := f.cleanPath(filepath.Join(f.root, dir))
+ return os.Remove(root)
+}
+
+// Precision of the file system
+func (f *Fs) Precision() (precision time.Duration) {
+ f.precisionOk.Do(func() {
+ f.precision = f.readPrecision()
+ })
+ return f.precision
+}
+
+// Read the precision
+func (f *Fs) readPrecision() (precision time.Duration) {
+ // Default precision of 1s
+ precision = time.Second
+
+ // Create temporary file and test it
+ fd, err := ioutil.TempFile("", "rclone")
+ if err != nil {
+ // If failed return 1s
+ // fmt.Println("Failed to create temp file", err)
+ return time.Second
+ }
+ path := fd.Name()
+ // fmt.Println("Created temp file", path)
+ err = fd.Close()
+ if err != nil {
+ return time.Second
+ }
+
+ // Delete it on return
+ defer func() {
+ // fmt.Println("Remove temp file")
+ _ = os.Remove(path) // ignore error
+ }()
+
+ // Find the minimum duration we can detect
+ for duration := time.Duration(1); duration < time.Second; duration *= 10 {
+ // Current time with delta
+ t := time.Unix(time.Now().Unix(), int64(duration))
+ err := os.Chtimes(path, t, t)
+ if err != nil {
+ // fmt.Println("Failed to Chtimes", err)
+ break
+ }
+
+ // Read the actual time back
+ fi, err := os.Stat(path)
+ if err != nil {
+ // fmt.Println("Failed to Stat", err)
+ break
+ }
+
+ // If it matches - have found the precision
+ // fmt.Println("compare", fi.ModTime(), t)
+ if fi.ModTime().Equal(t) {
+ // fmt.Println("Precision detected as", duration)
+ return duration
+ }
+ }
+ return
+}
+
+// Purge deletes all the files and directories
+//
+// Optional interface: Only implement this if you have a way of
+// deleting all the files quicker than just running Remove() on the
+// result of List()
+func (f *Fs) Purge() error {
+ fi, err := f.lstat(f.root)
+ if err != nil {
+ return err
+ }
+ if !fi.Mode().IsDir() {
+ return errors.Errorf("can't purge non directory: %q", f.root)
+ }
+ return os.RemoveAll(f.root)
+}
+
+// Move src to this remote using server side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+ srcObj, ok := src.(*Object)
+ if !ok {
+ fs.Debugf(src, "Can't move - not same remote type")
+ return nil, fs.ErrorCantMove
+ }
+
+ // Temporary Object under construction
+ dstObj := f.newObject(remote, "")
+
+ // Check it is a file if it exists
+ err := dstObj.lstat()
+ if os.IsNotExist(err) {
+ // OK
+ } else if err != nil {
+ return nil, err
+ } else if !dstObj.mode.IsRegular() {
+ // It isn't a file
+ return nil, errors.New("can't move file onto non-file")
+ }
+
+ // Create destination
+ err = dstObj.mkdirAll()
+ if err != nil {
+ return nil, err
+ }
+
+ // Do the move
+ err = os.Rename(srcObj.path, dstObj.path)
+ if os.IsNotExist(err) {
+ // race condition, source was deleted in the meantime
+ return nil, err
+ } else if os.IsPermission(err) {
+ // not enough rights to write to dst
+ return nil, err
+ } else if err != nil {
+ // not quite clear, but probably trying to move a file across file system
+ // boundaries. Copying might still work.
+ fs.Debugf(src, "Can't move: %v: trying copy", err)
+ return nil, fs.ErrorCantMove
+ }
+
+ // Update the info
+ err = dstObj.lstat()
+ if err != nil {
+ return nil, err
+ }
+
+ return dstObj, nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+ srcFs, ok := src.(*Fs)
+ if !ok {
+ fs.Debugf(srcFs, "Can't move directory - not same remote type")
+ return fs.ErrorCantDirMove
+ }
+ srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
+ dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
+
+ // Check if destination exists
+ _, err := os.Lstat(dstPath)
+ if !os.IsNotExist(err) {
+ return fs.ErrorDirExists
+ }
+
+ // Create parent of destination
+ dstParentPath := filepath.Dir(dstPath)
+ err = os.MkdirAll(dstParentPath, 0777)
+ if err != nil {
+ return err
+ }
+
+ // Do the move
+ err = os.Rename(srcPath, dstPath)
+ if os.IsNotExist(err) {
+ // race condition, source was deleted in the meantime
+ return err
+ } else if os.IsPermission(err) {
+ // not enough rights to write to dst
+ return err
+ } else if err != nil {
+ // not quite clear, but probably trying to move directory across file system
+ // boundaries. Copying might still work.
+ fs.Debugf(src, "Can't move dir: %v: trying copy", err)
+ return fs.ErrorCantDirMove
+ }
+ return nil
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set {
+ return hash.Supported
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+ return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+ if o == nil {
+ return ""
+ }
+ return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+ return o.remote
+}
+
+// Hash returns the requested hash of a file as a lowercase hex string
+func (o *Object) Hash(r hash.Type) (string, error) {
+ // Check that the underlying file hasn't changed
+ oldtime := o.modTime
+ oldsize := o.size
+ err := o.lstat()
+ if err != nil {
+ return "", errors.Wrap(err, "hash: failed to stat")
+ }
+
+ o.fs.objectHashesMu.Lock()
+ hashes := o.hashes
+ o.fs.objectHashesMu.Unlock()
+
+ if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
+ in, err := os.Open(o.path)
+ if err != nil {
+ return "", errors.Wrap(err, "hash: failed to open")
+ }
+ hashes, err = hash.Stream(in)
+ closeErr := in.Close()
+ if err != nil {
+ return "", errors.Wrap(err, "hash: failed to read")
+ }
+ if closeErr != nil {
+ return "", errors.Wrap(closeErr, "hash: failed to close")
+ }
+ o.fs.objectHashesMu.Lock()
+ o.hashes = hashes
+ o.fs.objectHashesMu.Unlock()
+ }
+ return hashes[r], nil
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+ return o.size
+}
+
+// ModTime returns the modification time of the object
+func (o *Object) ModTime() time.Time {
+ return o.modTime
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(modTime time.Time) error {
+ err := os.Chtimes(o.path, modTime, modTime)
+ if err != nil {
+ return err
+ }
+ // Re-read metadata
+ return o.lstat()
+}
+
+// Storable returns a boolean showing if this object is storable
+func (o *Object) Storable() bool {
+ // Check for control characters in the remote name and show non storable
+ for _, c := range o.Remote() {
+ if c >= 0x00 && c < 0x20 || c == 0x7F {
+ fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
+ return false
+ }
+ }
+ mode := o.mode
+ if mode&os.ModeSymlink != 0 {
+ if !o.fs.opt.SkipSymlinks {
+ fs.Logf(o, "Can't follow symlink without -L/--copy-links")
+ }
+ return false
+ } else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+ fs.Logf(o, "Can't transfer non file/directory")
+ return false
+ } else if mode&os.ModeDir != 0 {
+ // fs.Debugf(o, "Skipping directory")
+ return false
+ }
+ return true
+}
+
+// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
+// object that is read
+type localOpenFile struct {
+ o *Object // object that is open
+ in io.ReadCloser // handle we are wrapping
+ hash *hash.MultiHasher // currently accumulating hashes
+ fd *os.File // file object reference
+}
+
+// Read bytes from the object - see io.Reader
+func (file *localOpenFile) Read(p []byte) (n int, err error) {
+ if !file.o.fs.opt.NoCheckUpdated {
+ // Check if file has the same size and modTime
+ fi, err := file.fd.Stat()
+ if err != nil {
+ return 0, errors.Wrap(err, "can't read status of source file while transferring")
+ }
+ if file.o.size != fi.Size() {
+ return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
+ }
+ if !file.o.modTime.Equal(fi.ModTime()) {
+ return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
+ }
+ }
+
+ n, err = file.in.Read(p)
+ if n > 0 {
+ // Hash routines never return an error
+ _, _ = file.hash.Write(p[:n])
+ }
+ return
+}
+
+// Close the object and update the hashes
+func (file *localOpenFile) Close() (err error) {
+ err = file.in.Close()
+ if err == nil {
+ if file.hash.Size() == file.o.Size() {
+ file.o.fs.objectHashesMu.Lock()
+ file.o.hashes = file.hash.Sums()
+ file.o.fs.objectHashesMu.Unlock()
+ }
+ }
+ return err
+}
+
+// Open an object for read
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+ var offset, limit int64 = 0, -1
+ hashes := hash.Supported
+ for _, option := range options {
+ switch x := option.(type) {
+ case *fs.SeekOption:
+ offset = x.Offset
+ case *fs.RangeOption:
+ offset, limit = x.Decode(o.size)
+ case *fs.HashesOption:
+ hashes = x.Hashes
+ default:
+ if option.Mandatory() {
+ fs.Logf(o, "Unsupported mandatory option: %v", option)
+ }
+ }
+ }
+
+ fd, err := os.Open(o.path)
+ if err != nil {
+ return
+ }
+ wrappedFd := readers.NewLimitedReadCloser(fd, limit)
+ if offset != 0 {
+ // seek the object
+ _, err = fd.Seek(offset, io.SeekStart)
+ // don't attempt to make checksums
+ return wrappedFd, err
+ }
+ hash, err := hash.NewMultiHasherTypes(hashes)
+ if err != nil {
+ return nil, err
+ }
+ // Update the md5sum as we go along
+ in = &localOpenFile{
+ o: o,
+ in: wrappedFd,
+ hash: hash,
+ fd: fd,
+ }
+ return in, nil
+}
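+
+// For example, Open() with no options reads the whole file and
+// accumulates all supported hashes as a side effect so they can be
+// cached on the object, while Open(&fs.SeekOption{Offset: 100}) starts
+// reading at byte 100 and skips checksumming entirely.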
+
+// mkdirAll makes all the directories needed to store the object
+func (o *Object) mkdirAll() error {
+ dir := filepath.Dir(o.path)
+ return os.MkdirAll(dir, 0777)
+}
+
+// Update the object from in with modTime and size
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+ hashes := hash.Supported
+ for _, option := range options {
+ switch x := option.(type) {
+ case *fs.HashesOption:
+ hashes = x.Hashes
+ }
+ }
+
+ err := o.mkdirAll()
+ if err != nil {
+ return err
+ }
+
+ out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ if err != nil {
+ return err
+ }
+
+ // Calculate the hash of the object we are reading as we go along
+ hash, err := hash.NewMultiHasherTypes(hashes)
+ if err != nil {
+ return err
+ }
+ in = io.TeeReader(in, hash)
+
+ _, err = io.Copy(out, in)
+ closeErr := out.Close()
+ if err == nil {
+ err = closeErr
+ }
+ if err != nil {
+ fs.Logf(o, "Removing partially written file on error: %v", err)
+ if removeErr := os.Remove(o.path); removeErr != nil {
+ fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
+ }
+ return err
+ }
+
+ // All successful so update the hashes
+ o.fs.objectHashesMu.Lock()
+ o.hashes = hash.Sums()
+ o.fs.objectHashesMu.Unlock()
+
+ // Set the mtime
+ err = o.SetModTime(src.ModTime())
+ if err != nil {
+ return err
+ }
+
+ // ReRead info now that we have finished
+ return o.lstat()
+}
+
+// setMetadata sets the file info from the os.FileInfo passed in
+func (o *Object) setMetadata(info os.FileInfo) {
+ // Don't overwrite the info if we don't need to
+ // this avoids upsetting the race detector
+ if o.size != info.Size() {
+ o.size = info.Size()
+ }
+ if !o.modTime.Equal(info.ModTime()) {
+ o.modTime = info.ModTime()
+ }
+ if o.mode != info.Mode() {
+ o.mode = info.Mode()
+ }
+}
+
+// Stat a Object into info
+func (o *Object) lstat() error {
+ info, err := o.fs.lstat(o.path)
+ if err == nil {
+ o.setMetadata(info)
+ }
+ return err
+}
+
+// Remove an object
+func (o *Object) Remove() error {
+ return remove(o.path)
+}
+
+// cleanPathFragment cleans an OS path fragment which is part of a
+// bigger path and not necessarily absolute
+func cleanPathFragment(s string) string {
+ if s == "" {
+ return s
+ }
+ s = filepath.Clean(s)
+ if runtime.GOOS == "windows" {
+ s = strings.Replace(s, `/`, `\`, -1)
+ }
+ return s
+}
+
+// cleanPath cleans and makes absolute the path passed in and returns
+// an OS path.
+//
+// The input might be in OS form or rclone form or a mixture, but the
+// output is in OS form.
+//
+// On Windows it also converts the path to UNC form and replaces any
+// characters Windows can't deal with using their replacements.
+func (f *Fs) cleanPath(s string) string {
+ s = cleanPathFragment(s)
+ if runtime.GOOS == "windows" {
+ if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
+ s2, err := filepath.Abs(s)
+ if err == nil {
+ s = s2
+ }
+ }
+ if !f.opt.NoUNC {
+ // Convert to UNC
+ s = uncPath(s)
+ }
+ s = cleanWindowsName(f, s)
+ } else {
+ if !filepath.IsAbs(s) {
+ s2, err := filepath.Abs(s)
+ if err == nil {
+ s = s2
+ }
+ }
+ }
+ return s
+}
+
+// Pattern to match a windows absolute path: "c:\" and similar
+var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
+
+// uncPath converts an absolute Windows path
+// to a UNC long path.
+func uncPath(s string) string {
+ // UNC can NOT use "/", so convert all to "\"
+ s = strings.Replace(s, `/`, `\`, -1)
+
+ // If prefix is "\\", we already have a UNC path or server.
+ if strings.HasPrefix(s, `\\`) {
+ // If already long path, just keep it
+ if strings.HasPrefix(s, `\\?\`) {
+ return s
+ }
+
+ // Trim "\\" from path and add UNC prefix.
+ return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
+ }
+ if isAbsWinDrive.MatchString(s) {
+ return `\\?\` + s
+ }
+ return s
+}
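+
+// For example:
+//
+//	uncPath(`c:\Users\me`)     == `\\?\c:\Users\me`
+//	uncPath(`\\server\share`)  == `\\?\UNC\server\share`
+//	uncPath(`\\?\c:\x`)        == `\\?\c:\x` (already long, unchanged)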
+
+// cleanWindowsName will clean invalid Windows characters replacing them with _
+func cleanWindowsName(f *Fs, name string) string {
+ original := name
+ var name2 string
+ if strings.HasPrefix(name, `\\?\`) {
+ name2 = `\\?\`
+ name = strings.TrimPrefix(name, `\\?\`)
+ }
+ if strings.HasPrefix(name, `//?/`) {
+ name2 = `//?/`
+ name = strings.TrimPrefix(name, `//?/`)
+ }
+ // Colon is allowed as part of a drive name X:\
+ colonAt := strings.Index(name, ":")
+ if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
+ // Copy to name2, which is unfiltered
+ name2 += name[0 : colonAt+1]
+ name = name[colonAt+1:]
+ }
+
+ name2 += strings.Map(func(r rune) rune {
+ switch r {
+ case '<', '>', '"', '|', '?', '*', ':':
+ return '_'
+ }
+ return r
+ }, name)
+
+ if name2 != original && f != nil {
+ f.wmu.Lock()
+ if _, ok := f.warned[name]; !ok {
+ fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
+ f.warned[name] = struct{}{}
+ }
+ f.wmu.Unlock()
+ }
+ return name2
+}
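+
+// For example, cleanWindowsName(nil, `C:\my:file*?`) returns
+// `C:\my_file__`: the drive colon survives because it is copied to the
+// unfiltered prefix, while the later colon, asterisk and question mark
+// are replaced with underscores.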
+
+// Check the interfaces are satisfied
+var (
+ _ fs.Fs = &Fs{}
+ _ fs.Purger = &Fs{}
+ _ fs.PutStreamer = &Fs{}
+ _ fs.Mover = &Fs{}
+ _ fs.DirMover = &Fs{}
+ _ fs.Object = &Object{}
+)
diff --git a/vendor/github.com/ncw/rclone/backend/local/read_device_other.go b/vendor/github.com/ncw/rclone/backend/local/read_device_other.go
new file mode 100755
index 0000000..c3fc4f4
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/read_device_other.go
@@ -0,0 +1,13 @@
+// Device reading functions
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package local
+
+import "os"
+
+// readDevice turns a valid os.FileInfo into a device number,
+// returning devUnset if it fails.
+func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
+ return devUnset
+}
diff --git a/vendor/github.com/ncw/rclone/backend/local/read_device_unix.go b/vendor/github.com/ncw/rclone/backend/local/read_device_unix.go
new file mode 100755
index 0000000..1b2b0c5
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/read_device_unix.go
@@ -0,0 +1,26 @@
+// Device reading functions
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package local
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// readDevice turns a valid os.FileInfo into a device number,
+// returning devUnset if it fails.
+func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
+ if !oneFileSystem {
+ return devUnset
+ }
+ statT, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
+ return devUnset
+ }
+ return uint64(statT.Dev)
+}
diff --git a/vendor/github.com/ncw/rclone/backend/local/remove_other.go b/vendor/github.com/ncw/rclone/backend/local/remove_other.go
new file mode 100755
index 0000000..760e2cf
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/remove_other.go
@@ -0,0 +1,10 @@
+//+build !windows
+
+package local
+
+import "os"
+
+// Removes name, retrying on a sharing violation
+func remove(name string) error {
+ return os.Remove(name)
+}
diff --git a/vendor/github.com/ncw/rclone/backend/local/remove_windows.go b/vendor/github.com/ncw/rclone/backend/local/remove_windows.go
new file mode 100755
index 0000000..bf28b2c
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/local/remove_windows.go
@@ -0,0 +1,38 @@
+//+build windows
+
+package local
+
+import (
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+)
+
+const (
+ ERROR_SHARING_VIOLATION syscall.Errno = 32
+)
+
+// Removes name, retrying on a sharing violation
+func remove(name string) (err error) {
+ const maxTries = 10
+ var sleepTime = 1 * time.Millisecond
+ for i := 0; i < maxTries; i++ {
+ err = os.Remove(name)
+ if err == nil {
+ break
+ }
+ pathErr, ok := err.(*os.PathError)
+ if !ok {
+ break
+ }
+ if pathErr.Err != ERROR_SHARING_VIOLATION {
+ break
+ }
+ fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
+ time.Sleep(sleepTime)
+ sleepTime <<= 1
+ }
+ return err
+}
diff --git a/vendor/github.com/ncw/rclone/backend/onedrive/quickxorhash/quickxorhash.go b/vendor/github.com/ncw/rclone/backend/onedrive/quickxorhash/quickxorhash.go
new file mode 100755
index 0000000..b7976b7
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/onedrive/quickxorhash/quickxorhash.go
@@ -0,0 +1,209 @@
+// Package quickxorhash provides the quickXorHash algorithm which is a
+// quick, simple non-cryptographic hash algorithm that works by XORing
+// the bytes in a circular-shifting fashion.
+//
+// It is used by Microsoft Onedrive for Business to hash data.
+//
+// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
+package quickxorhash
+
+// This code was ported from the code snippet linked from
+// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
+// Which has the copyright
+
+// ------------------------------------------------------------------------------
+// Copyright (c) 2016 Microsoft Corporation
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+// ------------------------------------------------------------------------------
+
+import (
+ "hash"
+)
+
+const (
+ // BlockSize is the preferred size for hashing
+ BlockSize = 64
+ // Size of the output checksum
+ Size = 20
+ bitsInLastCell = 32
+ shift = 11
+ widthInBits = 8 * Size
+ dataSize = (widthInBits-1)/64 + 1
+)
+
+type quickXorHash struct {
+ data [dataSize]uint64
+ lengthSoFar uint64
+ shiftSoFar int
+}
+
+// New returns a new hash.Hash computing the quickXorHash checksum.
+func New() hash.Hash {
+ return &quickXorHash{}
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+//
+// Write writes len(p) bytes from p to the underlying data stream. It returns
+// the number of bytes written from p (0 <= n <= len(p)) and any error
+// encountered that caused the write to stop early. Write must return a non-nil
+// error if it returns n < len(p). Write must not modify the slice data, even
+// temporarily.
+//
+// Implementations must not retain p.
+func (q *quickXorHash) Write(p []byte) (n int, err error) {
+ currentshift := q.shiftSoFar
+
+ // The bitvector where we'll start xoring
+ vectorArrayIndex := currentshift / 64
+
+ // The position within the bit vector at which we begin xoring
+ vectorOffset := currentshift % 64
+ iterations := len(p)
+ if iterations > widthInBits {
+ iterations = widthInBits
+ }
+
+ for i := 0; i < iterations; i++ {
+ isLastCell := vectorArrayIndex == len(q.data)-1
+ var bitsInVectorCell int
+ if isLastCell {
+ bitsInVectorCell = bitsInLastCell
+ } else {
+ bitsInVectorCell = 64
+ }
+
+		// There are at least 2 bitvectors before we reach the end of the array
+ if vectorOffset <= bitsInVectorCell-8 {
+ for j := i; j < len(p); j += widthInBits {
+ q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
+ }
+ } else {
+ index1 := vectorArrayIndex
+ var index2 int
+ if isLastCell {
+ index2 = 0
+ } else {
+ index2 = vectorArrayIndex + 1
+ }
+ low := byte(bitsInVectorCell - vectorOffset)
+
+ xoredByte := byte(0)
+ for j := i; j < len(p); j += widthInBits {
+ xoredByte ^= p[j]
+ }
+ q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
+ q.data[index2] ^= uint64(xoredByte) >> low
+ }
+ vectorOffset += shift
+ for vectorOffset >= bitsInVectorCell {
+ if isLastCell {
+ vectorArrayIndex = 0
+ } else {
+ vectorArrayIndex = vectorArrayIndex + 1
+ }
+ vectorOffset -= bitsInVectorCell
+ }
+ }
+
+ // Update the starting position in a circular shift pattern
+ q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
+
+ q.lengthSoFar += uint64(len(p))
+
+ return len(p), nil
+}
+
+// Calculate the current checksum
+func (q *quickXorHash) checkSum() (h [Size]byte) {
+ // Output the data as little endian bytes
+ ph := 0
+ for _, d := range q.data[:len(q.data)-1] {
+ _ = h[ph+7] // bounds check
+ h[ph+0] = byte(d >> (8 * 0))
+ h[ph+1] = byte(d >> (8 * 1))
+ h[ph+2] = byte(d >> (8 * 2))
+ h[ph+3] = byte(d >> (8 * 3))
+ h[ph+4] = byte(d >> (8 * 4))
+ h[ph+5] = byte(d >> (8 * 5))
+ h[ph+6] = byte(d >> (8 * 6))
+ h[ph+7] = byte(d >> (8 * 7))
+ ph += 8
+ }
+ // remaining 32 bits
+ d := q.data[len(q.data)-1]
+ h[Size-4] = byte(d >> (8 * 0))
+ h[Size-3] = byte(d >> (8 * 1))
+ h[Size-2] = byte(d >> (8 * 2))
+ h[Size-1] = byte(d >> (8 * 3))
+
+ // XOR the file length with the least significant bits in little endian format
+ d = q.lengthSoFar
+ h[Size-8] ^= byte(d >> (8 * 0))
+ h[Size-7] ^= byte(d >> (8 * 1))
+ h[Size-6] ^= byte(d >> (8 * 2))
+ h[Size-5] ^= byte(d >> (8 * 3))
+ h[Size-4] ^= byte(d >> (8 * 4))
+ h[Size-3] ^= byte(d >> (8 * 5))
+ h[Size-2] ^= byte(d >> (8 * 6))
+ h[Size-1] ^= byte(d >> (8 * 7))
+
+ return h
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (q *quickXorHash) Sum(b []byte) []byte {
+ hash := q.checkSum()
+ return append(b, hash[:]...)
+}
+
+// Reset resets the Hash to its initial state.
+func (q *quickXorHash) Reset() {
+ *q = quickXorHash{}
+}
+
+// Size returns the number of bytes Sum will return.
+func (q *quickXorHash) Size() int {
+ return Size
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (q *quickXorHash) BlockSize() int {
+ return BlockSize
+}
+
+// Sum returns the quickXorHash checksum of the data.
+func Sum(data []byte) [Size]byte {
+ var d quickXorHash
+ _, _ = d.Write(data)
+ return d.checkSum()
+}
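+
+// Usage sketch: OneDrive for Business reports this hash base64-encoded,
+// so a typical caller (assuming encoding/base64 is imported) does:
+//
+//	h := quickxorhash.New()
+//	_, _ = h.Write(data)
+//	s := base64.StdEncoding.EncodeToString(h.Sum(nil))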
diff --git a/vendor/github.com/ncw/rclone/backend/s3/s3.go b/vendor/github.com/ncw/rclone/backend/s3/s3.go
new file mode 100755
index 0000000..2679f71
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/s3/s3.go
@@ -0,0 +1,1493 @@
+// Package s3 provides an interface to Amazon S3 object storage
+package s3
+
+// FIXME need to prevent anything but ListDir working for s3://
+
+/*
+Progress of port to aws-sdk
+
+ * Don't really need o.meta at all?
+
+What happens if you CTRL-C a multipart upload
+ * get an incomplete upload
+ * disappears when you delete the bucket
+*/
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "path"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/aws/aws-sdk-go/service/s3/s3manager"
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/ncw/rclone/fs/config/configstruct"
+ "github.com/ncw/rclone/fs/fshttp"
+ "github.com/ncw/rclone/fs/hash"
+ "github.com/ncw/rclone/fs/walk"
+ "github.com/ncw/rclone/lib/rest"
+ "github.com/ncw/swift"
+ "github.com/pkg/errors"
+)
+
+// Register with Fs
+func init() {
+ fs.Register(&fs.RegInfo{
+ Name: "s3",
+ Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
+ NewFs: NewFs,
+ Options: []fs.Option{{
+ Name: fs.ConfigProvider,
+ Help: "Choose your S3 provider.",
+ Examples: []fs.OptionExample{{
+ Value: "AWS",
+ Help: "Amazon Web Services (AWS) S3",
+ }, {
+ Value: "Ceph",
+ Help: "Ceph Object Storage",
+ }, {
+ Value: "DigitalOcean",
+ Help: "Digital Ocean Spaces",
+ }, {
+ Value: "Dreamhost",
+ Help: "Dreamhost DreamObjects",
+ }, {
+ Value: "IBMCOS",
+ Help: "IBM COS S3",
+ }, {
+ Value: "Minio",
+ Help: "Minio Object Storage",
+ }, {
+ Value: "Wasabi",
+ Help: "Wasabi Object Storage",
+ }, {
+ Value: "Other",
+ Help: "Any other S3 compatible provider",
+ }},
+ }, {
+ Name: "env_auth",
+			Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.",
+ Default: false,
+ Examples: []fs.OptionExample{{
+ Value: "false",
+ Help: "Enter AWS credentials in the next step",
+ }, {
+ Value: "true",
+ Help: "Get AWS credentials from the environment (env vars or IAM)",
+ }},
+ }, {
+ Name: "access_key_id",
+ Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
+ }, {
+ Name: "secret_access_key",
+ Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
+ }, {
+ Name: "region",
+ Help: "Region to connect to.",
+ Provider: "AWS",
+ Examples: []fs.OptionExample{{
+ Value: "us-east-1",
+ Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
+ }, {
+ Value: "us-east-2",
+ Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
+ }, {
+ Value: "us-west-2",
+ Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
+ }, {
+ Value: "us-west-1",
+ Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
+ }, {
+ Value: "ca-central-1",
+ Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
+ }, {
+ Value: "eu-west-1",
+ Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
+ }, {
+ Value: "eu-west-2",
+ Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
+ }, {
+ Value: "eu-central-1",
+ Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
+ }, {
+ Value: "ap-southeast-1",
+ Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
+ }, {
+ Value: "ap-southeast-2",
+ Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
+ }, {
+ Value: "ap-northeast-1",
+ Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
+ }, {
+ Value: "ap-northeast-2",
+ Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
+ }, {
+ Value: "ap-south-1",
+ Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
+ }, {
+ Value: "sa-east-1",
+ Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
+ }},
+ }, {
+ Name: "region",
+ Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
+ Provider: "!AWS",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "Use this if unsure. Will use v4 signatures and an empty region.",
+ }, {
+ Value: "other-v2-signature",
+ Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
+ }},
+ }, {
+ Name: "endpoint",
+ Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
+ Provider: "AWS",
+ }, {
+ Name: "endpoint",
+ Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
+ Provider: "IBMCOS",
+ Examples: []fs.OptionExample{{
+ Value: "s3-api.us-geo.objectstorage.softlayer.net",
+ Help: "US Cross Region Endpoint",
+ }, {
+ Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
+ Help: "US Cross Region Dallas Endpoint",
+ }, {
+ Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
+ Help: "US Cross Region Washington DC Endpoint",
+ }, {
+ Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
+ Help: "US Cross Region San Jose Endpoint",
+ }, {
+ Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
+ Help: "US Cross Region Private Endpoint",
+ }, {
+ Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
+ Help: "US Cross Region Dallas Private Endpoint",
+ }, {
+ Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
+ Help: "US Cross Region Washington DC Private Endpoint",
+ }, {
+ Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
+ Help: "US Cross Region San Jose Private Endpoint",
+ }, {
+ Value: "s3.us-east.objectstorage.softlayer.net",
+ Help: "US Region East Endpoint",
+ }, {
+ Value: "s3.us-east.objectstorage.service.networklayer.com",
+ Help: "US Region East Private Endpoint",
+ }, {
+ Value: "s3.us-south.objectstorage.softlayer.net",
+ Help: "US Region South Endpoint",
+ }, {
+ Value: "s3.us-south.objectstorage.service.networklayer.com",
+ Help: "US Region South Private Endpoint",
+ }, {
+ Value: "s3.eu-geo.objectstorage.softlayer.net",
+ Help: "EU Cross Region Endpoint",
+ }, {
+ Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
+ Help: "EU Cross Region Frankfurt Endpoint",
+ }, {
+ Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
+ Help: "EU Cross Region Milan Endpoint",
+ }, {
+ Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
+ Help: "EU Cross Region Amsterdam Endpoint",
+ }, {
+ Value: "s3.eu-geo.objectstorage.service.networklayer.com",
+ Help: "EU Cross Region Private Endpoint",
+ }, {
+ Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
+ Help: "EU Cross Region Frankfurt Private Endpoint",
+ }, {
+ Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
+ Help: "EU Cross Region Milan Private Endpoint",
+ }, {
+ Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
+ Help: "EU Cross Region Amsterdam Private Endpoint",
+ }, {
+ Value: "s3.eu-gb.objectstorage.softlayer.net",
+ Help: "Great Britan Endpoint",
+ }, {
+ Value: "s3.eu-gb.objectstorage.service.networklayer.com",
+ Help: "Great Britan Private Endpoint",
+ }, {
+ Value: "s3.ap-geo.objectstorage.softlayer.net",
+ Help: "APAC Cross Regional Endpoint",
+ }, {
+ Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
+ Help: "APAC Cross Regional Tokyo Endpoint",
+ }, {
+ Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
+ Help: "APAC Cross Regional HongKong Endpoint",
+ }, {
+ Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
+ Help: "APAC Cross Regional Seoul Endpoint",
+ }, {
+ Value: "s3.ap-geo.objectstorage.service.networklayer.com",
+ Help: "APAC Cross Regional Private Endpoint",
+ }, {
+ Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
+ Help: "APAC Cross Regional Tokyo Private Endpoint",
+ }, {
+ Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
+ Help: "APAC Cross Regional HongKong Private Endpoint",
+ }, {
+ Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
+ Help: "APAC Cross Regional Seoul Private Endpoint",
+ }, {
+ Value: "s3.mel01.objectstorage.softlayer.net",
+ Help: "Melbourne Single Site Endpoint",
+ }, {
+ Value: "s3.mel01.objectstorage.service.networklayer.com",
+ Help: "Melbourne Single Site Private Endpoint",
+ }, {
+ Value: "s3.tor01.objectstorage.softlayer.net",
+ Help: "Toronto Single Site Endpoint",
+ }, {
+ Value: "s3.tor01.objectstorage.service.networklayer.com",
+ Help: "Toronto Single Site Private Endpoint",
+ }},
+ }, {
+ Name: "endpoint",
+ Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
+ Provider: "!AWS,IBMCOS",
+ Examples: []fs.OptionExample{{
+ Value: "objects-us-west-1.dream.io",
+ Help: "Dream Objects endpoint",
+ Provider: "Dreamhost",
+ }, {
+ Value: "nyc3.digitaloceanspaces.com",
+ Help: "Digital Ocean Spaces New York 3",
+ Provider: "DigitalOcean",
+ }, {
+ Value: "ams3.digitaloceanspaces.com",
+ Help: "Digital Ocean Spaces Amsterdam 3",
+ Provider: "DigitalOcean",
+ }, {
+ Value: "sgp1.digitaloceanspaces.com",
+ Help: "Digital Ocean Spaces Singapore 1",
+ Provider: "DigitalOcean",
+ }, {
+ Value: "s3.wasabisys.com",
+ Help: "Wasabi Object Storage",
+ Provider: "Wasabi",
+ }},
+ }, {
+ Name: "location_constraint",
+ Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
+ Provider: "AWS",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
+ }, {
+ Value: "us-east-2",
+ Help: "US East (Ohio) Region.",
+ }, {
+ Value: "us-west-2",
+ Help: "US West (Oregon) Region.",
+ }, {
+ Value: "us-west-1",
+ Help: "US West (Northern California) Region.",
+ }, {
+ Value: "ca-central-1",
+ Help: "Canada (Central) Region.",
+ }, {
+ Value: "eu-west-1",
+ Help: "EU (Ireland) Region.",
+ }, {
+ Value: "eu-west-2",
+ Help: "EU (London) Region.",
+ }, {
+ Value: "EU",
+ Help: "EU Region.",
+ }, {
+ Value: "ap-southeast-1",
+ Help: "Asia Pacific (Singapore) Region.",
+ }, {
+ Value: "ap-southeast-2",
+ Help: "Asia Pacific (Sydney) Region.",
+ }, {
+ Value: "ap-northeast-1",
+ Help: "Asia Pacific (Tokyo) Region.",
+ }, {
+ Value: "ap-northeast-2",
+ Help: "Asia Pacific (Seoul)",
+ }, {
+ Value: "ap-south-1",
+ Help: "Asia Pacific (Mumbai)",
+ }, {
+ Value: "sa-east-1",
+ Help: "South America (Sao Paulo) Region.",
+ }},
+ }, {
+ Name: "location_constraint",
+ Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
+ Provider: "IBMCOS",
+ Examples: []fs.OptionExample{{
+ Value: "us-standard",
+ Help: "US Cross Region Standard",
+ }, {
+ Value: "us-vault",
+ Help: "US Cross Region Vault",
+ }, {
+ Value: "us-cold",
+ Help: "US Cross Region Cold",
+ }, {
+ Value: "us-flex",
+ Help: "US Cross Region Flex",
+ }, {
+ Value: "us-east-standard",
+ Help: "US East Region Standard",
+ }, {
+ Value: "us-east-vault",
+ Help: "US East Region Vault",
+ }, {
+ Value: "us-east-cold",
+ Help: "US East Region Cold",
+ }, {
+ Value: "us-east-flex",
+ Help: "US East Region Flex",
+ }, {
+ Value: "us-south-standard",
+ Help: "US Sout hRegion Standard",
+ }, {
+ Value: "us-south-vault",
+ Help: "US South Region Vault",
+ }, {
+ Value: "us-south-cold",
+ Help: "US South Region Cold",
+ }, {
+ Value: "us-south-flex",
+ Help: "US South Region Flex",
+ }, {
+ Value: "eu-standard",
+ Help: "EU Cross Region Standard",
+ }, {
+ Value: "eu-vault",
+ Help: "EU Cross Region Vault",
+ }, {
+ Value: "eu-cold",
+ Help: "EU Cross Region Cold",
+ }, {
+ Value: "eu-flex",
+ Help: "EU Cross Region Flex",
+ }, {
+ Value: "eu-gb-standard",
+ Help: "Great Britan Standard",
+ }, {
+ Value: "eu-gb-vault",
+ Help: "Great Britan Vault",
+ }, {
+ Value: "eu-gb-cold",
+ Help: "Great Britan Cold",
+ }, {
+ Value: "eu-gb-flex",
+ Help: "Great Britan Flex",
+ }, {
+ Value: "ap-standard",
+ Help: "APAC Standard",
+ }, {
+ Value: "ap-vault",
+ Help: "APAC Vault",
+ }, {
+ Value: "ap-cold",
+ Help: "APAC Cold",
+ }, {
+ Value: "ap-flex",
+ Help: "APAC Flex",
+ }, {
+ Value: "mel01-standard",
+ Help: "Melbourne Standard",
+ }, {
+ Value: "mel01-vault",
+ Help: "Melbourne Vault",
+ }, {
+ Value: "mel01-cold",
+ Help: "Melbourne Cold",
+ }, {
+ Value: "mel01-flex",
+ Help: "Melbourne Flex",
+ }, {
+ Value: "tor01-standard",
+ Help: "Toronto Standard",
+ }, {
+ Value: "tor01-vault",
+ Help: "Toronto Vault",
+ }, {
+ Value: "tor01-cold",
+ Help: "Toronto Cold",
+ }, {
+ Value: "tor01-flex",
+ Help: "Toronto Flex",
+ }},
+ }, {
+ Name: "location_constraint",
+ Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
+ Provider: "!AWS,IBMCOS",
+ }, {
+ Name: "acl",
+ Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
+ Examples: []fs.OptionExample{{
+ Value: "private",
+ Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "public-read",
+ Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "public-read-write",
+ Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "authenticated-read",
+ Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "bucket-owner-read",
+ Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "bucket-owner-full-control",
+ Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
+ Provider: "!IBMCOS",
+ }, {
+ Value: "private",
+ Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
+ Provider: "IBMCOS",
+ }, {
+ Value: "public-read",
+ Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
+ Provider: "IBMCOS",
+ }, {
+ Value: "public-read-write",
+ Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
+ Provider: "IBMCOS",
+ }, {
+ Value: "authenticated-read",
+ Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
+ Provider: "IBMCOS",
+ }},
+ }, {
+ Name: "server_side_encryption",
+ Help: "The server-side encryption algorithm used when storing this object in S3.",
+ Provider: "AWS",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "None",
+ }, {
+ Value: "AES256",
+ Help: "AES256",
+ }, {
+ Value: "aws:kms",
+ Help: "aws:kms",
+ }},
+ }, {
+ Name: "sse_kms_key_id",
+ Help: "If using KMS ID you must provide the ARN of Key.",
+ Provider: "AWS",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "None",
+ }, {
+ Value: "arn:aws:kms:us-east-1:*",
+ Help: "arn:aws:kms:*",
+ }},
+ }, {
+ Name: "storage_class",
+ Help: "The storage class to use when storing objects in S3.",
+ Provider: "AWS",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "Default",
+ }, {
+ Value: "STANDARD",
+ Help: "Standard storage class",
+ }, {
+ Value: "REDUCED_REDUNDANCY",
+ Help: "Reduced redundancy storage class",
+ }, {
+ Value: "STANDARD_IA",
+ Help: "Standard Infrequent Access storage class",
+ }, {
+ Value: "ONEZONE_IA",
+ Help: "One Zone Infrequent Access storage class",
+ }},
+ }, {
+ Name: "chunk_size",
+ Help: "Chunk size to use for uploading",
+ Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
+ Advanced: true,
+ }, {
+ Name: "disable_checksum",
+ Help: "Don't store MD5 checksum with object metadata",
+ Default: false,
+ Advanced: true,
+ }, {
+ Name: "session_token",
+ Help: "An AWS session token",
+ Hide: fs.OptionHideBoth,
+ Advanced: true,
+ }, {
+ Name: "upload_concurrency",
+ Help: "Concurrency for multipart uploads.",
+ Default: 2,
+ Advanced: true,
+ }, {
+ Name: "force_path_style",
+ Help: "If true use path style access if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
+ Default: true,
+ Advanced: true,
+ }},
+ })
+}
+
+// Constants
+const (
+ metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+ metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
+ listChunkSize = 1000 // number of items to read at once
+ maxRetries = 10 // number of retries to make of operations
+ maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
+ maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
+)
+
+// Options defines the configuration for this backend
+type Options struct {
+ Provider string `config:"provider"`
+ EnvAuth bool `config:"env_auth"`
+ AccessKeyID string `config:"access_key_id"`
+ SecretAccessKey string `config:"secret_access_key"`
+ Region string `config:"region"`
+ Endpoint string `config:"endpoint"`
+ LocationConstraint string `config:"location_constraint"`
+ ACL string `config:"acl"`
+ ServerSideEncryption string `config:"server_side_encryption"`
+ SSEKMSKeyID string `config:"sse_kms_key_id"`
+ StorageClass string `config:"storage_class"`
+ ChunkSize fs.SizeSuffix `config:"chunk_size"`
+ DisableChecksum bool `config:"disable_checksum"`
+ SessionToken string `config:"session_token"`
+ UploadConcurrency int `config:"upload_concurrency"`
+ ForcePathStyle bool `config:"force_path_style"`
+}
+
+// Fs represents a remote s3 server
+type Fs struct {
+ name string // the name of the remote
+ root string // root of the bucket - ignore all objects above this
+ opt Options // parsed options
+ features *fs.Features // optional features
+ c *s3.S3 // the connection to the s3 server
+ ses *session.Session // the s3 session
+ bucket string // the bucket we are working on
+ bucketOKMu sync.Mutex // mutex to protect bucket OK
+ bucketOK bool // true if we have created the bucket
+ bucketDeleted bool // true if we have deleted the bucket
+}
+
+// Object describes a s3 object
+type Object struct {
+ // Will definitely have everything but meta which may be nil
+ //
+ // List will read everything but meta & mimeType - to fill
+ // that in you need to call readMetaData
+ fs *Fs // what this object is part of
+ remote string // The remote path
+ etag string // md5sum of the object
+ bytes int64 // size of the object
+ lastModified time.Time // Last modified
+ meta map[string]*string // The object metadata if known - may be nil
+ mimeType string // MimeType of object - may be ""
+}
+
+// ------------------------------------------------------------
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+ return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+ if f.root == "" {
+ return f.bucket
+ }
+ return f.bucket + "/" + f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+ if f.root == "" {
+ return fmt.Sprintf("S3 bucket %s", f.bucket)
+ }
+ return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+ return f.features
+}
+
+// Pattern to match a s3 path
+var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+
+// s3ParsePath parses an s3 'url'
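+// For example "bucket/path/to/dir" gives bucket "bucket" and directory "path/to/dir".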
+func s3ParsePath(path string) (bucket, directory string, err error) {
+ parts := matcher.FindStringSubmatch(path)
+ if parts == nil {
+ err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
+ } else {
+ bucket, directory = parts[1], parts[2]
+ directory = strings.Trim(directory, "/")
+ }
+ return
+}
+
+// s3Connection makes a connection to s3
+func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
+ // Make the auth
+ v := credentials.Value{
+ AccessKeyID: opt.AccessKeyID,
+ SecretAccessKey: opt.SecretAccessKey,
+ SessionToken: opt.SessionToken,
+ }
+
+ lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
+ def := defaults.Get()
+ def.Config.HTTPClient = lowTimeoutClient
+
+ // first provider to supply a credential set "wins"
+ providers := []credentials.Provider{
+ // use static credentials if they're present (checked by provider)
+ &credentials.StaticProvider{Value: v},
+
+ // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+ // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+ &credentials.EnvProvider{},
+
+ // A SharedCredentialsProvider retrieves credentials
+ // from the current user's home directory. It checks
+ // AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
+ &credentials.SharedCredentialsProvider{},
+
+ // Pick up IAM role if we're in an ECS task
+ defaults.RemoteCredProvider(*def.Config, def.Handlers),
+
+ // Pick up IAM role in case we're on EC2
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(), &aws.Config{
+ HTTPClient: lowTimeoutClient,
+ }),
+ ExpiryWindow: 3 * time.Minute, // refresh credentials this long before they expire
+ },
+ }
+ cred := credentials.NewChainCredentials(providers)
+
+ switch {
+ case opt.EnvAuth:
+ // No need for empty checks if "env_auth" is true
+ case v.AccessKeyID == "" && v.SecretAccessKey == "":
+ // if no access key/secret and env_auth is disabled then fall back to anonymous access
+ cred = credentials.AnonymousCredentials
+ case v.AccessKeyID == "":
+ return nil, nil, errors.New("access_key_id not found")
+ case v.SecretAccessKey == "":
+ return nil, nil, errors.New("secret_access_key not found")
+ }
+
+ if opt.Region == "" && opt.Endpoint == "" {
+ opt.Endpoint = "https://s3.amazonaws.com/"
+ }
+ if opt.Region == "" {
+ opt.Region = "us-east-1"
+ }
+ awsConfig := aws.NewConfig().
+ WithRegion(opt.Region).
+ WithMaxRetries(maxRetries).
+ WithCredentials(cred).
+ WithEndpoint(opt.Endpoint).
+ WithHTTPClient(fshttp.NewClient(fs.Config)).
+ WithS3ForcePathStyle(opt.ForcePathStyle)
+ // awsConfig.WithLogLevel(aws.LogDebugWithSigning)
+ ses := session.New()
+ c := s3.New(ses, awsConfig)
+ if opt.Region == "other-v2-signature" {
+ fs.Debugf(nil, "Using v2 auth")
+ signer := func(req *request.Request) {
+ // Ignore AnonymousCredentials object
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+ sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
+ }
+ c.Handlers.Sign.Clear()
+ c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ c.Handlers.Sign.PushBack(signer)
+ }
+ return c, ses, nil
+}
+
+// NewFs constructs an Fs from the path, bucket:path
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ // Parse config into Options struct
+ opt := new(Options)
+ err := configstruct.Set(m, opt)
+ if err != nil {
+ return nil, err
+ }
+ if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
+ return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+ }
+ bucket, directory, err := s3ParsePath(root)
+ if err != nil {
+ return nil, err
+ }
+ c, ses, err := s3Connection(opt)
+ if err != nil {
+ return nil, err
+ }
+ f := &Fs{
+ name: name,
+ root: directory,
+ opt: *opt,
+ c: c,
+ bucket: bucket,
+ ses: ses,
+ }
+ f.features = (&fs.Features{
+ ReadMimeType: true,
+ WriteMimeType: true,
+ BucketBased: true,
+ }).Fill(f)
+ if f.root != "" {
+ f.root += "/"
+ // Check to see if the object exists
+ req := s3.HeadObjectInput{
+ Bucket: &f.bucket,
+ Key: &directory,
+ }
+ _, err = f.c.HeadObject(&req)
+ if err == nil {
+ f.root = path.Dir(directory)
+ if f.root == "." {
+ f.root = ""
+ } else {
+ f.root += "/"
+ }
+ // return an error with an fs which points to the parent
+ return f, fs.ErrorIsFile
+ }
+ }
+ // f.listMultipartUploads()
+ return f, nil
+}
+
+// Return an Object from a path
+//
+// If it can't be found it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) {
+ o := &Object{
+ fs: f,
+ remote: remote,
+ }
+ if info != nil {
+ // Set info but not meta
+ if info.LastModified == nil {
+ fs.Logf(o, "Failed to read last modified")
+ o.lastModified = time.Now()
+ } else {
+ o.lastModified = *info.LastModified
+ }
+ o.etag = aws.StringValue(info.ETag)
+ o.bytes = aws.Int64Value(info.Size)
+ } else {
+ err := o.readMetaData() // reads info and meta, returning an error
+ if err != nil {
+ return nil, err
+ }
+ }
+ return o, nil
+}
+
+// NewObject finds the Object at remote. If it can't be found
+// it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+ return f.newObjectWithInfo(remote, nil)
+}
+
+// listFn is called from list to handle an object.
+type listFn func(remote string, object *s3.Object, isDirectory bool) error
+
+// list the objects into the function supplied
+//
+// dir is the starting directory, "" for root
+//
+// Set recurse to read sub directories
+func (f *Fs) list(dir string, recurse bool, fn listFn) error {
+ root := f.root
+ if dir != "" {
+ root += dir + "/"
+ }
+ maxKeys := int64(listChunkSize)
+ delimiter := ""
+ if !recurse {
+ delimiter = "/"
+ }
+ var marker *string
+ for {
+ // FIXME need to implement ALL loop
+ req := s3.ListObjectsInput{
+ Bucket: &f.bucket,
+ Delimiter: &delimiter,
+ Prefix: &root,
+ MaxKeys: &maxKeys,
+ Marker: marker,
+ }
+ resp, err := f.c.ListObjects(&req)
+ if err != nil {
+ if awsErr, ok := err.(awserr.RequestFailure); ok {
+ if awsErr.StatusCode() == http.StatusNotFound {
+ err = fs.ErrorDirNotFound
+ }
+ }
+ return err
+ }
+ rootLength := len(f.root)
+ if !recurse {
+ for _, commonPrefix := range resp.CommonPrefixes {
+ if commonPrefix.Prefix == nil {
+ fs.Logf(f, "Nil common prefix received")
+ continue
+ }
+ remote := *commonPrefix.Prefix
+ if !strings.HasPrefix(remote, f.root) {
+ fs.Logf(f, "Odd name received %q", remote)
+ continue
+ }
+ remote = remote[rootLength:]
+ if strings.HasSuffix(remote, "/") {
+ remote = remote[:len(remote)-1]
+ }
+ err = fn(remote, &s3.Object{Key: &remote}, true)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, object := range resp.Contents {
+ key := aws.StringValue(object.Key)
+ if !strings.HasPrefix(key, f.root) {
+ fs.Logf(f, "Odd name received %q", key)
+ continue
+ }
+ remote := key[rootLength:]
+ // is this a directory marker?
+ if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
+ if recurse && remote != "" {
+ // add a directory in if --fast-list since will have no prefixes
+ remote = remote[:len(remote)-1]
+ err = fn(remote, &s3.Object{Key: &remote}, true)
+ if err != nil {
+ return err
+ }
+ }
+ continue // skip directory marker
+ }
+ err = fn(remote, object, false)
+ if err != nil {
+ return err
+ }
+ }
+ if !aws.BoolValue(resp.IsTruncated) {
+ break
+ }
+ // Use NextMarker if set, otherwise use last Key
+ if resp.NextMarker == nil || *resp.NextMarker == "" {
+ if len(resp.Contents) == 0 {
+ return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
+ }
+ marker = resp.Contents[len(resp.Contents)-1].Key
+ } else {
+ marker = resp.NextMarker
+ }
+ }
+ return nil
+}
+
+// Convert a list item into a DirEntry
+func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
+ if isDirectory {
+ size := int64(0)
+ if object.Size != nil {
+ size = *object.Size
+ }
+ d := fs.NewDir(remote, time.Time{}).SetSize(size)
+ return d, nil
+ }
+ o, err := f.newObjectWithInfo(remote, object)
+ if err != nil {
+ return nil, err
+ }
+ return o, nil
+}
+
+// mark the bucket as being OK
+func (f *Fs) markBucketOK() {
+ if f.bucket != "" {
+ f.bucketOKMu.Lock()
+ f.bucketOK = true
+ f.bucketDeleted = false
+ f.bucketOKMu.Unlock()
+ }
+}
+
+// listDir lists files and directories to out
+func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+ // List the objects and directories
+ err = f.list(dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
+ entry, err := f.itemToDirEntry(remote, object, isDirectory)
+ if err != nil {
+ return err
+ }
+ if entry != nil {
+ entries = append(entries, entry)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ // bucket must be present if listing succeeded
+ f.markBucketOK()
+ return entries, nil
+}
+
+// listBuckets lists the buckets to out
+func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
+ if dir != "" {
+ return nil, fs.ErrorListBucketRequired
+ }
+ req := s3.ListBucketsInput{}
+ resp, err := f.c.ListBuckets(&req)
+ if err != nil {
+ return nil, err
+ }
+ for _, bucket := range resp.Buckets {
+ d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
+ entries = append(entries, d)
+ }
+ return entries, nil
+}
+
+// List the objects and directories in dir into entries. The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+ if f.bucket == "" {
+ return f.listBuckets(dir)
+ }
+ return f.listDir(dir)
+}
+
+// ListR lists the objects and directories of the Fs starting
+// from dir recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+//
+// Don't implement this unless you have a more efficient way
+// of listing recursively than doing a directory traversal.
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+ if f.bucket == "" {
+ return fs.ErrorListBucketRequired
+ }
+ list := walk.NewListRHelper(callback)
+ err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
+ entry, err := f.itemToDirEntry(remote, object, isDirectory)
+ if err != nil {
+ return err
+ }
+ return list.Add(entry)
+ })
+ if err != nil {
+ return err
+ }
+ // bucket must be present if listing succeeded
+ f.markBucketOK()
+ return list.Flush()
+}
+
+// Put the Object into the bucket
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ // Temporary Object under construction
+ o := &Object{
+ fs: f,
+ remote: src.Remote(),
+ }
+ return o, o.Update(in, src, options...)
+}
+
+// PutStream uploads an object of indeterminate size to the remote path with the given modTime
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ return f.Put(in, src, options...)
+}
+
+// Check if the bucket exists
+//
+// NB this can return incorrect results if called immediately after bucket deletion
+func (f *Fs) dirExists() (bool, error) {
+ req := s3.HeadBucketInput{
+ Bucket: &f.bucket,
+ }
+ _, err := f.c.HeadBucket(&req)
+ if err == nil {
+ return true, nil
+ }
+ if err, ok := err.(awserr.RequestFailure); ok {
+ if err.StatusCode() == http.StatusNotFound {
+ return false, nil
+ }
+ }
+ return false, err
+}
+
+// Mkdir creates the bucket if it doesn't exist
+func (f *Fs) Mkdir(dir string) error {
+ f.bucketOKMu.Lock()
+ defer f.bucketOKMu.Unlock()
+ if f.bucketOK {
+ return nil
+ }
+ if !f.bucketDeleted {
+ exists, err := f.dirExists()
+ if err == nil {
+ f.bucketOK = exists
+ }
+ if err != nil || exists {
+ return err
+ }
+ }
+ req := s3.CreateBucketInput{
+ Bucket: &f.bucket,
+ ACL: &f.opt.ACL,
+ }
+ if f.opt.LocationConstraint != "" {
+ req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
+ LocationConstraint: &f.opt.LocationConstraint,
+ }
+ }
+ _, err := f.c.CreateBucket(&req)
+ if err, ok := err.(awserr.Error); ok {
+ if err.Code() == "BucketAlreadyOwnedByYou" {
+ err = nil
+ }
+ }
+ if err == nil {
+ f.bucketOK = true
+ f.bucketDeleted = false
+ }
+ return err
+}
+
+// Rmdir deletes the bucket if the fs is at the root
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(dir string) error {
+ f.bucketOKMu.Lock()
+ defer f.bucketOKMu.Unlock()
+ if f.root != "" || dir != "" {
+ return nil
+ }
+ req := s3.DeleteBucketInput{
+ Bucket: &f.bucket,
+ }
+ _, err := f.c.DeleteBucket(&req)
+ if err == nil {
+ f.bucketOK = false
+ f.bucketDeleted = true
+ }
+ return err
+}
+
+// Precision of the remote
+func (f *Fs) Precision() time.Duration {
+ return time.Nanosecond
+}
+
+// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
+// but also escapes '+' for S3 and Digital Ocean spaces compatibility
+func pathEscape(s string) string {
+ return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
+}
+
+// Copy src to this remote using server side copy operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+ err := f.Mkdir("")
+ if err != nil {
+ return nil, err
+ }
+ srcObj, ok := src.(*Object)
+ if !ok {
+ fs.Debugf(src, "Can't copy - not same remote type")
+ return nil, fs.ErrorCantCopy
+ }
+ srcFs := srcObj.fs
+ key := f.root + remote
+ source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
+ req := s3.CopyObjectInput{
+ Bucket: &f.bucket,
+ Key: &key,
+ CopySource: &source,
+ MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
+ }
+ _, err = f.c.CopyObject(&req)
+ if err != nil {
+ return nil, err
+ }
+ return f.NewObject(remote)
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set {
+ return hash.Set(hash.MD5)
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+ return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+ if o == nil {
+ return ""
+ }
+ return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+ return o.remote
+}
+
+var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+// Hash returns the Md5sum of an object returning a lowercase hex string
+func (o *Object) Hash(t hash.Type) (string, error) {
+ if t != hash.MD5 {
+ return "", hash.ErrUnsupported
+ }
+ hash := strings.Trim(strings.ToLower(o.etag), `"`)
+ // Check the etag is a valid md5sum
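+ // (multipart uploads produce ETags with a "-<number of parts>" suffix which aren't plain MD5 sums)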
+ if !matchMd5.MatchString(hash) {
+ err := o.readMetaData()
+ if err != nil {
+ return "", err
+ }
+
+ if md5sum, ok := o.meta[metaMD5Hash]; ok {
+ md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
+ if err != nil {
+ return "", err
+ }
+ hash = hex.EncodeToString(md5sumBytes)
+ } else {
+ hash = ""
+ }
+ }
+ return hash, nil
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+ return o.bytes
+}
+
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData() (err error) {
+ if o.meta != nil {
+ return nil
+ }
+ key := o.fs.root + o.remote
+ req := s3.HeadObjectInput{
+ Bucket: &o.fs.bucket,
+ Key: &key,
+ }
+ resp, err := o.fs.c.HeadObject(&req)
+ if err != nil {
+ if awsErr, ok := err.(awserr.RequestFailure); ok {
+ if awsErr.StatusCode() == http.StatusNotFound {
+ return fs.ErrorObjectNotFound
+ }
+ }
+ return err
+ }
+ var size int64
+ // Ignore missing Content-Length assuming it is 0
+ // Some versions of ceph do this due their apache proxies
+ if resp.ContentLength != nil {
+ size = *resp.ContentLength
+ }
+ o.etag = aws.StringValue(resp.ETag)
+ o.bytes = size
+ o.meta = resp.Metadata
+ if resp.LastModified == nil {
+ fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
+ o.lastModified = time.Now()
+ } else {
+ o.lastModified = *resp.LastModified
+ }
+ o.mimeType = aws.StringValue(resp.ContentType)
+ return nil
+}
+
+// ModTime returns the modification time of the object
+//
+// It attempts to read the object's mtime from the metadata, and if that
+// isn't present it falls back to the LastModified returned in the http headers
+func (o *Object) ModTime() time.Time {
+ if fs.Config.UseServerModTime {
+ return o.lastModified
+ }
+ err := o.readMetaData()
+ if err != nil {
+ fs.Logf(o, "Failed to read metadata: %v", err)
+ return time.Now()
+ }
+ // read mtime out of metadata if available
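+ // metaMtime is stored as a floating point Unix time string, eg "1524700656.1"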
+ d, ok := o.meta[metaMtime]
+ if !ok || d == nil {
+ // fs.Debugf(o, "No metadata")
+ return o.lastModified
+ }
+ modTime, err := swift.FloatStringToTime(*d)
+ if err != nil {
+ fs.Logf(o, "Failed to read mtime from object: %v", err)
+ return o.lastModified
+ }
+ return modTime
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(modTime time.Time) error {
+ err := o.readMetaData()
+ if err != nil {
+ return err
+ }
+ o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
+
+ if o.bytes >= maxSizeForCopy {
+ fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
+ return nil
+ }
+
+ // Guess the content type
+ mimeType := fs.MimeType(o)
+
+ // Copy the object to itself to update the metadata
+ key := o.fs.root + o.remote
+ sourceKey := o.fs.bucket + "/" + key
+ directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
+ req := s3.CopyObjectInput{
+ Bucket: &o.fs.bucket,
+ ACL: &o.fs.opt.ACL,
+ Key: &key,
+ ContentType: &mimeType,
+ CopySource: aws.String(pathEscape(sourceKey)),
+ Metadata: o.meta,
+ MetadataDirective: &directive,
+ }
+ _, err = o.fs.c.CopyObject(&req)
+ return err
+}
+
+// Storable returns a boolean indicating if this object is storable
+func (o *Object) Storable() bool {
+ return true
+}
+
+// Open an object for read
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+ key := o.fs.root + o.remote
+ req := s3.GetObjectInput{
+ Bucket: &o.fs.bucket,
+ Key: &key,
+ }
+ for _, option := range options {
+ switch option.(type) {
+ case *fs.RangeOption, *fs.SeekOption:
+ _, value := option.Header()
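+ // value is an HTTP Range header value, eg "bytes=0-1023"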
+ req.Range = &value
+ default:
+ if option.Mandatory() {
+ fs.Logf(o, "Unsupported mandatory option: %v", option)
+ }
+ }
+ }
+ resp, err := o.fs.c.GetObject(&req)
+ if err, ok := err.(awserr.RequestFailure); ok {
+ if err.Code() == "InvalidObjectState" {
+ return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.Body, nil
+}
+
+// Update the Object from in with modTime and size
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+ err := o.fs.Mkdir("")
+ if err != nil {
+ return err
+ }
+ modTime := src.ModTime()
+ size := src.Size()
+
+ uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
+ u.Concurrency = o.fs.opt.UploadConcurrency
+ u.LeavePartsOnError = false
+ u.S3 = o.fs.c
+ u.PartSize = int64(o.fs.opt.ChunkSize)
+
+ if size == -1 {
+ // Make parts as small as possible while still being able to upload to the
+ // S3 file size limit. Rounded up to nearest MB.
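+ // With the 5 TiB maxFileSize and the SDK's 10,000 part limit this comes to 525 MiB per part.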
+ u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
+ return
+ }
+ // Adjust PartSize until the number of parts is small enough.
+ if size/u.PartSize >= s3manager.MaxUploadParts {
+ // Calculate partition size rounded up to the nearest MB
+ u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
+ }
+ })
+
+ // Set the mtime in the meta data
+ metadata := map[string]*string{
+ metaMtime: aws.String(swift.TimeToFloatString(modTime)),
+ }
+
+ if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
+ hash, err := src.Hash(hash.MD5)
+
+ if err == nil && matchMd5.MatchString(hash) {
+ hashBytes, err := hex.DecodeString(hash)
+
+ if err == nil {
+ metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
+ }
+ }
+ }
+
+ // Guess the content type
+ mimeType := fs.MimeType(src)
+
+ key := o.fs.root + o.remote
+ req := s3manager.UploadInput{
+ Bucket: &o.fs.bucket,
+ ACL: &o.fs.opt.ACL,
+ Key: &key,
+ Body: in,
+ ContentType: &mimeType,
+ Metadata: metadata,
+ //ContentLength: &size,
+ }
+ if o.fs.opt.ServerSideEncryption != "" {
+ req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+ }
+ if o.fs.opt.SSEKMSKeyID != "" {
+ req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+ }
+ if o.fs.opt.StorageClass != "" {
+ req.StorageClass = &o.fs.opt.StorageClass
+ }
+ _, err = uploader.Upload(&req)
+ if err != nil {
+ return err
+ }
+
+ // Read the metadata from the newly created object
+ o.meta = nil // wipe old metadata
+ err = o.readMetaData()
+ return err
+}
+
+// Remove an object
+func (o *Object) Remove() error {
+ key := o.fs.root + o.remote
+ req := s3.DeleteObjectInput{
+ Bucket: &o.fs.bucket,
+ Key: &key,
+ }
+ _, err := o.fs.c.DeleteObject(&req)
+ return err
+}
+
+// MimeType of an Object if known, "" otherwise
+func (o *Object) MimeType() string {
+ err := o.readMetaData()
+ if err != nil {
+ fs.Logf(o, "Failed to read metadata: %v", err)
+ return ""
+ }
+ return o.mimeType
+}
+
+// Check the interfaces are satisfied
+var (
+ _ fs.Fs = &Fs{}
+ _ fs.Copier = &Fs{}
+ _ fs.PutStreamer = &Fs{}
+ _ fs.ListRer = &Fs{}
+ _ fs.Object = &Object{}
+ _ fs.MimeTyper = &Object{}
+)
diff --git a/vendor/github.com/ncw/rclone/backend/s3/v2sign.go b/vendor/github.com/ncw/rclone/backend/s3/v2sign.go
new file mode 100755
index 0000000..23048ed
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/backend/s3/v2sign.go
@@ -0,0 +1,115 @@
+// v2 signing
+
+package s3
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "net/http"
+ "sort"
+ "strings"
+ "time"
+)
+
+// URL parameters that need to be added to the signature
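+// (these are the S3 "sub-resource" parameters which form part of the CanonicalizedResource)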
+var s3ParamsToSign = map[string]struct{}{
+ "acl": {},
+ "location": {},
+ "logging": {},
+ "notification": {},
+ "partNumber": {},
+ "policy": {},
+ "requestPayment": {},
+ "torrent": {},
+ "uploadId": {},
+ "uploads": {},
+ "versionId": {},
+ "versioning": {},
+ "versions": {},
+ "response-content-type": {},
+ "response-content-language": {},
+ "response-expires": {},
+ "response-cache-control": {},
+ "response-content-disposition": {},
+ "response-content-encoding": {},
+}
+
+// sign signs requests using v2 auth
+//
+// Cobbled together from goamz and aws-sdk-go
+func sign(AccessKey, SecretKey string, req *http.Request) {
+ // Set date
+ date := time.Now().UTC().Format(time.RFC1123)
+ req.Header.Set("Date", date)
+
+ // Sort out URI
+ uri := req.URL.Opaque
+ if uri != "" {
+ if strings.HasPrefix(uri, "//") {
+ // Strip off //host/uri
+ uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
+ req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
+ }
+ } else {
+ uri = req.URL.Path
+ }
+ if uri == "" {
+ uri = "/"
+ }
+
+ // Look through headers of interest
+ var md5 string
+ var contentType string
+ var headersToSign []string
+ for k, v := range req.Header {
+ k = strings.ToLower(k)
+ switch k {
+ case "content-md5":
+ md5 = v[0]
+ case "content-type":
+ contentType = v[0]
+ default:
+ if strings.HasPrefix(k, "x-amz-") {
+ vall := strings.Join(v, ",")
+ headersToSign = append(headersToSign, k+":"+vall)
+ }
+ }
+ }
+ // Make headers of interest into canonical string
+ var joinedHeadersToSign string
+ if len(headersToSign) > 0 {
+ sort.StringSlice(headersToSign).Sort()
+ joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
+ }
+
+ // Look for query parameters which need to be added to the signature
+ params := req.URL.Query()
+ var queriesToSign []string
+ for k, vs := range params {
+ if _, ok := s3ParamsToSign[k]; ok {
+ for _, v := range vs {
+ if v == "" {
+ queriesToSign = append(queriesToSign, k)
+ } else {
+ queriesToSign = append(queriesToSign, k+"="+v)
+ }
+ }
+ }
+ }
+ // Add query parameters to URI
+ if len(queriesToSign) > 0 {
+ sort.StringSlice(queriesToSign).Sort()
+ uri += "?" + strings.Join(queriesToSign, "&")
+ }
+
+ // Make signature
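+ // The string to sign follows the AWS v2 layout:
+ // Method\nContent-MD5\nContent-Type\nDate\nCanonicalizedAmzHeaders+CanonicalizedResource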
+ payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
+ hash := hmac.New(sha1.New, []byte(SecretKey))
+ _, _ = hash.Write([]byte(payload))
+ signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
+ base64.StdEncoding.Encode(signature, hash.Sum(nil))
+
+ // Set signature in request
+ req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
+}
diff --git a/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_new.go b/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_new.go
new file mode 100755
index 0000000..04f5342
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_new.go
@@ -0,0 +1,21 @@
+// HTTP parts go1.8+
+
+//+build go1.8
+
+package httplib
+
+import (
+ "net/http"
+ "time"
+)
+
+// Initialise the http.Server for post go1.8
+func initServer(s *http.Server) {
+ s.ReadHeaderTimeout = 10 * time.Second // time to send the headers
+ s.IdleTimeout = 60 * time.Second // time to keep idle connections open
+}
+
+// closeServer closes the server in a non-graceful way
+func closeServer(s *http.Server) error {
+ return s.Close()
+}
diff --git a/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_old.go b/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_old.go
new file mode 100755
index 0000000..66c9351
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/cmd/serve/httplib/http_old.go
@@ -0,0 +1,18 @@
+// HTTP parts pre go1.8
+
+//+build !go1.8
+
+package httplib
+
+import (
+ "net/http"
+)
+
+// Initialise the http.Server for pre go1.8
+func initServer(s *http.Server) {
+}
+
+// closeServer closes the server in a non-graceful way
+func closeServer(s *http.Server) error {
+ return nil
+}
diff --git a/vendor/github.com/ncw/rclone/cmd/serve/httplib/httplib.go b/vendor/github.com/ncw/rclone/cmd/serve/httplib/httplib.go
new file mode 100755
index 0000000..c10c949
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/cmd/serve/httplib/httplib.go
@@ -0,0 +1,256 @@
+// Package httplib provides common functionality for http servers
+package httplib
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "time"
+
+ auth "github.com/abbot/go-http-auth"
+ "github.com/ncw/rclone/fs"
+)
+
+// Globals
+var ()
+
+// Help contains text describing the http server to add to the command
+// help.
+var Help = `
+### Server options
+
+Use --addr to specify which IP address and port the server should
+listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
+IPs. By default it only listens on localhost. You can use port
+:0 to let the OS choose an available port.
+
+If you set --addr to listen on a public or LAN accessible IP address
+then using Authentication is advised - see the next section for info.
+
+--server-read-timeout and --server-write-timeout can be used to
+control the timeouts on the server. Note that this is the total time
+for a transfer.
+
+--max-header-bytes controls the maximum number of bytes the server will
+accept in the HTTP header.
+
+#### Authentication
+
+By default this will serve files without needing a login.
+
+You can either use an htpasswd file which can take lots of users, or
+set a single username and password with the --user and --pass flags.
+
+Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is
+in standard apache format and supports MD5, SHA1 and BCrypt for basic
+authentication. Bcrypt is recommended.
+
+To create an htpasswd file:
+
+ touch htpasswd
+ htpasswd -B htpasswd user
+ htpasswd -B htpasswd anotherUser
+
+The password file can be updated while rclone is running.
+
+Use --realm to set the authentication realm.
+
+#### SSL/TLS
+
+By default this will serve over http. If you want, you can serve over
+https. You will need to supply the --cert and --key flags. If you
+wish to do client side certificate validation then you will need to
+supply --client-ca also.
+
+--cert should be either a PEM encoded certificate or a concatenation
+of that with the CA certificate. --key should be the PEM encoded
+private key and --client-ca should be the PEM encoded client
+certificate authority certificate.
+`
+
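+// A self-signed certificate pair for testing --cert and --key can be made
+// with openssl, for example (one common invocation; adjust the subject and
+// key size to taste):
+//
+//     openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+//         -keyout key.pem -out cert.pem -subj "/CN=localhost"
+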
+// Options contains options for the http Server
+type Options struct {
+ ListenAddr string // Port to listen on
+ ServerReadTimeout time.Duration // Timeout for server reading data
+ ServerWriteTimeout time.Duration // Timeout for server writing data
+ MaxHeaderBytes int // Maximum size of request header
+ SslCert string // SSL PEM key (concatenation of certificate and CA certificate)
+ SslKey string // SSL PEM Private key
+ ClientCA string // Client certificate authority to verify clients with
+ HtPasswd string // htpasswd file - if not provided no authentication is done
+ Realm string // realm for authentication
+ BasicUser string // single username for basic auth if not using Htpasswd
+ BasicPass string // password for BasicUser
+}
+
+// DefaultOpt is the default values used for Options
+var DefaultOpt = Options{
+ ListenAddr: "localhost:8080",
+ Realm: "rclone",
+ ServerReadTimeout: 1 * time.Hour,
+ ServerWriteTimeout: 1 * time.Hour,
+ MaxHeaderBytes: 4096,
+}
+
+// Server contains info about the running http server
+type Server struct {
+ Opt Options
+ handler http.Handler // original handler
+ listener net.Listener
+ waitChan chan struct{} // for waiting on the listener to close
+ httpServer *http.Server
+ basicPassHashed string
+ useSSL bool // if server is configured for SSL/TLS
+}
+
+// singleUserProvider provides the encrypted password for a single user
+func (s *Server) singleUserProvider(user, realm string) string {
+ if user == s.Opt.BasicUser {
+ return s.basicPassHashed
+ }
+ return ""
+}
+
+// NewServer creates an http server. The opt can be nil in which case
+// the default options will be used.
+func NewServer(handler http.Handler, opt *Options) *Server {
+ s := &Server{
+ handler: handler,
+ }
+
+ // Make a copy of the options
+ if opt != nil {
+ s.Opt = *opt
+ } else {
+ s.Opt = DefaultOpt
+ }
+
+ // Use htpasswd if required on everything
+ if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" {
+ var secretProvider auth.SecretProvider
+ if s.Opt.HtPasswd != "" {
+ fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
+ secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
+ } else {
+ fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
+ s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
+ secretProvider = s.singleUserProvider
+ }
+ authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
+ handler = auth.JustCheck(authenticator, handler.ServeHTTP)
+ }
+
+ s.useSSL = s.Opt.SslKey != ""
+ if (s.Opt.SslCert != "") != s.useSSL {
+ log.Fatalf("Need both -cert and -key to use SSL")
+ }
+
+ // FIXME make a transport?
+ s.httpServer = &http.Server{
+ Addr: s.Opt.ListenAddr,
+ Handler: handler,
+ ReadTimeout: s.Opt.ServerReadTimeout,
+ WriteTimeout: s.Opt.ServerWriteTimeout,
+ MaxHeaderBytes: s.Opt.MaxHeaderBytes,
+ TLSConfig: &tls.Config{
+ MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
+ },
+ }
+ // go version specific initialisation
+ initServer(s.httpServer)
+
+ if s.Opt.ClientCA != "" {
+ if !s.useSSL {
+ log.Fatalf("Can't use --client-ca without --cert and --key")
+ }
+ certpool := x509.NewCertPool()
+ pem, err := ioutil.ReadFile(s.Opt.ClientCA)
+ if err != nil {
+ log.Fatalf("Failed to read client certificate authority: %v", err)
+ }
+ if !certpool.AppendCertsFromPEM(pem) {
+ log.Fatalf("Can't parse client certificate authority")
+ }
+ s.httpServer.TLSConfig.ClientCAs = certpool
+ s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ }
+
+ return s
+}
+
+// Serve runs the server - returns an error only if
+// the listener was not started; does not block, so
+// use s.Wait() to block on the listener indefinitely.
+func (s *Server) Serve() error {
+ ln, err := net.Listen("tcp", s.httpServer.Addr)
+ if err != nil {
+ return err
+ }
+ s.listener = ln
+ s.waitChan = make(chan struct{})
+ go func() {
+ var err error
+ if s.useSSL {
+ // hacky hack to get this to work with old Go versions, which
+ // don't have ServeTLS on http.Server; see PR #2194.
+ type tlsServer interface {
+ ServeTLS(ln net.Listener, cert, key string) error
+ }
+ srvIface := interface{}(s.httpServer)
+ if tlsSrv, ok := srvIface.(tlsServer); ok {
+ // yay -- we get easy TLS support with HTTP/2
+ err = tlsSrv.ServeTLS(s.listener, s.Opt.SslCert, s.Opt.SslKey)
+ } else {
+ // oh well -- we can still do TLS but might not have HTTP/2
+ tlsConfig := new(tls.Config)
+ tlsConfig.Certificates = make([]tls.Certificate, 1)
+ tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(s.Opt.SslCert, s.Opt.SslKey)
+ if err != nil {
+ log.Printf("Error loading key pair: %v", err)
+ }
+ tlsLn := tls.NewListener(s.listener, tlsConfig)
+ err = s.httpServer.Serve(tlsLn)
+ }
+ } else {
+ err = s.httpServer.Serve(s.listener)
+ }
+ if err != nil {
+ log.Printf("Error on serving HTTP server: %v", err)
+ }
+ }()
+ return nil
+}
+
+// Wait blocks while the listener is open.
+func (s *Server) Wait() {
+ <-s.waitChan
+}
+
+// Close shuts the running server down
+func (s *Server) Close() {
+ err := closeServer(s.httpServer)
+ if err != nil {
+ log.Printf("Error on closing HTTP server: %v", err)
+ return
+ }
+ close(s.waitChan)
+}
+
+// URL returns the serving address of this server
+func (s *Server) URL() string {
+ proto := "http"
+ if s.useSSL {
+ proto = "https"
+ }
+ addr := s.Opt.ListenAddr
+ if s.listener != nil {
+ // prefer actual listener address; required if using 0-port
+ // (i.e. port assigned by operating system)
+ addr = s.listener.Addr().String()
+ }
+ return fmt.Sprintf("%s://%s/", proto, addr)
+}
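+
+// A minimal usage sketch (handler is an assumed http.HandlerFunc; callers
+// may wire things up differently):
+//
+//     srv := httplib.NewServer(http.HandlerFunc(handler), &httplib.DefaultOpt)
+//     if err := srv.Serve(); err != nil {
+//         log.Fatal(err)
+//     }
+//     srv.Wait()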
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/accounting.go b/vendor/github.com/ncw/rclone/fs/accounting/accounting.go
new file mode 100755
index 0000000..398588d
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/accounting.go
@@ -0,0 +1,391 @@
+// Package accounting provides an accounting and limiting reader
+package accounting
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/asyncreader"
+ "github.com/ncw/rclone/fs/fserrors"
+ "github.com/pkg/errors"
+)
+
+// ErrorMaxTransferLimitReached is returned from Read when the max
+// transfer limit is reached.
+var ErrorMaxTransferLimitReached = fserrors.FatalError(errors.New("Max transfer limit reached as set by --max-transfer"))
+
+// Account limits and accounts for one transfer
+type Account struct {
+ // The mutex is to make sure Read() and Close() aren't called
+ // concurrently. Unfortunately the persistent connection loop
+ // in http transport calls Read() after Do() returns on
+ // CancelRequest so this race can happen when it apparently
+ // shouldn't.
+ mu sync.Mutex
+ in io.Reader
+ origIn io.ReadCloser
+ close io.Closer
+ size int64
+ name string
+ statmu sync.Mutex // Separate mutex for stat values.
+ bytes int64 // Total number of bytes read
+ max int64 // if >=0 the max number of bytes to transfer
+ start time.Time // Start time of first read
+ lpTime time.Time // Time of last average measurement
+ lpBytes int // Number of bytes read since last measurement
+ avg float64 // Moving average of last few measurements in bytes/s
+ closed bool // set if the file is closed
+ exit chan struct{} // channel that will be closed when transfer is finished
+ withBuf bool // is using a buffered in
+}
+
+const averagePeriod = 16 // period to do exponentially weighted averages over
+
+// NewAccountSizeName makes an Account reader for an io.ReadCloser of
+// the given size and name
+func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account {
+ acc := &Account{
+ in: in,
+ close: in,
+ origIn: in,
+ size: size,
+ name: name,
+ exit: make(chan struct{}),
+ avg: 0,
+ lpTime: time.Now(),
+ max: int64(fs.Config.MaxTransfer),
+ }
+ go acc.averageLoop()
+ Stats.inProgress.set(acc.name, acc)
+ return acc
+}
+
+// NewAccount makes an Account reader for an object
+func NewAccount(in io.ReadCloser, obj fs.Object) *Account {
+ return NewAccountSizeName(in, obj.Size(), obj.Remote())
+}
+
+// WithBuffer adds an async buffered reader if the file is above a certain size
+func (acc *Account) WithBuffer() *Account {
+ acc.withBuf = true
+ var buffers int
+ if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
+ buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
+ } else {
+ buffers = int(acc.size / asyncreader.BufferSize)
+ }
+ // On big files add a buffer
+ if buffers > 0 {
+ rc, err := asyncreader.New(acc.origIn, buffers)
+ if err != nil {
+ fs.Errorf(acc.name, "Failed to make buffer: %v", err)
+ } else {
+ acc.in = rc
+ acc.close = rc
+ }
+ }
+ return acc
+}
+
+// GetReader returns the underlying io.ReadCloser under any Buffer
+func (acc *Account) GetReader() io.ReadCloser {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ return acc.origIn
+}
+
+// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
+func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
+ return asyncIn
+ }
+ return nil
+}
+
+// StopBuffering stops the async buffer doing any more buffering
+func (acc *Account) StopBuffering() {
+ if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
+ asyncIn.Abandon()
+ }
+}
+
+// UpdateReader updates the underlying io.ReadCloser stopping the
+// async buffer (if any) and re-adding it
+func (acc *Account) UpdateReader(in io.ReadCloser) {
+ acc.mu.Lock()
+ acc.StopBuffering()
+ acc.in = in
+ acc.close = in
+ acc.origIn = in
+ acc.WithBuffer()
+ acc.mu.Unlock()
+}
+
+// averageLoop calculates averages for the stats in the background
+func (acc *Account) averageLoop() {
+ tick := time.NewTicker(time.Second)
+ var period float64
+ defer tick.Stop()
+ for {
+ select {
+ case now := <-tick.C:
+ acc.statmu.Lock()
+ // Add average of last second.
+ elapsed := now.Sub(acc.lpTime).Seconds()
+ avg := float64(acc.lpBytes) / elapsed
+ // Soft start the moving average
+ if period < averagePeriod {
+ period++
+ }
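+ // Exponentially weighted moving average: newAvg = oldAvg + (sample-oldAvg)/period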
+ acc.avg = (avg + (period-1)*acc.avg) / period
+ acc.lpBytes = 0
+ acc.lpTime = now
+ // Unlock stats
+ acc.statmu.Unlock()
+ case <-acc.exit:
+ return
+ }
+ }
+}
+
+// read bytes from the io.Reader passed in and account them
+func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
+ acc.statmu.Lock()
+ if acc.max >= 0 && Stats.GetBytes() >= acc.max {
+ acc.statmu.Unlock()
+ return 0, ErrorMaxTransferLimitReached
+ }
+ // Set start time.
+ if acc.start.IsZero() {
+ acc.start = time.Now()
+ }
+ acc.statmu.Unlock()
+
+ n, err = in.Read(p)
+
+ // Update Stats
+ acc.statmu.Lock()
+ acc.lpBytes += n
+ acc.bytes += int64(n)
+ acc.statmu.Unlock()
+
+ Stats.Bytes(int64(n))
+
+ limitBandwidth(n)
+ return
+}
+
+// Read bytes from the object - see io.Reader
+func (acc *Account) Read(p []byte) (n int, err error) {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ return acc.read(acc.in, p)
+}
+
+// Close the object
+func (acc *Account) Close() error {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ if acc.closed {
+ return nil
+ }
+ acc.closed = true
+ close(acc.exit)
+ Stats.inProgress.clear(acc.name)
+ return acc.close.Close()
+}
+
+// progress returns bytes read as well as the size.
+// Size can be <= 0 if the size is unknown.
+func (acc *Account) progress() (bytes, size int64) {
+ if acc == nil {
+ return 0, 0
+ }
+ acc.statmu.Lock()
+ bytes, size = acc.bytes, acc.size
+ acc.statmu.Unlock()
+ return bytes, size
+}
+
+// speed returns the speed of the current file transfer
+// in bytes per second, as well as an exponentially weighted moving average.
+// If no read has completed yet, 0 is returned for both values.
+func (acc *Account) speed() (bps, current float64) {
+ if acc == nil {
+ return 0, 0
+ }
+ acc.statmu.Lock()
+ defer acc.statmu.Unlock()
+ if acc.bytes == 0 {
+ return 0, 0
+ }
+ // Calculate speed from first read.
+ total := float64(time.Now().Sub(acc.start)) / float64(time.Second)
+ bps = float64(acc.bytes) / total
+ current = acc.avg
+ return
+}
+
+// eta returns the ETA of the current operation,
+// rounded to full seconds.
+// If the ETA cannot be determined 'ok' returns false.
+func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
+ if acc == nil {
+ return 0, false
+ }
+ acc.statmu.Lock()
+ defer acc.statmu.Unlock()
+ return eta(acc.bytes, acc.size, acc.avg)
+}
+
+// String produces stats for this file
+func (acc *Account) String() string {
+ a, b := acc.progress()
+ _, cur := acc.speed()
+ eta, etaok := acc.eta()
+ etas := "-"
+ if etaok {
+ if eta > 0 {
+ etas = fmt.Sprintf("%v", eta)
+ } else {
+ etas = "0s"
+ }
+ }
+ name := []rune(acc.name)
+ if fs.Config.StatsFileNameLength > 0 {
+ if len(name) > fs.Config.StatsFileNameLength {
+ where := len(name) - fs.Config.StatsFileNameLength
+ name = append([]rune{'.', '.', '.'}, name[where:]...)
+ }
+ }
+
+ if fs.Config.DataRateUnit == "bits" {
+ cur = cur * 8
+ }
+
+ percentageDone := 0
+ if b > 0 {
+ percentageDone = int(100 * float64(a) / float64(b))
+ }
+
+ done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
+
+ return fmt.Sprintf("%45s: %s, %s/s, %s",
+ string(name),
+ done,
+ fs.SizeSuffix(cur),
+ etas,
+ )
+}
+
+// RemoteStats produces stats for this file
+func (acc *Account) RemoteStats() (out map[string]interface{}) {
+ out = make(map[string]interface{})
+ a, b := acc.progress()
+ out["bytes"] = a
+ out["size"] = b
+ spd, cur := acc.speed()
+ out["speed"] = spd
+ out["speedAvg"] = cur
+
+ eta, etaok := acc.eta()
+ out["eta"] = nil
+ if etaok {
+ if eta > 0 {
+ out["eta"] = eta.Seconds()
+ } else {
+ out["eta"] = 0
+ }
+ }
+ out["name"] = acc.name
+
+ percentageDone := 0
+ if b > 0 {
+ percentageDone = int(100 * float64(a) / float64(b))
+ }
+ out["percentage"] = percentageDone
+
+ return out
+}
+
+// OldStream returns the top io.Reader
+func (acc *Account) OldStream() io.Reader {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ return acc.in
+}
+
+// SetStream updates the top io.Reader
+func (acc *Account) SetStream(in io.Reader) {
+ acc.mu.Lock()
+ acc.in = in
+ acc.mu.Unlock()
+}
+
+// WrapStream wraps an io Reader so it will be accounted in the same
+// way as account
+func (acc *Account) WrapStream(in io.Reader) io.Reader {
+ return &accountStream{
+ acc: acc,
+ in: in,
+ }
+}
+
+// accountStream accounts a single io.Reader into a parent *Account
+type accountStream struct {
+ acc *Account
+ in io.Reader
+}
+
+// OldStream returns the underlying stream
+func (a *accountStream) OldStream() io.Reader {
+ return a.in
+}
+
+// SetStream sets the underlying stream
+func (a *accountStream) SetStream(in io.Reader) {
+ a.in = in
+}
+
+// WrapStream wraps in (an io.Reader) in an accounter
+func (a *accountStream) WrapStream(in io.Reader) io.Reader {
+ return a.acc.WrapStream(in)
+}
+
+// Read bytes from the object - see io.Reader
+func (a *accountStream) Read(p []byte) (n int, err error) {
+ return a.acc.read(a.in, p)
+}
+
+// Accounter accounts a stream allowing the accounting to be removed and re-added
+type Accounter interface {
+ io.Reader
+ OldStream() io.Reader
+ SetStream(io.Reader)
+ WrapStream(io.Reader) io.Reader
+}
+
+// WrapFn wraps an io.Reader (for accounting purposes usually)
+type WrapFn func(io.Reader) io.Reader
+
+// UnWrap unwraps a reader, returning the unwrapped reader and wrap, a
+// function to wrap it back up again. If `in` is an Accounter then this
+// function will remove the accounting and wrap will put it back onto
+// the new Reader passed in.
+//
+// This allows functions which wrap io.Readers to move the accounting
+// to the end of the wrapped chain of readers. This is very important
+// if buffering is being introduced and if the Reader might be wrapped
+// again.
+func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
+ acc, ok := in.(Accounter)
+ if !ok {
+ return in, func(r io.Reader) io.Reader { return r }
+ }
+ return acc.OldStream(), acc.WrapStream
+}
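Taken together, this lets a caller peel the accounting off a reader, insert another layer, and put the accounting back on the outside. A minimal sketch, assuming `r` is an `io.Reader` that may implement `Accounter` and that `bufio` is imported (the buffering layer is just an illustration):

```go
// move accounting to the outside of a newly wrapped reader
unwrapped, wrap := UnWrap(r)
buffered := bufio.NewReader(unwrapped) // any extra wrapping layer
r = wrap(buffered)                     // accounting back on top
```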
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/accounting_other.go b/vendor/github.com/ncw/rclone/fs/accounting/accounting_other.go
new file mode 100755
index 0000000..9a9cb29
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/accounting_other.go
@@ -0,0 +1,10 @@
+// Accounting and limiting reader
+// Non-Unix specific functions.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package accounting
+
+// startSignalHandler() is Unix specific and does nothing under non-Unix
+// platforms.
+func startSignalHandler() {}
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/accounting_unix.go b/vendor/github.com/ncw/rclone/fs/accounting/accounting_unix.go
new file mode 100755
index 0000000..20d659d
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/accounting_unix.go
@@ -0,0 +1,36 @@
+// Accounting and limiting reader
+// Unix specific functions.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package accounting
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
+func startSignalHandler() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGUSR2)
+
+ go func() {
+ // This runs forever, but blocks until the signal is received.
+ for {
+ <-signals
+ tokenBucketMu.Lock()
+ bwLimitToggledOff = !bwLimitToggledOff
+ tokenBucket, prevTokenBucket = prevTokenBucket, tokenBucket
+ s := "disabled"
+ if tokenBucket != nil {
+ s = "enabled"
+ }
+ tokenBucketMu.Unlock()
+ fs.Logf(nil, "Bandwidth limit %s by user", s)
+ }
+ }()
+}
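In practice the toggle is driven from outside the process, e.g. `kill -USR2 <pid>` against a running rclone. A sketch of raising the same signal from within the process (Unix only, assuming the handler above has been started and `os`/`syscall` are imported):

```go
// toggle the bandwidth limiter in-process by raising SIGUSR2
_ = syscall.Kill(os.Getpid(), syscall.SIGUSR2)
```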
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/inprogress.go b/vendor/github.com/ncw/rclone/fs/accounting/inprogress.go
new file mode 100755
index 0000000..2f9e014
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/inprogress.go
@@ -0,0 +1,41 @@
+package accounting
+
+import (
+ "sync"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// inProgress holds a synchronized map of in progress transfers
+type inProgress struct {
+ mu sync.Mutex
+ m map[string]*Account
+}
+
+// newInProgress makes a new inProgress object
+func newInProgress() *inProgress {
+ return &inProgress{
+ m: make(map[string]*Account, fs.Config.Transfers),
+ }
+}
+
+// set marks the name as in progress
+func (ip *inProgress) set(name string, acc *Account) {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ ip.m[name] = acc
+}
+
+// clear marks the name as no longer in progress
+func (ip *inProgress) clear(name string) {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ delete(ip.m, name)
+}
+
+// get gets the account for name, or nil if not found
+func (ip *inProgress) get(name string) *Account {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ return ip.m[name]
+}
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/stats.go b/vendor/github.com/ncw/rclone/fs/accounting/stats.go
new file mode 100755
index 0000000..6f65805
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/stats.go
@@ -0,0 +1,406 @@
+package accounting
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/rc"
+)
+
+var (
+ // Stats is the global statistics counter
+ Stats = NewStats()
+)
+
+func init() {
+ // Set the function pointer up in fs
+ fs.CountError = Stats.Error
+
+ rc.Add(rc.Call{
+ Path: "core/stats",
+ Fn: Stats.RemoteStats,
+ Title: "Returns stats about current transfers.",
+ Help: `
+This returns all available stats
+
+ rclone rc core/stats
+
+Returns the following values:
+
+` + "```" + `
+{
+ "speed": average speed in bytes/sec since start of the process,
+ "bytes": total transferred bytes since the start of the process,
+ "errors": number of errors,
+ "checks": number of checked files,
+ "transfers": number of transferred files,
+ "deletes" : number of deleted files,
+ "elapsedTime": time in seconds since the start of the process,
+ "lastError": last occurred error,
+ "transferring": an array of currently active file transfers:
+ [
+ {
+ "bytes": total transferred bytes for this file,
+ "eta": estimated time in seconds until file transfer completion
+ "name": name of the file,
+ "percentage": progress of the file transfer in percent,
+ "speed": speed in bytes/sec,
+ "speedAvg": speed in bytes/sec as an exponentially weighted moving average,
+ "size": size of the file in bytes
+ }
+ ],
+ "checking": an array of names of currently active file checks
+ []
+}
+` + "```" + `
+Values for "transferring", "checking" and "lastError" are only assigned if data is available.
+The value for "eta" is null if an eta cannot be determined.
+`,
+ })
+}
+
+// StatsInfo accounts all transfers
+type StatsInfo struct {
+ mu sync.RWMutex
+ bytes int64
+ errors int64
+ lastError error
+ checks int64
+ checking *stringSet
+ checkQueue int
+ checkQueueSize int64
+ transfers int64
+ transferring *stringSet
+ transferQueue int
+ transferQueueSize int64
+ renameQueue int
+ renameQueueSize int64
+ deletes int64
+ start time.Time
+ inProgress *inProgress
+}
+
+// NewStats creates an initialised StatsInfo
+func NewStats() *StatsInfo {
+ return &StatsInfo{
+ checking: newStringSet(fs.Config.Checkers),
+ transferring: newStringSet(fs.Config.Transfers),
+ start: time.Now(),
+ inProgress: newInProgress(),
+ }
+}
+
+// RemoteStats returns stats for rc
+func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
+ out = make(rc.Params)
+ s.mu.RLock()
+ dt := time.Now().Sub(s.start)
+ dtSeconds := dt.Seconds()
+ speed := 0.0
+ if dt > 0 {
+ speed = float64(s.bytes) / dtSeconds
+ }
+ out["speed"] = speed
+ out["bytes"] = s.bytes
+ out["errors"] = s.errors
+ out["checks"] = s.checks
+ out["transfers"] = s.transfers
+ out["deletes"] = s.deletes
+ out["elapsedTime"] = dtSeconds
+ s.mu.RUnlock()
+ if !s.checking.empty() {
+ var c []string
+ s.checking.mu.RLock()
+ defer s.checking.mu.RUnlock()
+ for name := range s.checking.items {
+ c = append(c, name)
+ }
+ out["checking"] = c
+ }
+ if !s.transferring.empty() {
+ var t []interface{}
+ s.transferring.mu.RLock()
+ defer s.transferring.mu.RUnlock()
+ for name := range s.transferring.items {
+ if acc := s.inProgress.get(name); acc != nil {
+ t = append(t, acc.RemoteStats())
+ } else {
+ t = append(t, name)
+ }
+ }
+ out["transferring"] = t
+ }
+ if s.errors > 0 {
+ out["lastError"] = s.lastError
+ }
+ return out, nil
+}
+
+// eta returns the ETA of the current operation,
+// rounded to full seconds.
+// If the ETA cannot be determined 'ok' returns false.
+func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
+ if total <= 0 || size < 0 || rate <= 0 {
+ return 0, false
+ }
+ remaining := total - size
+ if remaining < 0 {
+ return 0, false
+ }
+ seconds := float64(remaining) / rate
+ return time.Second * time.Duration(seconds), true
+}
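A worked example of the arithmetic, as used from inside this package: with 25 MiB transferred out of 100 MiB at a steady 25 MiB/s, 75 MiB remain and the call reports three seconds (values here are hypothetical):

```go
// done and total in bytes, rate in bytes/s
d, ok := eta(25<<20, 100<<20, float64(25<<20))
// d == 3*time.Second, ok == true
```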
+
+// etaString returns the ETA of the current operation,
+// rounded to full seconds.
+// If the ETA cannot be determined it returns "-"
+func etaString(done, total int64, rate float64) string {
+ d, ok := eta(done, total, rate)
+ if !ok {
+ return "-"
+ }
+ return d.String()
+}
+
+// percent returns a/b as a percentage rounded to the nearest integer
+// as a string
+//
+// if the percentage is invalid it returns "-"
+func percent(a int64, b int64) string {
+ if a < 0 || b <= 0 {
+ return "-"
+ }
+ return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
+}
+
+// String convert the StatsInfo to a string for printing
+func (s *StatsInfo) String() string {
+ // checking and transferring have their own locking so read
+ // here before lock to prevent deadlock on GetBytes
+ transferring, checking := s.transferring.count(), s.checking.count()
+ transferringBytesDone, transferringBytesTotal := s.transferring.progress()
+
+ s.mu.RLock()
+
+ dt := time.Now().Sub(s.start)
+ dtSeconds := dt.Seconds()
+ speed := 0.0
+ if dt > 0 {
+ speed = float64(s.bytes) / dtSeconds
+ }
+ dtRounded := dt - (dt % (time.Second / 10))
+
+ if fs.Config.DataRateUnit == "bits" {
+ speed = speed * 8
+ }
+
+ var (
+ totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
+ totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
+ // note that s.bytes already includes transferringBytesDone so
+ // we take it off here to avoid double counting
+ totalSize = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
+ currentSize = s.bytes
+ buf = &bytes.Buffer{}
+ xfrchkString = ""
+ )
+
+ if !fs.Config.StatsOneLine {
+ _, _ = fmt.Fprintf(buf, "\nTransferred: ")
+ } else {
+ xfrchk := []string{}
+ if totalTransfer > 0 && s.transferQueue > 0 {
+ xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
+ }
+ if totalChecks > 0 && s.checkQueue > 0 {
+ xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
+ }
+ if len(xfrchk) > 0 {
+ xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
+ }
+ }
+
+ _, _ = fmt.Fprintf(buf, "%10s / %s, %s, %s, ETA %s%s",
+ fs.SizeSuffix(s.bytes),
+ fs.SizeSuffix(totalSize).Unit("Bytes"),
+ percent(s.bytes, totalSize),
+ fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
+ etaString(currentSize, totalSize, speed),
+ xfrchkString,
+ )
+
+ if !fs.Config.StatsOneLine {
+ _, _ = fmt.Fprintf(buf, `
+Errors: %10d
+Checks: %10d / %d, %s
+Transferred: %10d / %d, %s
+Elapsed time: %10v
+`,
+ s.errors,
+ s.checks, totalChecks, percent(s.checks, totalChecks),
+ s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
+ dtRounded)
+ }
+
+ // checking and transferring have their own locking so unlock
+ // here to prevent deadlock on GetBytes
+ s.mu.RUnlock()
+
+ // Add per transfer stats if required
+ if !fs.Config.StatsOneLine {
+ if !s.checking.empty() {
+ _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
+ }
+ if !s.transferring.empty() {
+ _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
+ }
+ }
+
+ return buf.String()
+}
+
+// Log outputs the StatsInfo to the log
+func (s *StatsInfo) Log() {
+ fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
+}
+
+// Bytes updates the stats for bytes bytes
+func (s *StatsInfo) Bytes(bytes int64) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.bytes += bytes
+}
+
+// GetBytes returns the number of bytes transferred so far
+func (s *StatsInfo) GetBytes() int64 {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.bytes
+}
+
+// Errors updates the stats for errors
+func (s *StatsInfo) Errors(errors int64) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.errors += errors
+}
+
+// GetErrors reads the number of errors
+func (s *StatsInfo) GetErrors() int64 {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.errors
+}
+
+// GetLastError returns the lastError
+func (s *StatsInfo) GetLastError() error {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.lastError
+}
+
+// Deletes updates the stats for deletes
+func (s *StatsInfo) Deletes(deletes int64) int64 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.deletes += deletes
+ return s.deletes
+}
+
+// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
+func (s *StatsInfo) ResetCounters() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.bytes = 0
+ s.errors = 0
+ s.checks = 0
+ s.transfers = 0
+ s.deletes = 0
+}
+
+// ResetErrors sets the errors count to 0
+func (s *StatsInfo) ResetErrors() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.errors = 0
+}
+
+// Errored returns whether there have been any errors
+func (s *StatsInfo) Errored() bool {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.errors != 0
+}
+
+// Error adds a single error into the stats and assigns lastError
+func (s *StatsInfo) Error(err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.errors++
+ s.lastError = err
+}
+
+// Checking adds a check into the stats
+func (s *StatsInfo) Checking(remote string) {
+ s.checking.add(remote)
+}
+
+// DoneChecking removes a check from the stats
+func (s *StatsInfo) DoneChecking(remote string) {
+ s.checking.del(remote)
+ s.mu.Lock()
+ s.checks++
+ s.mu.Unlock()
+}
+
+// GetTransfers reads the number of transfers
+func (s *StatsInfo) GetTransfers() int64 {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.transfers
+}
+
+// Transferring adds a transfer into the stats
+func (s *StatsInfo) Transferring(remote string) {
+ s.transferring.add(remote)
+}
+
+// DoneTransferring removes a transfer from the stats
+//
+// if ok is true then it increments the transfers count
+func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
+ s.transferring.del(remote)
+ if ok {
+ s.mu.Lock()
+ s.transfers++
+ s.mu.Unlock()
+ }
+}
+
+// SetCheckQueue sets the number of queued checks
+func (s *StatsInfo) SetCheckQueue(n int, size int64) {
+ s.mu.Lock()
+ s.checkQueue = n
+ s.checkQueueSize = size
+ s.mu.Unlock()
+}
+
+// SetTransferQueue sets the number of queued transfers
+func (s *StatsInfo) SetTransferQueue(n int, size int64) {
+ s.mu.Lock()
+ s.transferQueue = n
+ s.transferQueueSize = size
+ s.mu.Unlock()
+}
+
+// SetRenameQueue sets the number of queued renames
+func (s *StatsInfo) SetRenameQueue(n int, size int64) {
+ s.mu.Lock()
+ s.renameQueue = n
+ s.renameQueueSize = size
+ s.mu.Unlock()
+}
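Putting the pieces together, a typical transfer ties the global Stats to an Account for the duration of a copy. A sketch under the assumption that `obj` is an `fs.Object`, `rc` its opened `io.ReadCloser` and `dst` an `io.Writer`:

```go
remote := obj.Remote()
Stats.Transferring(remote)
acc := NewAccount(rc, obj).WithBuffer() // registers with Stats.inProgress
_, copyErr := io.Copy(dst, acc)
closeErr := acc.Close() // clears the in-progress entry
Stats.DoneTransferring(remote, copyErr == nil && closeErr == nil)
```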
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/stringset.go b/vendor/github.com/ncw/rclone/fs/accounting/stringset.go
new file mode 100755
index 0000000..e65be60
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/stringset.go
@@ -0,0 +1,88 @@
+package accounting
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+// stringSet holds a set of strings
+type stringSet struct {
+ mu sync.RWMutex
+ items map[string]struct{}
+}
+
+// newStringSet creates a new empty string set of capacity size
+func newStringSet(size int) *stringSet {
+ return &stringSet{
+ items: make(map[string]struct{}, size),
+ }
+}
+
+// add adds remote to the set
+func (ss *stringSet) add(remote string) {
+ ss.mu.Lock()
+ ss.items[remote] = struct{}{}
+ ss.mu.Unlock()
+}
+
+// del removes remote from the set
+func (ss *stringSet) del(remote string) {
+ ss.mu.Lock()
+ delete(ss.items, remote)
+ ss.mu.Unlock()
+}
+
+// empty returns whether the set has any items
+func (ss *stringSet) empty() bool {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ return len(ss.items) == 0
+}
+
+// count returns the number of items in the set
+func (ss *stringSet) count() int {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ return len(ss.items)
+}
+
+// Strings returns all the strings in the stringSet, formatted for display and sorted
+func (ss *stringSet) Strings() []string {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ strings := make([]string, 0, len(ss.items))
+ for name := range ss.items {
+ var out string
+ if acc := Stats.inProgress.get(name); acc != nil {
+ out = acc.String()
+ } else {
+ out = name
+ }
+ strings = append(strings, " * "+out)
+ }
+ sorted := sort.StringSlice(strings)
+ sorted.Sort()
+ return sorted
+}
+
+// String returns all the file names in the stringSet joined by newline
+func (ss *stringSet) String() string {
+ return strings.Join(ss.Strings(), "\n")
+}
+
+// progress returns total bytes read as well as the size.
+func (ss *stringSet) progress() (totalBytes, totalSize int64) {
+ ss.mu.RLock()
+ defer ss.mu.RUnlock()
+ for name := range ss.items {
+ if acc := Stats.inProgress.get(name); acc != nil {
+ bytes, size := acc.progress()
+ if size >= 0 && bytes >= 0 {
+ totalBytes += bytes
+ totalSize += size
+ }
+ }
+ }
+ return totalBytes, totalSize
+}
diff --git a/vendor/github.com/ncw/rclone/fs/accounting/token_bucket.go b/vendor/github.com/ncw/rclone/fs/accounting/token_bucket.go
new file mode 100755
index 0000000..e71091e
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/accounting/token_bucket.go
@@ -0,0 +1,169 @@
+package accounting
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/rc"
+ "github.com/pkg/errors"
+ "golang.org/x/time/rate"
+)
+
+// Globals
+var (
+ tokenBucketMu sync.Mutex // protects the token bucket variables
+ tokenBucket *rate.Limiter
+ prevTokenBucket = tokenBucket
+ bwLimitToggledOff = false
+ currLimitMu sync.Mutex // protects changes to the timeslot
+ currLimit fs.BwTimeSlot
+)
+
+const maxBurstSize = 4 * 1024 * 1024 // must be bigger than the biggest request
+
+// make a new empty token bucket with the bandwidth given
+func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
+ newTokenBucket := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
+ // empty the bucket
+ err := newTokenBucket.WaitN(context.Background(), maxBurstSize)
+ if err != nil {
+ fs.Errorf(nil, "Failed to empty token bucket: %v", err)
+ }
+ return newTokenBucket
+}
+
+// StartTokenBucket starts the token bucket if necessary
+func StartTokenBucket() {
+ currLimitMu.Lock()
+ currLimit := fs.Config.BwLimit.LimitAt(time.Now())
+ currLimitMu.Unlock()
+
+ if currLimit.Bandwidth > 0 {
+ tokenBucket = newTokenBucket(currLimit.Bandwidth)
+ fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.Bandwidth)
+
+ // Start the SIGUSR2 signal handler to toggle bandwidth.
+ // This function does nothing in windows systems.
+ startSignalHandler()
+ }
+}
+
+// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
+func StartTokenTicker() {
+ // If the timetable has a single entry or was not specified, we don't need
+ // a ticker to update the bandwidth.
+ if len(fs.Config.BwLimit) <= 1 {
+ return
+ }
+
+ ticker := time.NewTicker(time.Minute)
+ go func() {
+ for range ticker.C {
+ limitNow := fs.Config.BwLimit.LimitAt(time.Now())
+ currLimitMu.Lock()
+
+ if currLimit.Bandwidth != limitNow.Bandwidth {
+ tokenBucketMu.Lock()
+
+ // If bwlimit is toggled off, the change should only
+ // become active on the next toggle, which causes
+ // an exchange of tokenBucket <-> prevTokenBucket
+ var targetBucket **rate.Limiter
+ if bwLimitToggledOff {
+ targetBucket = &prevTokenBucket
+ } else {
+ targetBucket = &tokenBucket
+ }
+
+ // Set new bandwidth. If unlimited, set tokenbucket to nil.
+ if limitNow.Bandwidth > 0 {
+ *targetBucket = newTokenBucket(limitNow.Bandwidth)
+ if bwLimitToggledOff {
+ fs.Logf(nil, "Scheduled bandwidth change. "+
+ "Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth)
+ } else {
+ fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth)
+ }
+ } else {
+ *targetBucket = nil
+ fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
+ }
+
+ currLimit = limitNow
+ tokenBucketMu.Unlock()
+ }
+ currLimitMu.Unlock()
+ }
+ }()
+}
+
+// limitBandwidth sleeps for the correct amount of time for the passage
+// of n bytes according to the current bandwidth limit
+func limitBandwidth(n int) {
+ tokenBucketMu.Lock()
+
+ // Limit the transfer speed if required
+ if tokenBucket != nil {
+ err := tokenBucket.WaitN(context.Background(), n)
+ if err != nil {
+ fs.Errorf(nil, "Token bucket error: %v", err)
+ }
+ }
+
+ tokenBucketMu.Unlock()
+}
+
+// SetBwLimit sets the current bandwidth limit
+func SetBwLimit(bandwidth fs.SizeSuffix) {
+ tokenBucketMu.Lock()
+ defer tokenBucketMu.Unlock()
+ if bandwidth > 0 {
+ tokenBucket = newTokenBucket(bandwidth)
+ fs.Logf(nil, "Bandwidth limit set to %v", bandwidth)
+ } else {
+ tokenBucket = nil
+ fs.Logf(nil, "Bandwidth limit reset to unlimited")
+ }
+}
+
+// Remote control for the token bucket
+func init() {
+ rc.Add(rc.Call{
+ Path: "core/bwlimit",
+ Fn: func(in rc.Params) (out rc.Params, err error) {
+ ibwlimit, ok := in["rate"]
+ if !ok {
+ return out, errors.Errorf("parameter rate not found")
+ }
+ bwlimit, ok := ibwlimit.(string)
+ if !ok {
+ return out, errors.Errorf("value must be string rate=%v", ibwlimit)
+ }
+ var bws fs.BwTimetable
+ err = bws.Set(bwlimit)
+ if err != nil {
+ return out, errors.Wrap(err, "bad bwlimit")
+ }
+ if len(bws) != 1 {
+ return out, errors.New("need exactly 1 bandwidth setting")
+ }
+ bw := bws[0]
+ SetBwLimit(bw.Bandwidth)
+ return rc.Params{"rate": bw.Bandwidth.String()}, nil
+ },
+ Title: "Set the bandwidth limit.",
+ Help: `
+This sets the bandwidth limit to that passed in.
+
+Eg
+
+ rclone rc core/bwlimit rate=1M
+ rclone rc core/bwlimit rate=off
+
+The format of the parameter is exactly the same as passed to --bwlimit
+except only one bandwidth may be specified.
+`,
+ })
+}
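The limiter itself is the standard token bucket from golang.org/x/time/rate: each transferred byte costs one token, newTokenBucket drains the initial burst so a fresh limit does not start with a 4 MiB free allowance, and WaitN blocks the reader until enough tokens have accumulated. A minimal sketch of the same pattern in isolation, where `n` is a hypothetical chunk size:

```go
// cap throughput at ~1 MiB/s; block until n bytes' worth of tokens exist
lim := rate.NewLimiter(rate.Limit(1<<20), maxBurstSize)
if err := lim.WaitN(context.Background(), n); err != nil {
	// context cancelled, or n exceeds the burst size
}
```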
diff --git a/vendor/github.com/ncw/rclone/fs/asyncreader/asyncreader.go b/vendor/github.com/ncw/rclone/fs/asyncreader/asyncreader.go
new file mode 100755
index 0000000..9f7437d
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/asyncreader/asyncreader.go
@@ -0,0 +1,343 @@
+// Package asyncreader provides an asynchronous reader which performs
+// read-ahead independently of the consumer's reads
+package asyncreader
+
+import (
+ "io"
+ "sync"
+
+ "github.com/ncw/rclone/lib/readers"
+ "github.com/pkg/errors"
+)
+
+const (
+ // BufferSize is the default size of the async buffer
+ BufferSize = 1024 * 1024
+ softStartInitial = 4 * 1024
+)
+
+var asyncBufferPool = sync.Pool{
+ New: func() interface{} { return newBuffer() },
+}
+
+var errorStreamAbandoned = errors.New("stream abandoned")
+
+// AsyncReader will do async read-ahead from the input reader
+// and make the data available as an io.Reader.
+// This should be fully transparent, except that once an error
+// has been returned from the Reader, it will not recover.
+type AsyncReader struct {
+ in io.ReadCloser // Input reader
+ ready chan *buffer // Buffers ready to be handed to the reader
+ token chan struct{} // Tokens which allow a buffer to be taken
+ exit chan struct{} // Closes when finished
+ buffers int // Number of buffers
+ err error // If an error has occurred it is here
+ cur *buffer // Current buffer being served
+ exited chan struct{} // Channel is closed when the async reader shuts down
+ size int // size of buffer to use
+ closed bool // whether we have closed the underlying stream
+ mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
+}
+
+// New returns a reader that will asynchronously read from
+// the supplied Reader into a number of buffers each of size BufferSize
+// It will start reading from the input at once, maybe even before this
+// function has returned.
+// The input can be read from the returned reader.
+// When done use Close to release the buffers and close the supplied input.
+func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
+ if buffers <= 0 {
+ return nil, errors.New("number of buffers too small")
+ }
+ if rd == nil {
+ return nil, errors.New("nil reader supplied")
+ }
+ a := &AsyncReader{}
+ a.init(rd, buffers)
+ return a, nil
+}
+
+func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
+ a.in = rd
+ a.ready = make(chan *buffer, buffers)
+ a.token = make(chan struct{}, buffers)
+ a.exit = make(chan struct{})
+ a.exited = make(chan struct{})
+ a.buffers = buffers
+ a.cur = nil
+ a.size = softStartInitial
+
+ // Create tokens
+ for i := 0; i < buffers; i++ {
+ a.token <- struct{}{}
+ }
+
+ // Start async reader
+ go func() {
+ // Ensure that when we exit this is signalled.
+ defer close(a.exited)
+ defer close(a.ready)
+ for {
+ select {
+ case <-a.token:
+ b := a.getBuffer()
+ if a.size < BufferSize {
+ b.buf = b.buf[:a.size]
+ a.size <<= 1
+ }
+ err := b.read(a.in)
+ a.ready <- b
+ if err != nil {
+ return
+ }
+ case <-a.exit:
+ return
+ }
+ }
+ }()
+}
+
+// return the buffer to the pool (clearing it)
+func (a *AsyncReader) putBuffer(b *buffer) {
+ b.clear()
+ asyncBufferPool.Put(b)
+}
+
+// get a buffer from the pool
+func (a *AsyncReader) getBuffer() *buffer {
+ b := asyncBufferPool.Get().(*buffer)
+ return b
+}
+
+// fill swaps in the next ready buffer once the current one is exhausted.
+func (a *AsyncReader) fill() (err error) {
+ if a.cur.isEmpty() {
+ if a.cur != nil {
+ a.putBuffer(a.cur)
+ a.token <- struct{}{}
+ a.cur = nil
+ }
+ b, ok := <-a.ready
+ if !ok {
+ // Return an error to show fill failed
+ if a.err == nil {
+ return errorStreamAbandoned
+ }
+ return a.err
+ }
+ a.cur = b
+ }
+ return nil
+}
+
+// Read will return the next available data.
+func (a *AsyncReader) Read(p []byte) (n int, err error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ // Swap buffer and maybe return error
+ err = a.fill()
+ if err != nil {
+ return 0, err
+ }
+
+ // Copy what we can
+ n = copy(p, a.cur.buffer())
+ a.cur.increment(n)
+
+ // If at end of buffer, return any error, if present
+ if a.cur.isEmpty() {
+ a.err = a.cur.err
+ return n, a.err
+ }
+ return n, nil
+}
+
+// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written.
+// Any error encountered during the write is also returned.
+func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ n = 0
+ for {
+ err = a.fill()
+ if err != nil {
+ return n, err
+ }
+ n2, err := w.Write(a.cur.buffer())
+ a.cur.increment(n2)
+ n += int64(n2)
+ if err != nil {
+ return n, err
+ }
+ if a.cur.err != nil {
+ a.err = a.cur.err
+ return n, a.cur.err
+ }
+ }
+}
+
+// SkipBytes will try to seek 'skip' bytes relative to the current position.
+// On success it returns true. If 'skip' is outside the current buffer data or
+// an error occurs, Abandon is called and false is returned.
+func (a *AsyncReader) SkipBytes(skip int) (ok bool) {
+ a.mu.Lock()
+ defer func() {
+ a.mu.Unlock()
+ if !ok {
+ a.Abandon()
+ }
+ }()
+
+ if a.err != nil {
+ return false
+ }
+ if skip < 0 {
+ // seek backwards if skip is inside current buffer
+ if a.cur != nil && a.cur.offset+skip >= 0 {
+ a.cur.offset += skip
+ return true
+ }
+ return false
+ }
+ // early return if skip is past the maximum buffer capacity
+ if skip >= (len(a.ready)+1)*BufferSize {
+ return false
+ }
+
+ refillTokens := 0
+ for {
+ if a.cur.isEmpty() {
+ if a.cur != nil {
+ a.putBuffer(a.cur)
+ refillTokens++
+ a.cur = nil
+ }
+ select {
+ case b, ok := <-a.ready:
+ if !ok {
+ return false
+ }
+ a.cur = b
+ default:
+ return false
+ }
+ }
+
+ n := len(a.cur.buffer())
+ if n > skip {
+ n = skip
+ }
+ a.cur.increment(n)
+ skip -= n
+ if skip == 0 {
+ for ; refillTokens > 0; refillTokens-- {
+ a.token <- struct{}{}
+ }
+ // If at end of buffer, store any error, if present
+ if a.cur.isEmpty() && a.cur.err != nil {
+ a.err = a.cur.err
+ }
+ return true
+ }
+ if a.cur.err != nil {
+ a.err = a.cur.err
+ return false
+ }
+ }
+}
+
+// Abandon will ensure that the underlying async reader is shut down.
+// It will NOT close the input supplied on New.
+func (a *AsyncReader) Abandon() {
+ select {
+ case <-a.exit:
+ // Do nothing if reader routine already exited
+ return
+ default:
+ }
+ // Close and wait for go routine
+ close(a.exit)
+ <-a.exited
+ // take the lock to wait for Read/WriteTo to complete
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ // Return any outstanding buffers to the Pool
+ if a.cur != nil {
+ a.putBuffer(a.cur)
+ a.cur = nil
+ }
+ for b := range a.ready {
+ a.putBuffer(b)
+ }
+}
+
+// Close will ensure that the underlying async reader is shut down.
+// It will also close the input supplied on New.
+func (a *AsyncReader) Close() (err error) {
+ a.Abandon()
+ if a.closed {
+ return nil
+ }
+ a.closed = true
+ return a.in.Close()
+}
+
+// Internal buffer
+// If an error is present, it must be returned
+// once all buffer content has been served.
+type buffer struct {
+ buf []byte
+ err error
+ offset int
+}
+
+func newBuffer() *buffer {
+ return &buffer{
+ buf: make([]byte, BufferSize),
+ err: nil,
+ }
+}
+
+// clear returns the buffer to its full size and clears the members
+func (b *buffer) clear() {
+ b.buf = b.buf[:cap(b.buf)]
+ b.err = nil
+ b.offset = 0
+}
+
+// isEmpty returns true if the offset is at the end of the buffer, or
+// if the buffer is nil
+func (b *buffer) isEmpty() bool {
+ if b == nil {
+ return true
+ }
+ if len(b.buf)-b.offset <= 0 {
+ return true
+ }
+ return false
+}
+
+// read into start of the buffer from the supplied reader,
+// resets the offset and updates the size of the buffer.
+// Any error encountered during the read is returned.
+func (b *buffer) read(rd io.Reader) error {
+ var n int
+ n, b.err = readers.ReadFill(rd, b.buf)
+ b.buf = b.buf[0:n]
+ b.offset = 0
+ return b.err
+}
+
+// Return the buffer at current offset
+func (b *buffer) buffer() []byte {
+ return b.buf[b.offset:]
+}
+
+// increment the offset
+func (b *buffer) increment(n int) {
+ b.offset += n
+}
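A minimal usage sketch, assuming `rc` is a slow `io.ReadCloser` such as a network stream; note that `io.Copy` picks up the `WriteTo` fast path automatically:

```go
ar, err := asyncreader.New(rc, 4) // 4 read-ahead buffers of BufferSize each
if err != nil {
	return err
}
defer func() { _ = ar.Close() }() // Close also closes rc
_, err = io.Copy(ioutil.Discard, ar)
```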
diff --git a/vendor/github.com/ncw/rclone/fs/bwtimetable.go b/vendor/github.com/ncw/rclone/fs/bwtimetable.go
new file mode 100755
index 0000000..f4f2c06
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/bwtimetable.go
@@ -0,0 +1,214 @@
+package fs
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// BwTimeSlot represents a bandwidth configuration at a point in time.
+type BwTimeSlot struct {
+ DayOfTheWeek int
+ HHMM int
+ Bandwidth SizeSuffix
+}
+
+// BwTimetable contains all configured time slots.
+type BwTimetable []BwTimeSlot
+
+// String returns a printable representation of BwTimetable.
+func (x BwTimetable) String() string {
+ ret := []string{}
+ for _, ts := range x {
+ ret = append(ret, fmt.Sprintf("%s-%04.4d,%s", time.Weekday(ts.DayOfTheWeek), ts.HHMM, ts.Bandwidth.String()))
+ }
+ return strings.Join(ret, " ")
+}
+
+// Basic hour format checking
+func validateHour(HHMM string) error {
+ if len(HHMM) != 5 {
+ return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
+ }
+ hh, err := strconv.Atoi(HHMM[0:2])
+ if err != nil {
+ return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
+ }
+ if hh < 0 || hh > 23 {
+ return errors.Errorf("invalid hour (must be between 00 and 23): %q", hh)
+ }
+ mm, err := strconv.Atoi(HHMM[3:])
+ if err != nil {
+ return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
+ }
+ if mm < 0 || mm > 59 {
+ return errors.Errorf("invalid minute (must be between 00 and 59): %q", hh)
+ }
+ return nil
+}
+
+// Basic weekday format checking
+func parseWeekday(dayOfWeek string) (int, error) {
+ dayOfWeek = strings.ToLower(dayOfWeek)
+ if dayOfWeek == "sun" || dayOfWeek == "sunday" {
+ return 0, nil
+ }
+ if dayOfWeek == "mon" || dayOfWeek == "monday" {
+ return 1, nil
+ }
+ if dayOfWeek == "tue" || dayOfWeek == "tuesday" {
+ return 2, nil
+ }
+ if dayOfWeek == "wed" || dayOfWeek == "wednesday" {
+ return 3, nil
+ }
+ if dayOfWeek == "thu" || dayOfWeek == "thursday" {
+ return 4, nil
+ }
+ if dayOfWeek == "fri" || dayOfWeek == "friday" {
+ return 5, nil
+ }
+ if dayOfWeek == "sat" || dayOfWeek == "saturday" {
+ return 6, nil
+ }
+ return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
+}
+
+// Set the bandwidth timetable.
+func (x *BwTimetable) Set(s string) error {
+ // The timetable is formatted as:
+ // "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
+ // If only a single bandwidth identifier is provided, we assume constant bandwidth.
+
+ if len(s) == 0 {
+ return errors.New("empty string")
+ }
+ // Single value without time specification.
+ if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
+ ts := BwTimeSlot{}
+ if err := ts.Bandwidth.Set(s); err != nil {
+ return err
+ }
+ ts.DayOfTheWeek = 0
+ ts.HHMM = 0
+ *x = BwTimetable{ts}
+ return nil
+ }
+
+ for _, tok := range strings.Split(s, " ") {
+ tv := strings.Split(tok, ",")
+
+ // Format must be dayOfWeek-HH:MM,BW
+ if len(tv) != 2 {
+ return errors.Errorf("invalid time/bandwidth specification: %q", tok)
+ }
+
+ weekday := 0
+ HHMM := ""
+ if !strings.Contains(tv[0], "-") {
+ HHMM = tv[0]
+ if err := validateHour(HHMM); err != nil {
+ return err
+ }
+ for i := 0; i < 7; i++ {
+ hh, _ := strconv.Atoi(HHMM[0:2])
+ mm, _ := strconv.Atoi(HHMM[3:])
+ ts := BwTimeSlot{
+ DayOfTheWeek: i,
+ HHMM: (hh * 100) + mm,
+ }
+ if err := ts.Bandwidth.Set(tv[1]); err != nil {
+ return err
+ }
+ *x = append(*x, ts)
+ }
+ } else {
+ timespec := strings.Split(tv[0], "-")
+ if len(timespec) != 2 {
+ return errors.Errorf("invalid time specification: %q", tv[0])
+ }
+ var err error
+ weekday, err = parseWeekday(timespec[0])
+ if err != nil {
+ return err
+ }
+ HHMM = timespec[1]
+ if err := validateHour(HHMM); err != nil {
+ return err
+ }
+
+ hh, _ := strconv.Atoi(HHMM[0:2])
+ mm, _ := strconv.Atoi(HHMM[3:])
+ ts := BwTimeSlot{
+ DayOfTheWeek: weekday,
+ HHMM: (hh * 100) + mm,
+ }
+ // Bandwidth limit for this time slot.
+ if err := ts.Bandwidth.Set(tv[1]); err != nil {
+ return err
+ }
+ *x = append(*x, ts)
+ }
+ }
+ return nil
+}
+
+// Difference in minutes between lateDayOfWeekHHMM and earlyDayOfWeekHHMM
+func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
+
+ lateTimeMinutes := (lateDayOfWeekHHMM / 10000) * 24 * 60
+ lateTimeMinutes += ((lateDayOfWeekHHMM / 100) % 100) * 60
+ lateTimeMinutes += lateDayOfWeekHHMM % 100
+
+ earlyTimeMinutes := (earlyDayOfWeekHHMM / 10000) * 24 * 60
+ earlyTimeMinutes += ((earlyDayOfWeekHHMM / 100) % 100) * 60
+ earlyTimeMinutes += earlyDayOfWeekHHMM % 100
+
+ return lateTimeMinutes - earlyTimeMinutes
+}
+
+// LimitAt returns a BwTimeSlot for the time requested.
+func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
+ // If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
+ if len(x) == 0 {
+ return BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1}
+ }
+
+ dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()
+
+ // By default, we return the last element in the timetable. This
+ // satisfies two conditions: 1) If there's only one element it
+ // will always be selected, and 2) The last element of the table
+ // will "wrap around" until overridden by an earlier time slot.
+ ret := x[len(x)-1]
+ mindif := 0
+ first := true
+
+ // Look for most recent time slot.
+ for _, ts := range x {
+ // Ignore the past
+ if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
+ continue
+ }
+ dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
+ if first {
+ mindif = dif
+ first = false
+ }
+ if dif <= mindif {
+ mindif = dif
+ ret = ts
+ }
+ }
+
+ return ret
+}
+
+// Type of the value
+func (x BwTimetable) Type() string {
+ return "BwTimetable"
+}
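A short sketch of parsing and querying a timetable, using the same format the Set comment describes (a bandwidth of `off` parses to unlimited):

```go
var tt fs.BwTimetable
if err := tt.Set("Mon-10:00,512k Mon-18:00,10M Sat-00:00,off"); err != nil {
	return err
}
slot := tt.LimitAt(time.Now())
if slot.Bandwidth > 0 {
	// throttle to slot.Bandwidth bytes/s
}
```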
diff --git a/vendor/github.com/ncw/rclone/fs/config.go b/vendor/github.com/ncw/rclone/fs/config.go
new file mode 100755
index 0000000..cf608b2
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config.go
@@ -0,0 +1,131 @@
+package fs
+
+import (
+ "net"
+ "strings"
+ "time"
+)
+
+// Global
+var (
+ // Config is the global config
+ Config = NewConfig()
+
+ // Read a value from the config file
+ //
+ // This is a function pointer to decouple the config
+ // implementation from the fs
+ ConfigFileGet = func(section, key string) (string, bool) { return "", false }
+
+ // Set a value into the config file
+ //
+ // This is a function pointer to decouple the config
+ // implementation from the fs
+ ConfigFileSet = func(section, key, value string) {
+ Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
+ }
+
+ // CountError counts an error. If any errors have been
+ // counted then it will exit with a non zero error code.
+ //
+ // This is a function pointer to decouple the config
+ // implementation from the fs
+ CountError = func(err error) {}
+
+ // ConfigProvider is the config key used for provider options
+ ConfigProvider = "provider"
+)
+
+// ConfigInfo is filesystem config options
+type ConfigInfo struct {
+ LogLevel LogLevel
+ StatsLogLevel LogLevel
+ DryRun bool
+ CheckSum bool
+ SizeOnly bool
+ IgnoreTimes bool
+ IgnoreExisting bool
+ IgnoreErrors bool
+ ModifyWindow time.Duration
+ Checkers int
+ Transfers int
+ ConnectTimeout time.Duration // Connect timeout
+ Timeout time.Duration // Data channel timeout
+ Dump DumpFlags
+ InsecureSkipVerify bool // Skip server certificate verification
+ DeleteMode DeleteMode
+ MaxDelete int64
+ TrackRenames bool // Track file renames.
+ LowLevelRetries int
+ UpdateOlder bool // Skip files that are newer on the destination
+ NoGzip bool // Disable compression
+ MaxDepth int
+ IgnoreSize bool
+ IgnoreChecksum bool
+ NoUpdateModTime bool
+ DataRateUnit string
+ BackupDir string
+ Suffix string
+ UseListR bool
+ BufferSize SizeSuffix
+ BwLimit BwTimetable
+ TPSLimit float64
+ TPSLimitBurst int
+ BindAddr net.IP
+ DisableFeatures []string
+ UserAgent string
+ Immutable bool
+ AutoConfirm bool
+ StreamingUploadCutoff SizeSuffix
+ StatsFileNameLength int
+ AskPassword bool
+ UseServerModTime bool
+ MaxTransfer SizeSuffix
+ MaxBacklog int
+ StatsOneLine bool
+ Progress bool
+}
+
+// NewConfig creates a new config with everything set to the default
+// value. These are the ultimate defaults and are overridden by the
+// config module.
+func NewConfig() *ConfigInfo {
+ c := new(ConfigInfo)
+
+ // Set any values which aren't the zero for the type
+ c.LogLevel = LogLevelNotice
+ c.StatsLogLevel = LogLevelInfo
+ c.ModifyWindow = time.Nanosecond
+ c.Checkers = 8
+ c.Transfers = 4
+ c.ConnectTimeout = 60 * time.Second
+ c.Timeout = 5 * 60 * time.Second
+ c.DeleteMode = DeleteModeDefault
+ c.MaxDelete = -1
+ c.LowLevelRetries = 10
+ c.MaxDepth = -1
+ c.DataRateUnit = "bytes"
+ c.BufferSize = SizeSuffix(16 << 20)
+ c.UserAgent = "rclone/" + Version
+ c.StreamingUploadCutoff = SizeSuffix(100 * 1024)
+ c.StatsFileNameLength = 40
+ c.AskPassword = true
+ c.TPSLimitBurst = 1
+ c.MaxTransfer = -1
+ c.MaxBacklog = 10000
+
+ return c
+}
+
+// ConfigToEnv converts a config section and name, eg ("myremote",
+// "ignore-size") into an environment name
+// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
+func ConfigToEnv(section, name string) string {
+ return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
+}
+
+// OptionToEnv converts an option name, eg "ignore-size" into an
+// environment name "RCLONE_IGNORE_SIZE"
+func OptionToEnv(name string) string {
+ return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
+}
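Both helpers are simple string transforms; for example:

```go
fs.ConfigToEnv("myremote", "ignore-size") // "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
fs.OptionToEnv("ignore-size")             // "RCLONE_IGNORE_SIZE"
```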
diff --git a/vendor/github.com/ncw/rclone/fs/config/config.go b/vendor/github.com/ncw/rclone/fs/config/config.go
new file mode 100755
index 0000000..6023914
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/config.go
@@ -0,0 +1,1346 @@
+// Package config reads, writes and edits the config file and deals with command line flags
+package config
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ mathrand "math/rand"
+ "os"
+ "os/user"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/Unknwon/goconfig"
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/accounting"
+ "github.com/ncw/rclone/fs/config/configstruct"
+ "github.com/ncw/rclone/fs/config/obscure"
+ "github.com/ncw/rclone/fs/driveletter"
+ "github.com/ncw/rclone/fs/fshttp"
+ "github.com/ncw/rclone/fs/fspath"
+ "github.com/pkg/errors"
+ "golang.org/x/crypto/nacl/secretbox"
+ "golang.org/x/text/unicode/norm"
+)
+
+const (
+ configFileName = "rclone.conf"
+ hiddenConfigFileName = "." + configFileName
+
+ // ConfigToken is the key used to store the token under
+ ConfigToken = "token"
+
+ // ConfigClientID is the config key used to store the client id
+ ConfigClientID = "client_id"
+
+ // ConfigClientSecret is the config key used to store the client secret
+ ConfigClientSecret = "client_secret"
+
+ // ConfigAuthURL is the config key used to store the auth server endpoint
+ ConfigAuthURL = "auth_url"
+
+ // ConfigTokenURL is the config key used to store the token server endpoint
+ ConfigTokenURL = "token_url"
+
+ // ConfigAutomatic indicates that we want non-interactive configuration
+ ConfigAutomatic = "config_automatic"
+)
+
+// Global
+var (
+ // configFile is the global config data structure. Don't read it directly, use getConfigData()
+ configFile *goconfig.ConfigFile
+
+ // ConfigPath points to the config file
+ ConfigPath = makeConfigPath()
+
+ // CacheDir points to the cache directory. Users of this
+ // should make a subdirectory and use MkdirAll() to create it
+ // and any parents.
+ CacheDir = makeCacheDir()
+
+ // Key to use for password en/decryption.
+ // When nil, no encryption will be used for saving.
+ configKey []byte
+
+ // output of prompt for password
+ PasswordPromptOutput = os.Stderr
+
+ // If set to true, the configKey is obscured with obscure.Obscure and saved to a temp file when it is
+ // calculated from the password. The path of that temp file is then written to the environment variable
+ // `_RCLONE_CONFIG_KEY_FILE`. If `_RCLONE_CONFIG_KEY_FILE` is present, password prompt is skipped and `RCLONE_CONFIG_PASS` ignored.
+ // For security reasons, the temp file is deleted once the configKey is successfully loaded.
+ // This can be used to pass the configKey to a child process.
+ PassConfigKeyForDaemonization = false
+)
+
+func init() {
+ // Set the function pointers up in fs
+ fs.ConfigFileGet = FileGetFlag
+ fs.ConfigFileSet = FileSet
+}
+
+func getConfigData() *goconfig.ConfigFile {
+ if configFile == nil {
+ LoadConfig()
+ }
+ return configFile
+}
+
+// Return the path to the configuration file
+func makeConfigPath() string {
+ // Find user's home directory
+ usr, err := user.Current()
+ var homedir string
+ if err == nil {
+ homedir = usr.HomeDir
+ } else {
+ // Fall back to reading $HOME - work around user.Current() not
+ // working for cross compiled binaries on OSX.
+ // https://github.com/golang/go/issues/6376
+ homedir = os.Getenv("HOME")
+ }
+
+ // Possibly find the user's XDG config paths
+ // See XDG Base Directory specification
+ // https://specifications.freedesktop.org/basedir-spec/latest/
+ xdgdir := os.Getenv("XDG_CONFIG_HOME")
+ var xdgcfgdir string
+ if xdgdir != "" {
+ xdgcfgdir = filepath.Join(xdgdir, "rclone")
+ } else if homedir != "" {
+ xdgdir = filepath.Join(homedir, ".config")
+ xdgcfgdir = filepath.Join(xdgdir, "rclone")
+ }
+
+ // Use $XDG_CONFIG_HOME/rclone/rclone.conf if already existing
+ var xdgconf string
+ if xdgcfgdir != "" {
+ xdgconf = filepath.Join(xdgcfgdir, configFileName)
+ _, err := os.Stat(xdgconf)
+ if err == nil {
+ return xdgconf
+ }
+ }
+
+ // Use $HOME/.rclone.conf if already existing
+ var homeconf string
+ if homedir != "" {
+ homeconf = filepath.Join(homedir, hiddenConfigFileName)
+ _, err := os.Stat(homeconf)
+ if err == nil {
+ return homeconf
+ }
+ }
+
+ // Try to create $XDG_CONFIG_HOME/rclone/rclone.conf
+ if xdgconf != "" {
+ // xdgconf != "" implies xdgcfgdir != ""
+ err := os.MkdirAll(xdgcfgdir, os.ModePerm)
+ if err == nil {
+ return xdgconf
+ }
+ }
+
+ // Try to create $HOME/.rclone.conf
+ if homeconf != "" {
+ return homeconf
+ }
+
+ // Check to see if user supplied a --config variable or environment
+ // variable. We can't use pflag for this because it isn't initialised
+ // yet so we search the command line manually.
+ _, configSupplied := os.LookupEnv("RCLONE_CONFIG")
+ for _, item := range os.Args {
+ if item == "--config" || strings.HasPrefix(item, "--config=") {
+ configSupplied = true
+ break
+ }
+ }
+
+ // Default to ./.rclone.conf (current working directory)
+ if !configSupplied {
+ fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
+ fs.Errorf(nil, "Defaulting to storing config in current directory.")
+ fs.Errorf(nil, "Use --config flag to workaround.")
+ fs.Errorf(nil, "Error was: %v", err)
+ }
+ return hiddenConfigFileName
+}
+
+// LoadConfig loads the config file
+func LoadConfig() {
+ // Load configuration file.
+ var err error
+ configFile, err = loadConfigFile()
+ if err == errorConfigFileNotFound {
+ fs.Logf(nil, "Config file %q not found - using defaults", ConfigPath)
+ configFile, _ = goconfig.LoadFromReader(&bytes.Buffer{})
+ } else if err != nil {
+ log.Fatalf("Failed to load config file %q: %v", ConfigPath, err)
+ } else {
+ fs.Debugf(nil, "Using config file from %q", ConfigPath)
+ }
+
+ // Start the token bucket limiter
+ accounting.StartTokenBucket()
+
+ // Start the bandwidth update ticker
+ accounting.StartTokenTicker()
+
+ // Start the transactions per second limiter
+ fshttp.StartHTTPTokenBucket()
+}
+
+var errorConfigFileNotFound = errors.New("config file not found")
+
+// loadConfigFile will load a config file, and
+// automatically decrypt it.
+func loadConfigFile() (*goconfig.ConfigFile, error) {
+ b, err := ioutil.ReadFile(ConfigPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, errorConfigFileNotFound
+ }
+ return nil, err
+ }
+
+ // Find first non-empty line
+ r := bufio.NewReader(bytes.NewBuffer(b))
+ for {
+ line, _, err := r.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ return goconfig.LoadFromReader(bytes.NewBuffer(b))
+ }
+ return nil, err
+ }
+ l := strings.TrimSpace(string(line))
+ if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
+ continue
+ }
+ // The first non-empty, non-comment line must be RCLONE_ENCRYPT_V0
+ if l == "RCLONE_ENCRYPT_V0:" {
+ break
+ }
+ if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
+ return nil, errors.New("unsupported configuration encryption - update rclone for support")
+ }
+ return goconfig.LoadFromReader(bytes.NewBuffer(b))
+ }
+
+ // Encrypted content is base64 encoded.
+ dec := base64.NewDecoder(base64.StdEncoding, r)
+ box, err := ioutil.ReadAll(dec)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to load base64 encoded data")
+ }
+ if len(box) < 24+secretbox.Overhead {
+ return nil, errors.New("Configuration data too short")
+ }
+ envpw := os.Getenv("RCLONE_CONFIG_PASS")
+
+ var out []byte
+ for {
+ if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 {
+ fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile)
+ obscuredKey, err := ioutil.ReadFile(envKeyFile)
+ if err != nil {
+ errRemove := os.Remove(envKeyFile)
+ if errRemove != nil {
+ log.Fatalf("unable to read obscured config key and unable to delete the temp file: %v", err)
+ }
+ log.Fatalf("unable to read obscured config key: %v", err)
+ }
+ errRemove := os.Remove(envKeyFile)
+ if errRemove != nil {
+ log.Fatalf("unable to delete temp file with configKey: %v", err)
+ }
+ configKey = []byte(obscure.MustReveal(string(obscuredKey)))
+ fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey")
+ } else {
+ if len(configKey) == 0 && envpw != "" {
+ err := setConfigPassword(envpw)
+ if err != nil {
+ fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
+ } else {
+ fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
+ }
+ }
+ if len(configKey) == 0 {
+ if !fs.Config.AskPassword {
+ return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
+ }
+ getConfigPassword("Enter configuration password:")
+ }
+ }
+
+ // Nonce is first 24 bytes of the ciphertext
+ var nonce [24]byte
+ copy(nonce[:], box[:24])
+ var key [32]byte
+ copy(key[:], configKey[:32])
+
+ // Attempt to decrypt
+ var ok bool
+ out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
+ if ok {
+ break
+ }
+
+ // Retry
+ fs.Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
+ configKey = nil
+ envpw = ""
+ }
+ return goconfig.LoadFromReader(bytes.NewBuffer(out))
+}
+
+// checkPassword normalises and validates the password
+func checkPassword(password string) (string, error) {
+ if !utf8.ValidString(password) {
+ return "", errors.New("password contains invalid utf8 characters")
+ }
+ // Check for leading/trailing whitespace
+ trimmedPassword := strings.TrimSpace(password)
+ // Warn user if password has leading+trailing whitespace
+ if len(password) != len(trimmedPassword) {
+ _, _ = fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
+ }
+ // Normalize to reduce weird variations.
+ password = norm.NFKC.String(password)
+ if len(password) == 0 || len(trimmedPassword) == 0 {
+ return "", errors.New("no characters in password")
+ }
+ return password, nil
+}
+
+// GetPassword asks the user for a password with the prompt given.
+func GetPassword(prompt string) string {
+ _, _ = fmt.Fprintln(PasswordPromptOutput, prompt)
+ for {
+ _, _ = fmt.Fprint(PasswordPromptOutput, "password:")
+ password := ReadPassword()
+ password, err := checkPassword(password)
+ if err == nil {
+ return password
+ }
+ _, _ = fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
+ }
+}
+
+// ChangePassword will query the user twice for the named password. If
+// the same password is entered it is returned.
+func ChangePassword(name string) string {
+ for {
+ a := GetPassword(fmt.Sprintf("Enter %s password:", name))
+ b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
+ if a == b {
+ return a
+ }
+ fmt.Println("Passwords do not match!")
+ }
+}
+
+// getConfigPassword will query the user for a password the
+// first time it is required.
+func getConfigPassword(q string) {
+ if len(configKey) != 0 {
+ return
+ }
+ for {
+ password := GetPassword(q)
+ err := setConfigPassword(password)
+ if err == nil {
+ return
+ }
+ _, _ = fmt.Fprintln(os.Stderr, "Error:", err)
+ }
+}
+
+// setConfigPassword will set the configKey to the hash of
+// the password. If the length of the password is
+// zero after trimming+normalization, an error is returned.
+func setConfigPassword(password string) error {
+ password, err := checkPassword(password)
+ if err != nil {
+ return err
+ }
+ // Create SHA256 hash of the password
+ sha := sha256.New()
+ _, err = sha.Write([]byte("[" + password + "][rclone-config]"))
+ if err != nil {
+ return err
+ }
+ configKey = sha.Sum(nil)
+ if PassConfigKeyForDaemonization {
+ tempFile, err := ioutil.TempFile("", "rclone")
+ if err != nil {
+ log.Fatalf("cannot create temp file to store configKey: %v", err)
+ }
+ _, err = tempFile.WriteString(obscure.MustObscure(string(configKey)))
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("error writing configKey to temp file and also error deleting it: %v", err)
+ }
+ log.Fatalf("error writing configKey to temp file: %v", err)
+ }
+ err = tempFile.Close()
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("error closing temp file with configKey and also error deleting it: %v", err)
+ }
+ log.Fatalf("error closing temp file with configKey: %v", err)
+ }
+ fs.Debugf(nil, "saving configKey to temp file")
+ err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name())
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %v", err)
+ }
+ log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %v", err)
+ }
+ }
+ return nil
+}
+
+// changeConfigPassword will query the user twice
+// for a password. If the same password is entered
+// twice the key is updated.
+func changeConfigPassword() {
+ err := setConfigPassword(ChangePassword("NEW configuration"))
+ if err != nil {
+ fmt.Printf("Failed to set config password: %v\n", err)
+ return
+ }
+}
+
+// saveConfig saves the configuration file.
+// If configKey has been set, the file will be encrypted.
+func saveConfig() error {
+ dir, name := filepath.Split(ConfigPath)
+ f, err := ioutil.TempFile(dir, name)
+ if err != nil {
+ return errors.Errorf("Failed to create temp file for new config: %v", err)
+ }
+ defer func() {
+ if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
+ fs.Errorf(nil, "Failed to remove temp config file: %v", err)
+ }
+ }()
+
+ var buf bytes.Buffer
+ err = goconfig.SaveConfigData(getConfigData(), &buf)
+ if err != nil {
+ return errors.Errorf("Failed to save config file: %v", err)
+ }
+
+ if len(configKey) == 0 {
+ if _, err := buf.WriteTo(f); err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+ } else {
+ _, _ = fmt.Fprintln(f, "# Encrypted rclone configuration File")
+ _, _ = fmt.Fprintln(f, "")
+ _, _ = fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
+
+ // Generate new nonce and write it to the start of the ciphertext
+ var nonce [24]byte
+ n, _ := rand.Read(nonce[:])
+ if n != 24 {
+ return errors.Errorf("nonce short read: %d", n)
+ }
+ enc := base64.NewEncoder(base64.StdEncoding, f)
+ _, err = enc.Write(nonce[:])
+ if err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+
+ var key [32]byte
+ copy(key[:], configKey[:32])
+
+ b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
+ _, err = enc.Write(b)
+ if err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+ _ = enc.Close()
+ }
+
+ err = f.Close()
+ if err != nil {
+ return errors.Errorf("Failed to close config file: %v", err)
+ }
+
+ var fileMode os.FileMode = 0600
+ info, err := os.Stat(ConfigPath)
+ if err != nil {
+ fs.Debugf(nil, "Using default permissions for config file: %v", fileMode)
+ } else if info.Mode() != fileMode {
+ fs.Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode())
+ fileMode = info.Mode()
+ }
+
+ attemptCopyGroup(ConfigPath, f.Name())
+
+ err = os.Chmod(f.Name(), fileMode)
+ if err != nil {
+ fs.Errorf(nil, "Failed to set permissions on config file: %v", err)
+ }
+
+ if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) {
+ return errors.Errorf("Failed to move previous config to backup location: %v", err)
+ }
+ if err = os.Rename(f.Name(), ConfigPath); err != nil {
+ return errors.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
+ }
+ if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) {
+ fs.Errorf(nil, "Failed to remove backup config file: %v", err)
+ }
+ return nil
+}
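+
+// For reference (illustrative, derived from the code above): when
+// configKey is set, the file written out has the layout
+//
+//	# Encrypted rclone configuration File
+//	<blank line>
+//	RCLONE_ENCRYPT_V0:
+//	base64(24-byte nonce || secretbox.Seal(config, &nonce, &key))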
+
+// SaveConfig calls saveConfig to write the configuration file,
+// retrying after a short sleep if it returns an error.
+func SaveConfig() {
+ var err error
+ for i := 0; i < fs.Config.LowLevelRetries+1; i++ {
+ if err = saveConfig(); err == nil {
+ return
+ }
+ waitingTimeMs := mathrand.Intn(1000)
+ time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
+ }
+ log.Fatalf("Failed to save config after %d tries: %v", fs.Config.LowLevelRetries, err)
+
+ return
+}
+
+// SetValueAndSave sets the key to the value and saves just that
+// value in the config file. It loads the old config file in from
+// disk first and overwrites the given value only.
+func SetValueAndSave(name, key, value string) (err error) {
+ // Set the value in config in case we fail to reload it
+ getConfigData().SetValue(name, key, value)
+ // Reload the config file
+ reloadedConfigFile, err := loadConfigFile()
+ if err == errorConfigFileNotFound {
+ // Config file not written yet so ignore reload
+ return nil
+ } else if err != nil {
+ return err
+ }
+ _, err = reloadedConfigFile.GetSection(name)
+ if err != nil {
+ // Section doesn't exist in the reloaded config so return the error
+ return err
+ }
+ // Update the config file with the reloaded version
+ configFile = reloadedConfigFile
+ // Set the value in the reloaded version
+ reloadedConfigFile.SetValue(name, key, value)
+ // Save it again
+ SaveConfig()
+ return nil
+}
+
+// ShowRemotes shows an overview of the config file
+func ShowRemotes() {
+ remotes := getConfigData().GetSectionList()
+ if len(remotes) == 0 {
+ return
+ }
+ sort.Strings(remotes)
+ fmt.Printf("%-20s %s\n", "Name", "Type")
+ fmt.Printf("%-20s %s\n", "====", "====")
+ for _, remote := range remotes {
+ fmt.Printf("%-20s %s\n", remote, FileGet(remote, "type"))
+ }
+}
+
+// ChooseRemote chooses a remote name
+func ChooseRemote() string {
+ remotes := getConfigData().GetSectionList()
+ sort.Strings(remotes)
+ return Choose("remote", remotes, nil, false)
+}
+
+// ReadLine reads a line from stdin, returning it with surrounding whitespace trimmed
+var ReadLine = func() string {
+ buf := bufio.NewReader(os.Stdin)
+ line, err := buf.ReadString('\n')
+ if err != nil {
+ log.Fatalf("Failed to read line: %v", err)
+ }
+ return strings.TrimSpace(line)
+}
+
+// Command prints the commands, each selectable by its first letter, and returns the letter chosen
+func Command(commands []string) byte {
+ opts := []string{}
+ for _, text := range commands {
+ fmt.Printf("%c) %s\n", text[0], text[1:])
+ opts = append(opts, text[:1])
+ }
+ optString := strings.Join(opts, "")
+ optHelp := strings.Join(opts, "/")
+ for {
+ fmt.Printf("%s> ", optHelp)
+ result := strings.ToLower(ReadLine())
+ if len(result) != 1 {
+ continue
+ }
+ i := strings.Index(optString, string(result[0]))
+ if i >= 0 {
+ return result[0]
+ }
+ }
+}
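+
+// Usage sketch (illustrative): each command string encodes its hotkey
+// as the first character, so a yes/no prompt looks like
+//
+//	switch Command([]string{"yYes", "nNo"}) {
+//	case 'y':
+//		// ... accepted
+//	case 'n':
+//		// ... declined
+//	}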
+
+// ConfirmWithDefault asks the user for Yes or No and returns true or false.
+//
+// If AutoConfirm is set, it will return the Default value passed in
+func ConfirmWithDefault(Default bool) bool {
+ if fs.Config.AutoConfirm {
+ return Default
+ }
+ return Command([]string{"yYes", "nNo"}) == 'y'
+}
+
+// Confirm asks the user for Yes or No and returns true or false
+//
+// If AutoConfirm is set, it will return true
+func Confirm() bool {
+ return ConfirmWithDefault(true)
+}
+
+// Choose one of the defaults or type a new string if newOk is set
+func Choose(what string, defaults, help []string, newOk bool) string {
+ valueDescription := "an existing"
+ if newOk {
+ valueDescription = "your own"
+ }
+ fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription)
+ for i, text := range defaults {
+ var lines []string
+ if help != nil {
+ parts := strings.Split(help[i], "\n")
+ lines = append(lines, parts...)
+ }
+ lines = append(lines, fmt.Sprintf("%q", text))
+ pos := i + 1
+ if len(lines) == 1 {
+ fmt.Printf("%2d > %s\n", pos, text)
+ } else {
+ mid := (len(lines) - 1) / 2
+ for i, line := range lines {
+ var sep rune
+ switch i {
+ case 0:
+ sep = '/'
+ case len(lines) - 1:
+ sep = '\\'
+ default:
+ sep = '|'
+ }
+ number := " "
+ if i == mid {
+ number = fmt.Sprintf("%2d", pos)
+ }
+ fmt.Printf("%s %c %s\n", number, sep, line)
+ }
+ }
+ }
+ for {
+ fmt.Printf("%s> ", what)
+ result := ReadLine()
+ i, err := strconv.Atoi(result)
+ if err != nil {
+ if newOk {
+ return result
+ }
+ for _, v := range defaults {
+ if result == v {
+ return result
+ }
+ }
+ continue
+ }
+ if i >= 1 && i <= len(defaults) {
+ return defaults[i-1]
+ }
+ }
+}
+
+// ChooseNumber asks the user to enter a number between min and max
+// inclusive prompting them with what.
+func ChooseNumber(what string, min, max int) int {
+ for {
+ fmt.Printf("%s> ", what)
+ result := ReadLine()
+ i, err := strconv.Atoi(result)
+ if err != nil {
+ fmt.Printf("Bad number: %v\n", err)
+ continue
+ }
+ if i < min || i > max {
+ fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
+ continue
+ }
+ return i
+ }
+}
+
+// ShowRemote shows the contents of the remote
+func ShowRemote(name string) {
+ fmt.Printf("--------------------\n")
+ fmt.Printf("[%s]\n", name)
+ fs := MustFindByName(name)
+ for _, key := range getConfigData().GetKeyList(name) {
+ isPassword := false
+ for _, option := range fs.Options {
+ if option.Name == key && option.IsPassword {
+ isPassword = true
+ break
+ }
+ }
+ value := FileGet(name, key)
+ if isPassword && value != "" {
+ fmt.Printf("%s = *** ENCRYPTED ***\n", key)
+ } else {
+ fmt.Printf("%s = %s\n", key, value)
+ }
+ }
+ fmt.Printf("--------------------\n")
+}
+
+// OkRemote prints the contents of the remote and asks if it is OK
+func OkRemote(name string) bool {
+ ShowRemote(name)
+ switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
+ case 'y':
+ return true
+ case 'e':
+ return false
+ case 'd':
+ getConfigData().DeleteSection(name)
+ return true
+ default:
+ fs.Errorf(nil, "Bad choice %c", i)
+ }
+ return false
+}
+
+// MustFindByName finds the RegInfo for the remote name passed in or
+// exits with a fatal error.
+func MustFindByName(name string) *fs.RegInfo {
+ fsType := FileGet(name, "type")
+ if fsType == "" {
+ log.Fatalf("Couldn't find type of fs for %q", name)
+ }
+ return fs.MustFind(fsType)
+}
+
+// RemoteConfig runs the config helper for the remote if needed
+func RemoteConfig(name string) {
+ fmt.Printf("Remote config\n")
+ f := MustFindByName(name)
+ if f.Config != nil {
+ m := fs.ConfigMap(f, name)
+ f.Config(name, m)
+ }
+}
+
+// matchProvider returns true if provider matches the providerConfig string.
+//
+// The providerConfig string can either be a list of providers to
+// match, or if it starts with "!" it will be a list of providers not
+// to match.
+//
+// If either providerConfig or provider is blank then it will return true
+func matchProvider(providerConfig, provider string) bool {
+ if providerConfig == "" || provider == "" {
+ return true
+ }
+ negate := false
+ if strings.HasPrefix(providerConfig, "!") {
+ providerConfig = providerConfig[1:]
+ negate = true
+ }
+ providers := strings.Split(providerConfig, ",")
+ matched := false
+ for _, p := range providers {
+ if p == provider {
+ matched = true
+ break
+ }
+ }
+ if negate {
+ return !matched
+ }
+ return matched
+}
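+
+// For example (illustrative provider names):
+//
+//	matchProvider("AWS,Alibaba", "AWS")  // true - listed
+//	matchProvider("!AWS", "AWS")         // false - negated
+//	matchProvider("", "AWS")             // true - blank matches all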
+
+// ChooseOption asks the user to choose an option
+func ChooseOption(o *fs.Option, name string) string {
+ var subProvider = getConfigData().MustValue(name, fs.ConfigProvider, "")
+ fmt.Println(o.Help)
+ if o.IsPassword {
+ actions := []string{"yYes type in my own password", "gGenerate random password"}
+ if !o.Required {
+ actions = append(actions, "nNo leave this optional password blank")
+ }
+ var password string
+ switch i := Command(actions); i {
+ case 'y':
+ password = ChangePassword("the")
+ case 'g':
+ for {
+ fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
+ bits := ChooseNumber("Bits", 64, 1024)
+ bytes := bits / 8
+ if bits%8 != 0 {
+ bytes++
+ }
+ var pw = make([]byte, bytes)
+ n, _ := rand.Read(pw)
+ if n != bytes {
+ log.Fatalf("password short read: %d", n)
+ }
+ password = base64.RawURLEncoding.EncodeToString(pw)
+ fmt.Printf("Your password is: %s\n", password)
+ fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
+ "password itself) will be stored under your \nconfiguration file, so keep this generated password " +
+ "in a safe place.\n")
+ if Confirm() {
+ break
+ }
+ }
+ case 'n':
+ return ""
+ default:
+ fs.Errorf(nil, "Bad choice %c", i)
+ }
+ return obscure.MustObscure(password)
+ }
+ what := fmt.Sprintf("%T value", o.Default)
+ switch o.Default.(type) {
+ case bool:
+ what = "boolean value (true or false)"
+ case fs.SizeSuffix:
+ what = "size with suffix k,M,G,T"
+ case fs.Duration:
+ what = "duration s,m,h,d,w,M,y"
+ case int, int8, int16, int32, int64:
+ what = "signed integer"
+ case uint, byte, uint16, uint32, uint64:
+ what = "unsigned integer"
+ }
+ var in string
+ for {
+ fmt.Printf("Enter a %s. Press Enter for the default (%q).\n", what, fmt.Sprint(o.Default))
+ if len(o.Examples) > 0 {
+ var values []string
+ var help []string
+ for _, example := range o.Examples {
+ if matchProvider(example.Provider, subProvider) {
+ values = append(values, example.Value)
+ help = append(help, example.Help)
+ }
+ }
+ in = Choose(o.Name, values, help, true)
+ } else {
+ fmt.Printf("%s> ", o.Name)
+ in = ReadLine()
+ }
+ if in == "" {
+ if o.Required && fmt.Sprint(o.Default) == "" {
+ fmt.Printf("This value is required and it has no default.\n")
+ continue
+ }
+ break
+ }
+ newIn, err := configstruct.StringToInterface(o.Default, in)
+ if err != nil {
+ fmt.Printf("Failed to parse %q: %v\n", in, err)
+ continue
+ }
+ in = fmt.Sprint(newIn) // canonicalise
+ break
+ }
+ return in
+}
+
+// UpdateRemote adds the keyValues passed in to the remote of name.
+// keyValues should be key, value pairs.
+func UpdateRemote(name string, keyValues []string) error {
+ if len(keyValues)%2 != 0 {
+ return errors.New("found key without value")
+ }
+ // Set the config
+ for i := 0; i < len(keyValues); i += 2 {
+ getConfigData().SetValue(name, keyValues[i], keyValues[i+1])
+ }
+ RemoteConfig(name)
+ ShowRemote(name)
+ SaveConfig()
+ return nil
+}
+
+// CreateRemote creates a new remote with name, provider and a list of
+// parameters which are key, value pairs. Any existing remote of the
+// same name is deleted first.
+func CreateRemote(name string, provider string, keyValues []string) error {
+ // Suppress Confirm
+ fs.Config.AutoConfirm = true
+ // Delete the old config if it exists
+ getConfigData().DeleteSection(name)
+ // Set the type
+ getConfigData().SetValue(name, "type", provider)
+ // Show this is automatically configured
+ getConfigData().SetValue(name, ConfigAutomatic, "yes")
+ // Set the remaining values
+ return UpdateRemote(name, keyValues)
+}
+
+// PasswordRemote sets the single key, value pair passed in on the
+// remote of name, obscuring the value as a password first.
+func PasswordRemote(name string, keyValues []string) error {
+ if len(keyValues) != 2 {
+ return errors.New("found key without value")
+ }
+ // Suppress Confirm
+ fs.Config.AutoConfirm = true
+ passwd := obscure.MustObscure(keyValues[1])
+ if passwd != "" {
+ getConfigData().SetValue(name, keyValues[0], passwd)
+ RemoteConfig(name)
+ ShowRemote(name)
+ SaveConfig()
+ }
+ return nil
+}
+
+// JSONListProviders prints all the providers and options in JSON format
+func JSONListProviders() error {
+ b, err := json.MarshalIndent(fs.Registry, "", " ")
+ if err != nil {
+ return errors.Wrap(err, "failed to marshal examples")
+ }
+ _, err = os.Stdout.Write(b)
+ if err != nil {
+ return errors.Wrap(err, "failed to write providers list")
+ }
+ return nil
+}
+
+// fsOption returns an Option describing the possible remotes
+func fsOption() *fs.Option {
+ o := &fs.Option{
+ Name: "Storage",
+ Help: "Type of storage to configure.",
+ Default: "",
+ }
+ for _, item := range fs.Registry {
+ example := fs.OptionExample{
+ Value: item.Name,
+ Help: item.Description,
+ }
+ o.Examples = append(o.Examples, example)
+ }
+ o.Examples.Sort()
+ return o
+}
+
+// NewRemoteName asks the user for a name for a remote
+func NewRemoteName() (name string) {
+ for {
+ fmt.Printf("name> ")
+ name = ReadLine()
+ parts := fspath.Matcher.FindStringSubmatch(name + ":")
+ switch {
+ case name == "":
+ fmt.Printf("Can't use empty name.\n")
+ case driveletter.IsDriveLetter(name):
+ fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
+ case parts == nil:
+ fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
+ default:
+ return name
+ }
+ }
+}
+
+// editOptions edits the options. If isNew is true then it just allows
+// entry and doesn't show any old values.
+func editOptions(ri *fs.RegInfo, name string, isNew bool) {
+ hasAdvanced := false
+ for _, advanced := range []bool{false, true} {
+ if advanced {
+ if !hasAdvanced {
+ break
+ }
+ fmt.Printf("Edit advanced config? (y/n)\n")
+ if !Confirm() {
+ break
+ }
+ }
+ for _, option := range ri.Options {
+ hasAdvanced = hasAdvanced || option.Advanced
+ if option.Advanced != advanced {
+ continue
+ }
+ subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "")
+ if matchProvider(option.Provider, subProvider) {
+ if !isNew {
+ fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name))
+ fmt.Printf("Edit? (y/n)>\n")
+ if !Confirm() {
+ continue
+ }
+ }
+ FileSet(name, option.Name, ChooseOption(&option, name))
+ }
+ }
+ }
+}
+
+// NewRemote makes a new remote from its name
+func NewRemote(name string) {
+ var (
+ newType string
+ ri *fs.RegInfo
+ err error
+ )
+
+ // Set the type first
+ for {
+ newType = ChooseOption(fsOption(), name)
+ ri, err = fs.Find(newType)
+ if err != nil {
+ fmt.Printf("Bad remote %q: %v\n", newType, err)
+ continue
+ }
+ break
+ }
+ getConfigData().SetValue(name, "type", newType)
+
+ editOptions(ri, name, true)
+ RemoteConfig(name)
+ if OkRemote(name) {
+ SaveConfig()
+ return
+ }
+ EditRemote(ri, name)
+}
+
+// EditRemote gets the user to edit a remote
+func EditRemote(ri *fs.RegInfo, name string) {
+ ShowRemote(name)
+ fmt.Printf("Edit remote\n")
+ for {
+ editOptions(ri, name, false)
+ if OkRemote(name) {
+ break
+ }
+ }
+ SaveConfig()
+ RemoteConfig(name)
+}
+
+// DeleteRemote gets the user to delete a remote
+func DeleteRemote(name string) {
+ getConfigData().DeleteSection(name)
+ SaveConfig()
+}
+
+// copyRemote asks the user for a new remote name and copies name into
+// it. Returns the new name.
+func copyRemote(name string) string {
+ newName := NewRemoteName()
+ // Copy the keys
+ for _, key := range getConfigData().GetKeyList(name) {
+ value := getConfigData().MustValue(name, key, "")
+ getConfigData().SetValue(newName, key, value)
+ }
+ return newName
+}
+
+// RenameRemote renames a config section
+func RenameRemote(name string) {
+ fmt.Printf("Enter new name for %q remote.\n", name)
+ newName := copyRemote(name)
+ if name != newName {
+ getConfigData().DeleteSection(name)
+ SaveConfig()
+ }
+}
+
+// CopyRemote copies a config section
+func CopyRemote(name string) {
+ fmt.Printf("Enter name for copy of %q remote.\n", name)
+ copyRemote(name)
+ SaveConfig()
+}
+
+// ShowConfigLocation prints the location of the config file in use
+func ShowConfigLocation() {
+ if _, err := os.Stat(ConfigPath); os.IsNotExist(err) {
+ fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
+ } else {
+ fmt.Println("Configuration file is stored at:")
+ }
+ fmt.Printf("%s\n", ConfigPath)
+}
+
+// ShowConfig prints the (unencrypted) config options
+func ShowConfig() {
+ var buf bytes.Buffer
+ if err := goconfig.SaveConfigData(getConfigData(), &buf); err != nil {
+ log.Fatalf("Failed to serialize config: %v", err)
+ }
+ str := buf.String()
+ if str == "" {
+ str = "; empty config\n"
+ }
+ fmt.Printf("%s", str)
+}
+
+// EditConfig edits the config file interactively
+func EditConfig() {
+ for {
+ haveRemotes := len(getConfigData().GetSectionList()) != 0
+ what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
+ if haveRemotes {
+ fmt.Printf("Current remotes:\n\n")
+ ShowRemotes()
+ fmt.Printf("\n")
+ } else {
+ fmt.Printf("No remotes found - make a new one\n")
+ // take 2nd item and last 2 items of menu list
+ what = append(what[1:2], what[len(what)-2:]...)
+ }
+ switch i := Command(what); i {
+ case 'e':
+ name := ChooseRemote()
+ fs := MustFindByName(name)
+ EditRemote(fs, name)
+ case 'n':
+ NewRemote(NewRemoteName())
+ case 'd':
+ name := ChooseRemote()
+ DeleteRemote(name)
+ case 'r':
+ RenameRemote(ChooseRemote())
+ case 'c':
+ CopyRemote(ChooseRemote())
+ case 's':
+ SetPassword()
+ case 'q':
+ return
+
+ }
+ }
+}
+
+// SetPassword will allow the user to modify the current
+// configuration encryption settings.
+func SetPassword() {
+ for {
+ if len(configKey) > 0 {
+ fmt.Println("Your configuration is encrypted.")
+ what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
+ switch i := Command(what); i {
+ case 'c':
+ changeConfigPassword()
+ SaveConfig()
+ fmt.Println("Password changed")
+ continue
+ case 'u':
+ configKey = nil
+ SaveConfig()
+ continue
+ case 'q':
+ return
+ }
+
+ } else {
+ fmt.Println("Your configuration is not encrypted.")
+ fmt.Println("If you add a password, you will protect your login information to cloud services.")
+ what := []string{"aAdd Password", "qQuit to main menu"}
+ switch i := Command(what); i {
+ case 'a':
+ changeConfigPassword()
+ SaveConfig()
+ fmt.Println("Password set")
+ continue
+ case 'q':
+ return
+ }
+ }
+ }
+}
+
+// Authorize is for remote authorization of headless machines.
+//
+// It expects 1 or 3 arguments
+//
+// rclone authorize "fs name"
+// rclone authorize "fs name" "client id" "client secret"
+func Authorize(args []string) {
+ switch len(args) {
+ case 1, 3:
+ default:
+ log.Fatalf("Invalid number of arguments: %d", len(args))
+ }
+ newType := args[0]
+ f := fs.MustFind(newType)
+ if f.Config == nil {
+ log.Fatalf("Can't authorize fs %q", newType)
+ }
+ // Name used for temporary fs
+ name := "**temp-fs**"
+
+ // Make sure we delete it
+ defer DeleteRemote(name)
+
+ // Indicate that we want fully automatic configuration.
+ getConfigData().SetValue(name, ConfigAutomatic, "yes")
+ if len(args) == 3 {
+ getConfigData().SetValue(name, ConfigClientID, args[1])
+ getConfigData().SetValue(name, ConfigClientSecret, args[2])
+ }
+ m := fs.ConfigMap(f, name)
+ f.Config(name, m)
+}
+
+// FileGetFlag gets the config key under section returning the
+// value and true if found, or ("", false) otherwise
+func FileGetFlag(section, key string) (string, bool) {
+ newValue, err := getConfigData().GetValue(section, key)
+ return newValue, err == nil
+}
+
+// FileGet gets the config key under section returning the
+// default or empty string if not set.
+//
+// It looks up defaults in the environment if they are present
+func FileGet(section, key string, defaultVal ...string) string {
+ envKey := fs.ConfigToEnv(section, key)
+ newValue, found := os.LookupEnv(envKey)
+ if found {
+ defaultVal = []string{newValue}
+ }
+ return getConfigData().MustValue(section, key, defaultVal...)
+}
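+
+// For example (illustrative, assuming fs.ConfigToEnv maps to the
+// RCLONE_CONFIG_<SECTION>_<KEY> convention used by matchEnv below):
+// with RCLONE_CONFIG_MYREMOTE_TYPE=s3 in the environment,
+// FileGet("myremote", "type") returns "s3" if the config file has no
+// explicit value for it.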
+
+// FileSet sets the key in section to value. It doesn't save
+// the config file.
+func FileSet(section, key, value string) {
+ if value != "" {
+ getConfigData().SetValue(section, key, value)
+ } else {
+ FileDeleteKey(section, key)
+ }
+}
+
+// FileDeleteKey deletes the config key in the config file.
+// It returns true if the key was deleted,
+// or returns false if the section or key didn't exist.
+func FileDeleteKey(section, key string) bool {
+ return getConfigData().DeleteKey(section, key)
+}
+
+var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`)
+
+// FileSections returns the sections in the config file
+// including any defined by environment variables.
+func FileSections() []string {
+ sections := getConfigData().GetSectionList()
+ for _, item := range os.Environ() {
+ matches := matchEnv.FindStringSubmatch(item)
+ if len(matches) == 2 {
+ sections = append(sections, strings.ToLower(matches[1]))
+ }
+ }
+ return sections
+}
+
+// Dump dumps all the config as a JSON file
+func Dump() error {
+ dump := make(map[string]map[string]string)
+ for _, name := range getConfigData().GetSectionList() {
+ params := make(map[string]string)
+ for _, key := range getConfigData().GetKeyList(name) {
+ params[key] = FileGet(name, key)
+ }
+ dump[name] = params
+ }
+ b, err := json.MarshalIndent(dump, "", " ")
+ if err != nil {
+ return errors.Wrap(err, "failed to marshal config dump")
+ }
+ _, err = os.Stdout.Write(b)
+ if err != nil {
+ return errors.Wrap(err, "failed to write config dump")
+ }
+ return nil
+}
+
+// makeCacheDir returns a directory to use for caching.
+//
+// Code borrowed from go stdlib until it is made public
+func makeCacheDir() (dir string) {
+ // Compute default location.
+ switch runtime.GOOS {
+ case "windows":
+ dir = os.Getenv("LocalAppData")
+
+ case "darwin":
+ dir = os.Getenv("HOME")
+ if dir != "" {
+ dir += "/Library/Caches"
+ }
+
+ case "plan9":
+ dir = os.Getenv("home")
+ if dir != "" {
+ // Plan 9 has no established per-user cache directory,
+ // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix.
+ dir += "/lib/cache"
+ }
+
+ default: // Unix
+ // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+ dir = os.Getenv("XDG_CACHE_HOME")
+ if dir == "" {
+ dir = os.Getenv("HOME")
+ if dir != "" {
+ dir += "/.cache"
+ }
+ }
+ }
+
+ // if no dir found then use TempDir - we will have a cachedir!
+ if dir == "" {
+ dir = os.TempDir()
+ }
+ return filepath.Join(dir, "rclone")
+}
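+
+// For example (illustrative): on Linux with XDG_CACHE_HOME unset this
+// returns $HOME/.cache/rclone, and on Windows %LocalAppData%\rclone.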
diff --git a/vendor/github.com/ncw/rclone/fs/config/config_other.go b/vendor/github.com/ncw/rclone/fs/config/config_other.go
new file mode 100755
index 0000000..e9024a8
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/config_other.go
@@ -0,0 +1,10 @@
+// Read, write and edit the config file
+// Non-unix specific functions.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package config
+
+// attemptCopyGroup tries to keep the group the same, which only makes sense
+// for systems with a user-group-world permission model.
+func attemptCopyGroup(fromPath, toPath string) {}
diff --git a/vendor/github.com/ncw/rclone/fs/config/config_read_password.go b/vendor/github.com/ncw/rclone/fs/config/config_read_password.go
new file mode 100755
index 0000000..9baf379
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/config_read_password.go
@@ -0,0 +1,29 @@
+// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
+// See https://github.com/golang/go/issues/14441 - plan9
+// https://github.com/golang/go/issues/13085 - solaris
+
+// +build !solaris,!plan9
+
+package config
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+// ReadPassword reads a password without echoing it to the terminal.
+func ReadPassword() string {
+ stdin := int(os.Stdin.Fd())
+ if !terminal.IsTerminal(stdin) {
+ return ReadLine()
+ }
+ line, err := terminal.ReadPassword(stdin)
+ _, _ = fmt.Fprintln(os.Stderr)
+ if err != nil {
+ log.Fatalf("Failed to read password: %v", err)
+ }
+ return string(line)
+}
diff --git a/vendor/github.com/ncw/rclone/fs/config/config_read_password_unsupported.go b/vendor/github.com/ncw/rclone/fs/config/config_read_password_unsupported.go
new file mode 100755
index 0000000..eb76244
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/config_read_password_unsupported.go
@@ -0,0 +1,12 @@
+// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
+// See https://github.com/golang/go/issues/14441 - plan9
+// https://github.com/golang/go/issues/13085 - solaris
+
+// +build solaris plan9
+
+package config
+
+// ReadPassword reads a password while echoing it to the terminal.
+func ReadPassword() string {
+ return ReadLine()
+}
diff --git a/vendor/github.com/ncw/rclone/fs/config/config_unix.go b/vendor/github.com/ncw/rclone/fs/config/config_unix.go
new file mode 100755
index 0000000..6cda925
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/config_unix.go
@@ -0,0 +1,37 @@
+// Read, write and edit the config file
+// Unix specific functions.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package config
+
+import (
+ "os"
+ "os/user"
+ "strconv"
+ "syscall"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// attemptCopyGroup tries to keep the group the same. The user will be the
+// one currently running this process.
+func attemptCopyGroup(fromPath, toPath string) {
+ info, err := os.Stat(fromPath)
+ if err != nil || info.Sys() == nil {
+ return
+ }
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ uid := int(stat.Uid)
+ // prefer self over previous owner of file, because it has a higher chance
+ // of success
+ if user, err := user.Current(); err == nil {
+ if tmpUID, err := strconv.Atoi(user.Uid); err == nil {
+ uid = tmpUID
+ }
+ }
+ if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
+ fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/ncw/rclone/fs/config/configmap/configmap.go b/vendor/github.com/ncw/rclone/fs/config/configmap/configmap.go
new file mode 100755
index 0000000..2d1267a
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/configmap/configmap.go
@@ -0,0 +1,86 @@
+// Package configmap provides an abstraction for reading and writing config
+package configmap
+
+// Getter provides an interface to get config items
+type Getter interface {
+ // Get should get an item with the key passed in and return
+ // the value. If the item is found then it should return true,
+ // otherwise false.
+ Get(key string) (value string, ok bool)
+}
+
+// Setter provides an interface to set config items
+type Setter interface {
+ // Set should set an item into persistent config store.
+ Set(key, value string)
+}
+
+// Mapper provides an interface to read and write config
+type Mapper interface {
+ Getter
+ Setter
+}
+
+// Map provides a wrapper around multiple Setter and
+// Getter interfaces.
+type Map struct {
+ setters []Setter
+ getters []Getter
+}
+
+// New returns an empty Map
+func New() *Map {
+ return &Map{}
+}
+
+// AddGetter appends a getter onto the end of the getters
+func (c *Map) AddGetter(getter Getter) *Map {
+ c.getters = append(c.getters, getter)
+ return c
+}
+
+// AddGetters appends multiple getters onto the end of the getters
+func (c *Map) AddGetters(getters ...Getter) *Map {
+ c.getters = append(c.getters, getters...)
+ return c
+}
+
+// AddSetter appends a setter onto the end of the setters
+func (c *Map) AddSetter(setter Setter) *Map {
+ c.setters = append(c.setters, setter)
+ return c
+}
+
+// Get gets an item with the key passed in and returns the value from
+// the first getter that has it. If the item is found then it returns
+// true, otherwise false.
+func (c *Map) Get(key string) (value string, ok bool) {
+ for _, do := range c.getters {
+ value, ok = do.Get(key)
+ if ok {
+ return value, ok
+ }
+ }
+ return "", false
+}
+
+// Set sets an item into all the stored setters.
+func (c *Map) Set(key, value string) {
+ for _, do := range c.setters {
+ do.Set(key, value)
+ }
+}
+
+// Simple is a simple Mapper for testing
+type Simple map[string]string
+
+// Get the value
+func (c Simple) Get(key string) (value string, ok bool) {
+ value, ok = c[key]
+ return value, ok
+}
+
+// Set the value
+func (c Simple) Set(key, value string) {
+ c[key] = value
+}
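+
+// Usage sketch (illustrative): Simple implements both Getter and
+// Setter, so it can back a Map in tests:
+//
+//	m := New().AddGetter(Simple{"user": "alice"})
+//	v, ok := m.Get("user") // "alice", true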
diff --git a/vendor/github.com/ncw/rclone/fs/config/configstruct/configstruct.go b/vendor/github.com/ncw/rclone/fs/config/configstruct/configstruct.go
new file mode 100755
index 0000000..e2f57c4
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/configstruct/configstruct.go
@@ -0,0 +1,127 @@
+// Package configstruct parses unstructured maps into structures
+package configstruct
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/pkg/errors"
+)
+
+var matchUpper = regexp.MustCompile("([A-Z]+)")
+
+// camelToSnake converts CamelCase to snake_case
+func camelToSnake(in string) string {
+ out := matchUpper.ReplaceAllString(in, "_$1")
+ out = strings.ToLower(out)
+ out = strings.Trim(out, "_")
+ return out
+}
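+
+// For example (illustrative): camelToSnake("MaxAge") returns "max_age";
+// note that runs of capitals stay together, so "DumpHTTP" becomes
+// "dump_http".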
+
+// StringToInterface turns in into an interface{} the same type as def
+func StringToInterface(def interface{}, in string) (newValue interface{}, err error) {
+ typ := reflect.TypeOf(def)
+ switch typ.Kind() {
+ case reflect.String:
+ // Pass strings unmodified
+ return in, nil
+ }
+ // Otherwise parse with Sscanln
+ //
+ // This means any types we use here must implement fmt.Scanner
+ o := reflect.New(typ)
+ n, err := fmt.Sscanln(in, o.Interface())
+ if err != nil {
+ return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def)
+ }
+ if n != 1 {
+ return newValue, errors.New("no items parsed")
+ }
+ return o.Elem().Interface(), nil
+}
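+
+// For example (illustrative): the default value picks the type to
+// parse into:
+//
+//	v, err := StringToInterface(int(0), "42")  // v == int(42)
+//	v, err = StringToInterface(false, "true")  // v == bool(true)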
+
+// Item describes a single entry in the options structure
+type Item struct {
+ Name string // snake_case
+ Field string // CamelCase
+ Num int // number of the field in the struct
+ Value interface{}
+}
+
+// Items parses the opt struct and returns a slice of Item objects.
+//
+// opt must be a pointer to a struct. The struct should have entirely
+// public fields.
+//
+// The config name is taken from a struct tag called "config" or, if
+// that is absent, from the field name converted from CamelCase to snake_case.
+func Items(opt interface{}) (items []Item, err error) {
+ def := reflect.ValueOf(opt)
+ if def.Kind() != reflect.Ptr {
+ return nil, errors.New("argument must be a pointer")
+ }
+ def = def.Elem() // indirect the pointer
+ if def.Kind() != reflect.Struct {
+ return nil, errors.New("argument must be a pointer to a struct")
+ }
+ defType := def.Type()
+ for i := 0; i < def.NumField(); i++ {
+ field := defType.Field(i)
+ fieldName := field.Name
+ configName, ok := field.Tag.Lookup("config")
+ if !ok {
+ configName = camelToSnake(fieldName)
+ }
+ defaultItem := Item{
+ Name: configName,
+ Field: fieldName,
+ Num: i,
+ Value: def.Field(i).Interface(),
+ }
+ items = append(items, defaultItem)
+ }
+ return items, nil
+}
+
+// Set interprets the field names in defaults and looks up config
+// values in the config passed in. Any values found in config will be
+// set in the opt structure.
+//
+// opt must be a pointer to a struct. The struct should have entirely
+// public fields. Each field name is converted from CamelCase to
+// snake_case and looked up in the config supplied, unless a
+// `config:"field_name"` tag supplies the name to look up instead.
+//
+// If items are found then they are converted from string to native
+// types and set in opt.
+//
+// All the field types in the struct must implement fmt.Scanner.
+func Set(config configmap.Getter, opt interface{}) (err error) {
+ defaultItems, err := Items(opt)
+ if err != nil {
+ return err
+ }
+ defStruct := reflect.ValueOf(opt).Elem()
+ for _, defaultItem := range defaultItems {
+ newValue := defaultItem.Value
+ if configValue, ok := config.Get(defaultItem.Name); ok {
+ var newNewValue interface{}
+ newNewValue, err = StringToInterface(newValue, configValue)
+ if err != nil {
+ // Mask errors if setting an empty string as
+ // it isn't valid for all types. This makes
+ // empty string be the equivalent of unset.
+ if configValue != "" {
+ return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value)
+ }
+ } else {
+ newValue = newNewValue
+ }
+ }
+ defStruct.Field(defaultItem.Num).Set(reflect.ValueOf(newValue))
+ }
+ return nil
+}
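+
+// Usage sketch (illustrative, hypothetical Options type):
+//
+//	type Options struct {
+//		MaxAge int `config:"max_age"`
+//	}
+//	opt := Options{MaxAge: 7}
+//	err := Set(configmap.Simple{"max_age": "42"}, &opt)
+//	// on success opt.MaxAge == 42; absent keys keep their defaults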
diff --git a/vendor/github.com/ncw/rclone/fs/config/obscure/obscure.go b/vendor/github.com/ncw/rclone/fs/config/obscure/obscure.go
new file mode 100755
index 0000000..2f2261f
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config/obscure/obscure.go
@@ -0,0 +1,94 @@
+// Package obscure contains the Obscure and Reveal commands
+package obscure
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "encoding/base64"
+ "io"
+ "log"
+
+ "github.com/pkg/errors"
+)
+
+// crypt internals
+var (
+ cryptKey = []byte{
+ 0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
+ 0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
+ 0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
+ 0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
+ }
+ cryptBlock cipher.Block
+ cryptRand = rand.Reader
+)
+
+// crypt transforms in to out using iv under AES-CTR.
+//
+// in and out may be the same buffer.
+//
+// Note encryption and decryption are the same operation
+func crypt(out, in, iv []byte) error {
+ if cryptBlock == nil {
+ var err error
+ cryptBlock, err = aes.NewCipher(cryptKey)
+ if err != nil {
+ return err
+ }
+ }
+ stream := cipher.NewCTR(cryptBlock, iv)
+ stream.XORKeyStream(out, in)
+ return nil
+}
+
+// Obscure a value
+//
+// This is done by encrypting with AES-CTR
+func Obscure(x string) (string, error) {
+ plaintext := []byte(x)
+ ciphertext := make([]byte, aes.BlockSize+len(plaintext))
+ iv := ciphertext[:aes.BlockSize]
+ if _, err := io.ReadFull(cryptRand, iv); err != nil {
+ return "", errors.Wrap(err, "failed to read iv")
+ }
+ if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
+ return "", errors.Wrap(err, "encrypt failed")
+ }
+ return base64.RawURLEncoding.EncodeToString(ciphertext), nil
+}
+
+// MustObscure obscures a value, exiting with a fatal error if it failed
+func MustObscure(x string) string {
+ out, err := Obscure(x)
+ if err != nil {
+ log.Fatalf("Obscure failed: %v", err)
+ }
+ return out
+}
+
+// Reveal an obscured value
+func Reveal(x string) (string, error) {
+ ciphertext, err := base64.RawURLEncoding.DecodeString(x)
+ if err != nil {
+ return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
+ }
+ if len(ciphertext) < aes.BlockSize {
+ return "", errors.New("input too short when revealing password - is it obscured?")
+ }
+ buf := ciphertext[aes.BlockSize:]
+ iv := ciphertext[:aes.BlockSize]
+ if err := crypt(buf, buf, iv); err != nil {
+ return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
+ }
+ return string(buf), nil
+}
+
+// MustReveal reveals an obscured value, exiting with a fatal error if it failed
+func MustReveal(x string) string {
+ out, err := Reveal(x)
+ if err != nil {
+ log.Fatalf("Reveal failed: %v", err)
+ }
+ return out
+}
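+
+// Round-trip sketch (illustrative):
+//
+//	obscured, _ := Obscure("secret")
+//	plain, _ := Reveal(obscured) // plain == "secret"
+//
+// Note that cryptKey is fixed in the source above, so this is
+// reversible obfuscation rather than real encryption.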
diff --git a/vendor/github.com/ncw/rclone/fs/config_list.go b/vendor/github.com/ncw/rclone/fs/config_list.go
new file mode 100755
index 0000000..ce1e529
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/config_list.go
@@ -0,0 +1,94 @@
+package fs
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+)
+
+// CommaSepList is a comma separated config value
+// It uses the encoding/csv rules for quoting and escaping
+type CommaSepList []string
+
+// SpaceSepList is a space separated config value
+// It uses the encoding/csv rules for quoting and escaping
+type SpaceSepList []string
+
+type genericList []string
+
+func (l CommaSepList) String() string {
+ return genericList(l).string(',')
+}
+
+// Set the List entries
+func (l *CommaSepList) Set(s string) error {
+ return (*genericList)(l).set(',', []byte(s))
+}
+
+// Type of the value
+func (CommaSepList) Type() string {
+ return "[]string"
+}
+
+// Scan implements the fmt.Scanner interface
+func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error {
+ return (*genericList)(l).scan(',', s, ch)
+}
+
+func (l SpaceSepList) String() string {
+ return genericList(l).string(' ')
+}
+
+// Set the List entries
+func (l *SpaceSepList) Set(s string) error {
+ return (*genericList)(l).set(' ', []byte(s))
+}
+
+// Type of the value
+func (SpaceSepList) Type() string {
+ return "[]string"
+}
+
+// Scan implements the fmt.Scanner interface
+func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error {
+ return (*genericList)(l).scan(' ', s, ch)
+}
+
+func (gl genericList) string(sep rune) string {
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ w.Comma = sep
+ err := w.Write(gl)
+ if err != nil {
+ // can only happen if w.Comma is invalid
+ panic(err)
+ }
+ w.Flush()
+ return string(bytes.TrimSpace(buf.Bytes()))
+}
+
+func (gl *genericList) set(sep rune, b []byte) error {
+ if len(b) == 0 {
+ *gl = nil
+ return nil
+ }
+ r := csv.NewReader(bytes.NewReader(b))
+ r.Comma = sep
+
+ record, err := r.Read()
+ switch _err := err.(type) {
+ case nil:
+ *gl = record
+ case *csv.ParseError:
+ err = _err.Err // remove line numbers from the error message
+ }
+ return err
+}
+
+func (gl *genericList) scan(sep rune, s fmt.ScanState, ch rune) error {
+ token, err := s.Token(true, func(rune) bool { return true })
+ if err != nil {
+ return err
+ }
+ return gl.set(sep, bytes.TrimSpace(token))
+}
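+
+// For example (illustrative): encoding/csv quoting applies, so
+//
+//	var l SpaceSepList
+//	_ = l.Set(`a "b c" d`) // l == SpaceSepList{"a", "b c", "d"}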
diff --git a/vendor/github.com/ncw/rclone/fs/deletemode.go b/vendor/github.com/ncw/rclone/fs/deletemode.go
new file mode 100755
index 0000000..9e16373
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/deletemode.go
@@ -0,0 +1,14 @@
+package fs
+
+// DeleteMode describes the possible delete modes in the config
+type DeleteMode byte
+
+// DeleteMode constants
+const (
+ DeleteModeOff DeleteMode = iota
+ DeleteModeBefore
+ DeleteModeDuring
+ DeleteModeAfter
+ DeleteModeOnly
+ DeleteModeDefault = DeleteModeAfter
+)
diff --git a/vendor/github.com/ncw/rclone/fs/dir.go b/vendor/github.com/ncw/rclone/fs/dir.go
new file mode 100755
index 0000000..891dc8d
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/dir.go
@@ -0,0 +1,97 @@
+package fs
+
+import "time"
+
+// Dir describes an unspecialized directory for directory/container/bucket lists
+type Dir struct {
+ remote string // name of the directory
+ modTime time.Time // modification or creation time - IsZero for unknown
+ size int64 // size of directory and contents or -1 if unknown
+ items int64 // number of objects or -1 for unknown
+ id string // optional ID
+}
+
+// NewDir creates an unspecialized Directory object
+func NewDir(remote string, modTime time.Time) *Dir {
+ return &Dir{
+ remote: remote,
+ modTime: modTime,
+ size: -1,
+ items: -1,
+ }
+}
+
+// NewDirCopy creates an unspecialized copy of the Directory object passed in
+func NewDirCopy(d Directory) *Dir {
+ return &Dir{
+ remote: d.Remote(),
+ modTime: d.ModTime(),
+ size: d.Size(),
+ items: d.Items(),
+ }
+}
+
+// String returns the name
+func (d *Dir) String() string {
+ return d.remote
+}
+
+// Remote returns the remote path
+func (d *Dir) Remote() string {
+ return d.remote
+}
+
+// SetRemote sets the remote
+func (d *Dir) SetRemote(remote string) *Dir {
+ d.remote = remote
+ return d
+}
+
+// ID gets the optional ID
+func (d *Dir) ID() string {
+ return d.id
+}
+
+// SetID sets the optional ID
+func (d *Dir) SetID(id string) *Dir {
+ d.id = id
+ return d
+}
+
+// ModTime returns the modification date of the directory.
+// It should return a best guess if one isn't available
+func (d *Dir) ModTime() time.Time {
+ if !d.modTime.IsZero() {
+ return d.modTime
+ }
+ return time.Now()
+}
+
+// Size returns the size of the directory and its contents, -1 if unknown
+func (d *Dir) Size() int64 {
+ return d.size
+}
+
+// SetSize sets the size of the directory
+func (d *Dir) SetSize(size int64) *Dir {
+ d.size = size
+ return d
+}
+
+// Items returns the count of items in this directory or this
+// directory and subdirectories if known, -1 for unknown
+func (d *Dir) Items() int64 {
+ return d.items
+}
+
+// SetItems sets the number of items in the directory
+func (d *Dir) SetItems(items int64) *Dir {
+ d.items = items
+ return d
+}
+
+// Check interfaces
+var (
+ _ DirEntry = (*Dir)(nil)
+ _ Directory = (*Dir)(nil)
+)
diff --git a/vendor/github.com/ncw/rclone/fs/direntries.go b/vendor/github.com/ncw/rclone/fs/direntries.go
new file mode 100755
index 0000000..b3ae9ea
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/direntries.go
@@ -0,0 +1,81 @@
+package fs
+
+import "fmt"
+
+// DirEntries is a slice of Object or *Dir
+type DirEntries []DirEntry
+
+// Len is part of sort.Interface.
+func (ds DirEntries) Len() int {
+ return len(ds)
+}
+
+// Swap is part of sort.Interface.
+func (ds DirEntries) Swap(i, j int) {
+ ds[i], ds[j] = ds[j], ds[i]
+}
+
+// Less is part of sort.Interface.
+func (ds DirEntries) Less(i, j int) bool {
+ return ds[i].Remote() < ds[j].Remote()
+}
+
+// ForObject runs the function supplied on every object in the entries
+func (ds DirEntries) ForObject(fn func(o Object)) {
+ for _, entry := range ds {
+ o, ok := entry.(Object)
+ if ok {
+ fn(o)
+ }
+ }
+}
+
+// ForObjectError runs the function supplied on every object in the entries
+func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
+ for _, entry := range ds {
+ o, ok := entry.(Object)
+ if ok {
+ err := fn(o)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// ForDir runs the function supplied on every Directory in the entries
+func (ds DirEntries) ForDir(fn func(dir Directory)) {
+ for _, entry := range ds {
+ dir, ok := entry.(Directory)
+ if ok {
+ fn(dir)
+ }
+ }
+}
+
+// ForDirError runs the function supplied on every Directory in the entries
+func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
+ for _, entry := range ds {
+ dir, ok := entry.(Directory)
+ if ok {
+ err := fn(dir)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// DirEntryType returns a string description of the DirEntry, either
+// "object", "directory" or "unknown type XXX"
+func DirEntryType(d DirEntry) string {
+ switch d.(type) {
+ case Object:
+ return "object"
+ case Directory:
+ return "directory"
+ }
+ return fmt.Sprintf("unknown type %T", d)
+}
diff --git a/vendor/github.com/ncw/rclone/fs/driveletter/driveletter.go b/vendor/github.com/ncw/rclone/fs/driveletter/driveletter.go
new file mode 100755
index 0000000..322b244
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/driveletter/driveletter.go
@@ -0,0 +1,14 @@
+// Package driveletter returns whether a name is a valid drive letter
+
+// +build !windows
+
+package driveletter
+
+// IsDriveLetter returns a bool indicating whether name is a valid
+// Windows drive letter
+//
+// On non windows platforms we don't have drive letters so we always
+// return false
+func IsDriveLetter(name string) bool {
+ return false
+}
diff --git a/vendor/github.com/ncw/rclone/fs/driveletter/driveletter_windows.go b/vendor/github.com/ncw/rclone/fs/driveletter/driveletter_windows.go
new file mode 100755
index 0000000..7f63b94
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/driveletter/driveletter_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package driveletter
+
+// IsDriveLetter returns a bool indicating whether name is a valid
+// Windows drive letter
+func IsDriveLetter(name string) bool {
+ if len(name) != 1 {
+ return false
+ }
+ c := name[0]
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
diff --git a/vendor/github.com/ncw/rclone/fs/dump.go b/vendor/github.com/ncw/rclone/fs/dump.go
new file mode 100755
index 0000000..ad3c984
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/dump.go
@@ -0,0 +1,93 @@
+package fs
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// DumpFlags describes the Dump options in force
+type DumpFlags int
+
+// DumpFlags definitions
+const (
+ DumpHeaders DumpFlags = 1 << iota
+ DumpBodies
+ DumpRequests
+ DumpResponses
+ DumpAuth
+ DumpFilters
+ DumpGoRoutines
+ DumpOpenFiles
+)
+
+var dumpFlags = []struct {
+ flag DumpFlags
+ name string
+}{
+ {DumpHeaders, "headers"},
+ {DumpBodies, "bodies"},
+ {DumpRequests, "requests"},
+ {DumpResponses, "responses"},
+ {DumpAuth, "auth"},
+ {DumpFilters, "filters"},
+ {DumpGoRoutines, "goroutines"},
+ {DumpOpenFiles, "openfiles"},
+}
+
+// DumpFlagsList is a list of dump flags used in the help
+var DumpFlagsList string
+
+func init() {
+ // calculate the dump flags list
+ var out []string
+ for _, info := range dumpFlags {
+ out = append(out, info.name)
+ }
+ DumpFlagsList = strings.Join(out, ",")
+}
+
+// String turns a DumpFlags into a string
+func (f DumpFlags) String() string {
+ var out []string
+ for _, info := range dumpFlags {
+ if f&info.flag != 0 {
+ out = append(out, info.name)
+ f &^= info.flag
+ }
+ }
+ if f != 0 {
+ out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
+ }
+ return strings.Join(out, ",")
+}
+
+// Set a DumpFlags as a comma separated list of flags
+func (f *DumpFlags) Set(s string) error {
+ var flags DumpFlags
+ parts := strings.Split(s, ",")
+ for _, part := range parts {
+ found := false
+ part = strings.ToLower(strings.TrimSpace(part))
+ if part == "" {
+ continue
+ }
+ for _, info := range dumpFlags {
+ if part == info.name {
+ found = true
+ flags |= info.flag
+ }
+ }
+ if !found {
+ return errors.Errorf("Unknown dump flag %q", part)
+ }
+ }
+ *f = flags
+ return nil
+}
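+
+// For example (illustrative):
+//
+//	var f DumpFlags
+//	_ = f.Set("headers,bodies")
+//	// now f&DumpHeaders != 0 && f&DumpBodies != 0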
+
+// Type of the value
+func (f *DumpFlags) Type() string {
+ return "string"
+}
diff --git a/vendor/github.com/ncw/rclone/fs/filter/filter.go b/vendor/github.com/ncw/rclone/fs/filter/filter.go
new file mode 100755
index 0000000..c63ce9a
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/filter/filter.go
@@ -0,0 +1,498 @@
+// Package filter controls the filtering of files
+package filter
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+// Active is the globally active filter
+var Active = mustNewFilter(nil)
+
+// rule is one filter rule
+type rule struct {
+ Include bool
+ Regexp *regexp.Regexp
+}
+
+// Match returns true if rule matches path
+func (r *rule) Match(path string) bool {
+ return r.Regexp.MatchString(path)
+}
+
+// String the rule
+func (r *rule) String() string {
+ c := "-"
+ if r.Include {
+ c = "+"
+ }
+ return fmt.Sprintf("%s %s", c, r.Regexp.String())
+}
+
+// rules is a slice of rules
+type rules struct {
+ rules []rule
+ existing map[string]struct{}
+}
+
+// add adds a rule if it doesn't exist already
+func (rs *rules) add(Include bool, re *regexp.Regexp) {
+ if rs.existing == nil {
+ rs.existing = make(map[string]struct{})
+ }
+ newRule := rule{
+ Include: Include,
+ Regexp: re,
+ }
+ newRuleString := newRule.String()
+ if _, ok := rs.existing[newRuleString]; ok {
+ return // rule already exists
+ }
+ rs.rules = append(rs.rules, newRule)
+ rs.existing[newRuleString] = struct{}{}
+}
+
+// clear clears all the rules
+func (rs *rules) clear() {
+ rs.rules = nil
+ rs.existing = nil
+}
+
+// len returns the number of rules
+func (rs *rules) len() int {
+ return len(rs.rules)
+}
+
+// FilesMap describes the map of files to transfer
+type FilesMap map[string]struct{}
+
+// Opt configures the filter
+type Opt struct {
+ DeleteExcluded bool
+ FilterRule []string
+ FilterFrom []string
+ ExcludeRule []string
+ ExcludeFrom []string
+ ExcludeFile string
+ IncludeRule []string
+ IncludeFrom []string
+ FilesFrom []string
+ MinAge fs.Duration
+ MaxAge fs.Duration
+ MinSize fs.SizeSuffix
+ MaxSize fs.SizeSuffix
+}
+
+// DefaultOpt is the default config for the filter
+var DefaultOpt = Opt{
+ MinAge: fs.DurationOff,
+ MaxAge: fs.DurationOff,
+ MinSize: fs.SizeSuffix(-1),
+ MaxSize: fs.SizeSuffix(-1),
+}
+
+// Filter describes any filtering in operation
+type Filter struct {
+ Opt Opt
+ ModTimeFrom time.Time
+ ModTimeTo time.Time
+ fileRules rules
+ dirRules rules
+ files FilesMap // files if filesFrom
+ dirs FilesMap // dirs from filesFrom
+}
+
+// NewFilter parses the command line options and creates a Filter
+// object. If opt is nil, then DefaultOpt will be used
+func NewFilter(opt *Opt) (f *Filter, err error) {
+ f = &Filter{}
+
+ // Make a copy of the options
+ if opt != nil {
+ f.Opt = *opt
+ } else {
+ f.Opt = DefaultOpt
+ }
+
+ // Filter flags
+ if f.Opt.MinAge.IsSet() {
+ f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
+ fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
+ }
+ if f.Opt.MaxAge.IsSet() {
+ f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
+ if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
+ log.Fatal("filter: --min-age can't be larger than --max-age")
+ }
+ fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
+ }
+
+ addImplicitExclude := false
+ foundExcludeRule := false
+
+ for _, rule := range f.Opt.IncludeRule {
+ err = f.Add(true, rule)
+ if err != nil {
+ return nil, err
+ }
+ addImplicitExclude = true
+ }
+ for _, rule := range f.Opt.IncludeFrom {
+ err := forEachLine(rule, func(line string) error {
+ return f.Add(true, line)
+ })
+ if err != nil {
+ return nil, err
+ }
+ addImplicitExclude = true
+ }
+ for _, rule := range f.Opt.ExcludeRule {
+ err = f.Add(false, rule)
+ if err != nil {
+ return nil, err
+ }
+ foundExcludeRule = true
+ }
+ for _, rule := range f.Opt.ExcludeFrom {
+ err := forEachLine(rule, func(line string) error {
+ return f.Add(false, line)
+ })
+ if err != nil {
+ return nil, err
+ }
+ foundExcludeRule = true
+ }
+
+ if addImplicitExclude && foundExcludeRule {
+ fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
+ }
+
+ for _, rule := range f.Opt.FilterRule {
+ err = f.AddRule(rule)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, rule := range f.Opt.FilterFrom {
+ err := forEachLine(rule, f.AddRule)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, rule := range f.Opt.FilesFrom {
+ f.initAddFile() // init to show --files-from set even if no files within
+ err := forEachLine(rule, func(line string) error {
+ return f.AddFile(line)
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ if addImplicitExclude {
+ err = f.Add(false, "/**")
+ if err != nil {
+ return nil, err
+ }
+ }
+ if fs.Config.Dump&fs.DumpFilters != 0 {
+ fmt.Println("--- start filters ---")
+ fmt.Println(f.DumpFilters())
+ fmt.Println("--- end filters ---")
+ }
+ return f, nil
+}
+
+func mustNewFilter(opt *Opt) *Filter {
+ f, err := NewFilter(opt)
+ if err != nil {
+ panic(err)
+ }
+ return f
+}
+
+// addDirGlobs adds directory globs from the file glob passed in
+func (f *Filter) addDirGlobs(Include bool, glob string) error {
+ for _, dirGlob := range globToDirGlobs(glob) {
+ // Don't add "/" as we always include the root
+ if dirGlob == "/" {
+ continue
+ }
+ dirRe, err := globToRegexp(dirGlob)
+ if err != nil {
+ return err
+ }
+ f.dirRules.add(Include, dirRe)
+ }
+ return nil
+}
+
+// Add adds a filter rule with include or exclude status indicated
+func (f *Filter) Add(Include bool, glob string) error {
+ isDirRule := strings.HasSuffix(glob, "/")
+ isFileRule := !isDirRule
+ if strings.Contains(glob, "**") {
+ isDirRule, isFileRule = true, true
+ }
+ re, err := globToRegexp(glob)
+ if err != nil {
+ return err
+ }
+ if isFileRule {
+ f.fileRules.add(Include, re)
+ // If this is an include rule, work out which directories are needed
+ // to scan; for an exclude rule we can't rule anything out, unless it
+ // is `*` which matches everything.
+ // NB ** and /** are DirRules
+ if Include || glob == "*" {
+ err = f.addDirGlobs(Include, glob)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if isDirRule {
+ f.dirRules.add(Include, re)
+ }
+ return nil
+}
+
+// AddRule adds a filter rule with include/exclude indicated by the prefix
+//
+// These are
+//
+// + glob
+// - glob
+// !
+//
+// '+' includes the glob, '-' excludes it and '!' resets the filter list
+//
+// Line comments may be introduced with '#' or ';'
+func (f *Filter) AddRule(rule string) error {
+ switch {
+ case rule == "!":
+ f.Clear()
+ return nil
+ case strings.HasPrefix(rule, "- "):
+ return f.Add(false, rule[2:])
+ case strings.HasPrefix(rule, "+ "):
+ return f.Add(true, rule[2:])
+ }
+ return errors.Errorf("malformed rule %q", rule)
+}
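+
+// For example (illustrative):
+//
+//	_ = f.AddRule("+ *.jpg")   // include jpg files
+//	_ = f.AddRule("- /tmp/**") // exclude everything under /tmp
+//	_ = f.AddRule("!")         // clear all rules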
+
+// initAddFile creates f.files and f.dirs
+func (f *Filter) initAddFile() {
+ if f.files == nil {
+ f.files = make(FilesMap)
+ f.dirs = make(FilesMap)
+ }
+}
+
+// AddFile adds a single file to the files from list
+func (f *Filter) AddFile(file string) error {
+ f.initAddFile()
+ file = strings.Trim(file, "/")
+ f.files[file] = struct{}{}
+ // Put all the parent directories into f.dirs
+ for {
+ file = path.Dir(file)
+ if file == "." {
+ break
+ }
+ if _, found := f.dirs[file]; found {
+ break
+ }
+ f.dirs[file] = struct{}{}
+ }
+ return nil
+}
+
+// Files returns all the files from the `--files-from` list
+//
+// It may be nil if the list is empty
+func (f *Filter) Files() FilesMap {
+ return f.files
+}
+
+// Clear clears all the filter rules
+func (f *Filter) Clear() {
+ f.fileRules.clear()
+ f.dirRules.clear()
+}
+
+// InActive returns true if no filters are active
+func (f *Filter) InActive() bool {
+ return (f.files == nil &&
+ f.ModTimeFrom.IsZero() &&
+ f.ModTimeTo.IsZero() &&
+ f.Opt.MinSize < 0 &&
+ f.Opt.MaxSize < 0 &&
+ f.fileRules.len() == 0 &&
+ f.dirRules.len() == 0 &&
+ len(f.Opt.ExcludeFile) == 0)
+}
+
+// includeRemote returns whether this remote passes the filter rules.
+func (f *Filter) includeRemote(remote string) bool {
+ for _, rule := range f.fileRules.rules {
+ if rule.Match(remote) {
+ return rule.Include
+ }
+ }
+ return true
+}
+
+// ListContainsExcludeFile checks if the exclude file is present in the list.
+func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
+ if len(f.Opt.ExcludeFile) == 0 {
+ return false
+ }
+ for _, entry := range entries {
+ obj, ok := entry.(fs.Object)
+ if ok {
+ basename := path.Base(obj.Remote())
+ if basename == f.Opt.ExcludeFile {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// IncludeDirectory returns a function which checks whether this
+// directory should be included in the sync or not.
+func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
+ return func(remote string) (bool, error) {
+ remote = strings.Trim(remote, "/")
+ // first check if we need to remove directory based on
+ // the exclude file
+ excl, err := f.DirContainsExcludeFile(fs, remote)
+ if err != nil {
+ return false, err
+ }
+ if excl {
+ return false, nil
+ }
+
+ // filesFrom takes precedence
+ if f.files != nil {
+ _, include := f.dirs[remote]
+ return include, nil
+ }
+ remote += "/"
+ for _, rule := range f.dirRules.rules {
+ if rule.Match(remote) {
+ return rule.Include, nil
+ }
+ }
+
+ return true, nil
+ }
+}
+
+// DirContainsExcludeFile checks if the exclude file is present in a
+// directory. If fs is nil, it works properly if ExcludeFile is an
+// empty string (for testing).
+func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
+ if len(f.Opt.ExcludeFile) > 0 {
+ exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
+ if err != nil {
+ return false, err
+ }
+ if exists {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Include returns whether this object should be included into the
+// sync or not
+func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
+ // filesFrom takes precedence
+ if f.files != nil {
+ _, include := f.files[remote]
+ return include
+ }
+ if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
+ return false
+ }
+ if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
+ return false
+ }
+ if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
+ return false
+ }
+ if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
+ return false
+ }
+ return f.includeRemote(remote)
+}
+
+// IncludeObject returns whether this object should be included into
+// the sync or not. This is a convenience function to avoid calling
+// o.ModTime(), which is an expensive operation.
+func (f *Filter) IncludeObject(o fs.Object) bool {
+ var modTime time.Time
+
+ if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
+ modTime = o.ModTime()
+ } else {
+ modTime = time.Unix(0, 0)
+ }
+
+ return f.Include(o.Remote(), o.Size(), modTime)
+}
+
+// forEachLine calls fn on every line in the file pointed to by path
+//
+// It ignores empty lines and lines starting with '#' or ';'
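+//
+// For example, a rules file containing the following (illustrative):
+//
+//	# pictures only
+//	+ *.jpg
+//	; drop the rest
+//	- *
+//
+// causes fn to be called with "+ *.jpg" and "- *" only.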
+func forEachLine(path string, fn func(string) error) (err error) {
+ in, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer fs.CheckClose(in, &err)
+ scanner := bufio.NewScanner(in)
+ for scanner.Scan() {
+ line := scanner.Text()
+ line = strings.TrimSpace(line)
+ if len(line) == 0 || line[0] == '#' || line[0] == ';' {
+ continue
+ }
+ err := fn(line)
+ if err != nil {
+ return err
+ }
+ }
+ return scanner.Err()
+}
+
+// DumpFilters dumps the filters in textual form, 1 per line
+func (f *Filter) DumpFilters() string {
+ rules := []string{}
+ if !f.ModTimeFrom.IsZero() {
+ rules = append(rules, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String()))
+ }
+ if !f.ModTimeTo.IsZero() {
+ rules = append(rules, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String()))
+ }
+ rules = append(rules, "--- File filter rules ---")
+ for _, rule := range f.fileRules.rules {
+ rules = append(rules, rule.String())
+ }
+ rules = append(rules, "--- Directory filter rules ---")
+ for _, dirRule := range f.dirRules.rules {
+ rules = append(rules, dirRule.String())
+ }
+ return strings.Join(rules, "\n")
+}
diff --git a/vendor/github.com/ncw/rclone/fs/filter/glob.go b/vendor/github.com/ncw/rclone/fs/filter/glob.go
new file mode 100755
index 0000000..84c03f4
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/filter/glob.go
@@ -0,0 +1,166 @@
+// rsync style glob parser
+
+package filter
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// globToRegexp converts an rsync style glob to a regexp
+//
+// documented in filtering.md
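+//
+// Some illustrative translations, following the rules implemented below:
+//
+//	globToRegexp("*.jpg")     // -> (^|/)[^/]*\.jpg$
+//	globToRegexp("/dir/file") // -> ^dir/file$
+//	globToRegexp("a/**/b")    // -> (^|/)a/.*/b$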
+func globToRegexp(glob string) (*regexp.Regexp, error) {
+ var re bytes.Buffer
+ if strings.HasPrefix(glob, "/") {
+ glob = glob[1:]
+ _, _ = re.WriteRune('^')
+ } else {
+ _, _ = re.WriteString("(^|/)")
+ }
+ consecutiveStars := 0
+ insertStars := func() error {
+ if consecutiveStars > 0 {
+ switch consecutiveStars {
+ case 1:
+ _, _ = re.WriteString(`[^/]*`)
+ case 2:
+ _, _ = re.WriteString(`.*`)
+ default:
+ return errors.Errorf("too many stars in %q", glob)
+ }
+ }
+ consecutiveStars = 0
+ return nil
+ }
+ inBraces := false
+ inBrackets := 0
+ slashed := false
+ for _, c := range glob {
+ if slashed {
+ _, _ = re.WriteRune(c)
+ slashed = false
+ continue
+ }
+ if c != '*' {
+ err := insertStars()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if inBrackets > 0 {
+ _, _ = re.WriteRune(c)
+ if c == '[' {
+ inBrackets++
+ }
+ if c == ']' {
+ inBrackets--
+ }
+ continue
+ }
+ switch c {
+ case '\\':
+ _, _ = re.WriteRune(c)
+ slashed = true
+ case '*':
+ consecutiveStars++
+ case '?':
+ _, _ = re.WriteString(`[^/]`)
+ case '[':
+ _, _ = re.WriteRune(c)
+ inBrackets++
+ case ']':
+ return nil, errors.Errorf("mismatched ']' in glob %q", glob)
+ case '{':
+ if inBraces {
+ return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
+ }
+ inBraces = true
+ _, _ = re.WriteRune('(')
+ case '}':
+ if !inBraces {
+ return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
+ }
+ _, _ = re.WriteRune(')')
+ inBraces = false
+ case ',':
+ if inBraces {
+ _, _ = re.WriteRune('|')
+ } else {
+ _, _ = re.WriteRune(c)
+ }
+ case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
+ _, _ = re.WriteRune('\\')
+ _, _ = re.WriteRune(c)
+ default:
+ _, _ = re.WriteRune(c)
+ }
+ }
+ err := insertStars()
+ if err != nil {
+ return nil, err
+ }
+ if inBrackets > 0 {
+ return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
+ }
+ if inBraces {
+ return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
+ }
+ _, _ = re.WriteRune('$')
+ result, err := regexp.Compile(re.String())
+ if err != nil {
+ return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
+ }
+ return result, nil
+}
+
+var (
+ // Can't deal with / or ** in {}
+ tooHardRe = regexp.MustCompile(`{[^{}]*(\*\*|/)[^{}]*}`)
+
+ // Squash all /
+ squashSlash = regexp.MustCompile(`/{2,}`)
+)
+
+// globToDirGlobs takes a file glob and turns it into a series of
+// directory globs. When matched with a directory (with a trailing /)
+// this should answer the question as to whether this glob could be in
+// this directory.
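+//
+// For example (illustrative):
+//
+//	globToDirGlobs("/a/*/b.jpg") // -> ["/a/*/", "/a/", "/"]
+//	globToDirGlobs("{a/b,c}")    // -> ["/**"], as / inside {} is too hard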
+func globToDirGlobs(glob string) (out []string) {
+ if tooHardRe.MatchString(glob) {
+ // Can't figure this one out so return any directory might match
+ out = append(out, "/**")
+ return out
+ }
+
+ // Get rid of multiple /s
+ glob = squashSlash.ReplaceAllString(glob, "/")
+
+ // Split on / or **
+ // (** can contain /)
+ for {
+ i := strings.LastIndex(glob, "/")
+ j := strings.LastIndex(glob, "**")
+ what := ""
+ if j > i {
+ i = j
+ what = "**"
+ }
+ if i < 0 {
+ if len(out) == 0 {
+ out = append(out, "/**")
+ }
+ break
+ }
+ glob = glob[:i]
+ newGlob := glob + what + "/"
+ if len(out) == 0 || out[len(out)-1] != newGlob {
+ out = append(out, newGlob)
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fs.go b/vendor/github.com/ncw/rclone/fs/fs.go
new file mode 100755
index 0000000..34b4f13
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fs.go
@@ -0,0 +1,1063 @@
+// Package fs is a generic file system interface for rclone object storage systems
+package fs
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/ncw/rclone/fs/config/configstruct"
+ "github.com/ncw/rclone/fs/fspath"
+ "github.com/ncw/rclone/fs/hash"
+ "github.com/pkg/errors"
+)
+
+// EntryType can be associated with remote paths to identify their type
+type EntryType int
+
+// Constants
+const (
+ // ModTimeNotSupported is a very large precision value to show
+ // mod time isn't supported on this Fs
+ ModTimeNotSupported = 100 * 365 * 24 * time.Hour
+ // MaxLevel is a sentinel representing an infinite depth for listings
+ MaxLevel = math.MaxInt32
+ // EntryDirectory should be used to classify remote paths in directories
+ EntryDirectory EntryType = iota // 2, as iota counts the two consts above in this block
+ // EntryObject should be used to classify remote paths in objects
+ EntryObject // 3
+)
+
+// Globals
+var (
+ // Filesystem registry
+ Registry []*RegInfo
+ // ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
+ ErrorNotFoundInConfigFile = errors.New("didn't find section in config file")
+ ErrorCantPurge = errors.New("can't purge directory")
+ ErrorCantCopy = errors.New("can't copy object - incompatible remotes")
+ ErrorCantMove = errors.New("can't move object - incompatible remotes")
+ ErrorCantDirMove = errors.New("can't move directory - incompatible remotes")
+ ErrorDirExists = errors.New("can't copy directory - destination already exists")
+ ErrorCantSetModTime = errors.New("can't set modified time")
+ ErrorCantSetModTimeWithoutDelete = errors.New("can't set modified time without deleting existing object")
+ ErrorDirNotFound = errors.New("directory not found")
+ ErrorObjectNotFound = errors.New("object not found")
+ ErrorLevelNotSupported = errors.New("level value not supported")
+ ErrorListAborted = errors.New("list aborted")
+ ErrorListBucketRequired = errors.New("bucket or container name is needed in remote")
+ ErrorIsFile = errors.New("is a file not a directory")
+ ErrorNotAFile = errors.New("is not a regular file")
+ ErrorNotDeleting = errors.New("not deleting files as there were IO errors")
+ ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors")
+ ErrorCantMoveOverlapping = errors.New("can't move files on overlapping remotes")
+ ErrorDirectoryNotEmpty = errors.New("directory not empty")
+ ErrorImmutableModified = errors.New("immutable file modified")
+ ErrorPermissionDenied = errors.New("permission denied")
+)
+
+// RegInfo provides information about a filesystem
+type RegInfo struct {
+ // Name of this fs
+ Name string
+ // Description of this fs - defaults to Name
+ Description string
+ // Prefix for command line flags for this fs - defaults to Name if not set
+ Prefix string
+ // Create a new file system. If root refers to an existing
+ // object, then it should return a Fs which points to
+ // the parent of that object and ErrorIsFile.
+ NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
+ // Function to call to help with config
+ Config func(name string, config configmap.Mapper) `json:"-"`
+ // Options for the Fs configuration
+ Options Options
+}
+
+// Options is a slice of configuration Option for a backend
+type Options []Option
+
+// Set the default values for the options
+func (os Options) setValues() {
+ for i := range os {
+ o := &os[i]
+ if o.Default == nil {
+ o.Default = ""
+ }
+ }
+}
+
+// OptionVisibility controls whether the options are visible in the
+// configurator or the command line.
+type OptionVisibility byte
+
+// Constants for Option.Hide
+const (
+ OptionHideCommandLine OptionVisibility = 1 << iota
+ OptionHideConfigurator
+ OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
+)
+
+// Option describes an option for the config wizard
+//
+// This also describes command line options and environment variables
+type Option struct {
+ Name string // name of the option in snake_case
+ Help string // Help, the first line only is used for the command line help
+ Provider string // Set to filter on provider
+ Default interface{} // default value, nil => ""
+ Value interface{} // value to be set by flags
+ Examples OptionExamples `json:",omitempty"` // config examples
+ ShortOpt string // the short option for this if required
+ Hide OptionVisibility // set this to hide the config from the configurator or the command line
+ Required bool // this option is required
+ IsPassword bool // set if the option is a password
+ NoPrefix bool // set if the option for this should not use the backend prefix
+ Advanced bool // set if this is an advanced config option
+}
+
+// value gets the current value, which is the default if not set
+func (o *Option) value() interface{} {
+ val := o.Value
+ if val == nil {
+ val = o.Default
+ }
+ return val
+}
+
+// String turns Option into a string
+func (o *Option) String() string {
+ return fmt.Sprint(o.value())
+}
+
+// Set an Option from a string
+func (o *Option) Set(s string) (err error) {
+ newValue, err := configstruct.StringToInterface(o.value(), s)
+ if err != nil {
+ return err
+ }
+ o.Value = newValue
+ return nil
+}
+
+// Type of the value
+func (o *Option) Type() string {
+ return reflect.TypeOf(o.value()).Name()
+}
+
+// OptionExamples is a slice of examples
+type OptionExamples []OptionExample
+
+// Len is part of sort.Interface.
+func (os OptionExamples) Len() int { return len(os) }
+
+// Swap is part of sort.Interface.
+func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] }
+
+// Less is part of sort.Interface.
+func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help }
+
+// Sort sorts an OptionExamples
+func (os OptionExamples) Sort() { sort.Sort(os) }
+
+// OptionExample describes an example for an Option
+type OptionExample struct {
+ Value string
+ Help string
+ Provider string
+}
+
+// Register a filesystem
+//
+// Fs modules should use this in an init() function
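+//
+// A minimal sketch of a backend registering itself (the name and
+// constructor are placeholders, not a real backend):
+//
+//	func init() {
+//		fs.Register(&fs.RegInfo{
+//			Name:        "mybackend",
+//			Description: "Example backend",
+//			NewFs:       NewFs,
+//		})
+//	}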
+func Register(info *RegInfo) {
+ info.Options.setValues()
+ if info.Prefix == "" {
+ info.Prefix = info.Name
+ }
+ Registry = append(Registry, info)
+}
+
+// Fs is the interface a cloud storage system must provide
+type Fs interface {
+ Info
+
+ // List the objects and directories in dir into entries. The
+ // entries can be returned in any order but should be for a
+ // complete directory.
+ //
+ // dir should be "" to list the root, and should not have
+ // trailing slashes.
+ //
+ // This should return ErrDirNotFound if the directory isn't
+ // found.
+ List(dir string) (entries DirEntries, err error)
+
+ // NewObject finds the Object at remote. If it can't be found
+ // it returns the error ErrorObjectNotFound.
+ NewObject(remote string) (Object, error)
+
+ // Put in to the remote path with the modTime given of the given size
+ //
+ // May create the object even if it returns an error - if so
+ // will return the object and the error, otherwise will return
+ // nil and the error
+ Put(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+
+ // Mkdir makes the directory (container, bucket)
+ //
+ // Shouldn't return an error if it already exists
+ Mkdir(dir string) error
+
+ // Rmdir removes the directory (container, bucket) if empty
+ //
+ // Return an error if it doesn't exist or isn't empty
+ Rmdir(dir string) error
+}
+
+// Info provides a read only interface to information about a filesystem.
+type Info interface {
+ // Name of the remote (as passed into NewFs)
+ Name() string
+
+ // Root of the remote (as passed into NewFs)
+ Root() string
+
+ // String returns a description of the FS
+ String() string
+
+ // Precision of the ModTimes in this Fs
+ Precision() time.Duration
+
+ // Returns the supported hash types of the filesystem
+ Hashes() hash.Set
+
+ // Features returns the optional features of this Fs
+ Features() *Features
+}
+
+// Object is a filesystem like object provided by an Fs
+type Object interface {
+ ObjectInfo
+
+ // SetModTime sets the metadata on the object to set the modification date
+ SetModTime(time.Time) error
+
+ // Open opens the file for read. Call Close() on the returned io.ReadCloser
+ Open(options ...OpenOption) (io.ReadCloser, error)
+
+ // Update in to the object with the modTime given of the given size
+ Update(in io.Reader, src ObjectInfo, options ...OpenOption) error
+
+ // Removes this object
+ Remove() error
+}
+
+// ObjectInfo provides read only information about an object.
+type ObjectInfo interface {
+ DirEntry
+
+ // Fs returns read only access to the Fs that this object is part of
+ Fs() Info
+
+ // Hash returns the selected checksum of the file
+ // If no checksum is available it returns ""
+ Hash(hash.Type) (string, error)
+
+ // Storable says whether this object can be stored
+ Storable() bool
+}
+
+// DirEntry provides read only information about the common subset of
+// a Dir or Object. These are returned from directory listings - type
+// assert them into the correct type.
+type DirEntry interface {
+ // String returns a description of the Object
+ String() string
+
+ // Remote returns the remote path
+ Remote() string
+
+ // ModTime returns the modification date of the file
+ // It should return a best guess if one isn't available
+ ModTime() time.Time
+
+ // Size returns the size of the file
+ Size() int64
+}
+
+// Directory is a filesystem like directory provided by an Fs
+type Directory interface {
+ DirEntry
+
+ // Items returns the count of items in this directory or this
+ // directory and subdirectories if known, -1 for unknown
+ Items() int64
+
+ // ID returns the internal ID of this directory if known, or
+ // "" otherwise
+ ID() string
+}
+
+// MimeTyper is an optional interface for Object
+type MimeTyper interface {
+ // MimeType returns the content type of the Object if
+ // known, or "" if not
+ MimeType() string
+}
+
+// IDer is an optional interface for Object
+type IDer interface {
+ // ID returns the ID of the Object if known, or "" if not
+ ID() string
+}
+
+// ObjectUnWrapper is an optional interface for Object
+type ObjectUnWrapper interface {
+ // UnWrap returns the Object that this Object is wrapping or
+ // nil if it isn't wrapping anything
+ UnWrap() Object
+}
+
+// ListRCallback defines a callback function for ListR to use
+//
+// It is called for each tranche of entries read from the listing and
+// if it returns an error, the listing stops.
+type ListRCallback func(entries DirEntries) error
+
+// ListRFn defines the call used to recursively list a directory
+type ListRFn func(dir string, callback ListRCallback) error
+
+// NewUsageValue makes an *int64 pointing at the value passed in, for use in Usage
+func NewUsageValue(value int64) *int64 {
+ p := new(int64)
+ *p = value
+ return p
+}
+
+// Usage is returned by the About call
+//
+// If a value is nil then it isn't supported by that backend
+type Usage struct {
+ Total *int64 `json:"total,omitempty"` // quota of bytes that can be used
+ Used *int64 `json:"used,omitempty"` // bytes in use
+ Trashed *int64 `json:"trashed,omitempty"` // bytes in trash
+ Other *int64 `json:"other,omitempty"` // other usage eg gmail in drive
+ Free *int64 `json:"free,omitempty"` // bytes which can be uploaded before reaching the quota
+ Objects *int64 `json:"objects,omitempty"` // objects in the storage system
+}
+
+// Features describe the optional features of the Fs
+type Features struct {
+ // Feature flags, whether Fs
+ CaseInsensitive bool // has case insensitive files
+ DuplicateFiles bool // allows duplicate files
+ ReadMimeType bool // can read the mime type of objects
+ WriteMimeType bool // can set the mime type of objects
+ CanHaveEmptyDirectories bool // can have empty directories
+ BucketBased bool // is bucket based (like s3, swift etc)
+
+ // Purge all files in the root and the root directory
+ //
+ // Implement this if you have a way of deleting all the files
+ // quicker than just running Remove() on the result of List()
+ //
+ // Return an error if it doesn't exist
+ Purge func() error
+
+ // Copy src to this remote using server side copy operations.
+ //
+ // This is stored with the remote path given
+ //
+ // It returns the destination Object and a possible error
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantCopy
+ Copy func(src Object, remote string) (Object, error)
+
+ // Move src to this remote using server side move operations.
+ //
+ // This is stored with the remote path given
+ //
+ // It returns the destination Object and a possible error
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantMove
+ Move func(src Object, remote string) (Object, error)
+
+ // DirMove moves src, srcRemote to this remote at dstRemote
+ // using server side move operations.
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantDirMove
+ //
+ // If destination exists then return fs.ErrorDirExists
+ DirMove func(src Fs, srcRemote, dstRemote string) error
+
+ // ChangeNotify calls the passed function with a path
+ // that has had changes. If the implementation
+ // uses polling, it should adhere to the given interval.
+ ChangeNotify func(func(string, EntryType), time.Duration) chan bool
+
+ // UnWrap returns the Fs that this Fs is wrapping
+ UnWrap func() Fs
+
+ // WrapFs returns the Fs that is wrapping this Fs
+ WrapFs func() Fs
+
+ // SetWrapper sets the Fs that is wrapping this Fs
+ SetWrapper func(f Fs)
+
+ // DirCacheFlush resets the directory cache - used in testing
+ // as an optional interface
+ DirCacheFlush func()
+
+ // PublicLink generates a public link to the remote path (usually readable by anyone)
+ PublicLink func(remote string) (string, error)
+
+ // Put in to the remote path with the modTime given of the given size
+ //
+ // May create the object even if it returns an error - if so
+ // will return the object and the error, otherwise will return
+ // nil and the error
+ //
+ // May create duplicates or return errors if src already
+ // exists.
+ PutUnchecked func(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+
+ // PutStream uploads to the remote path with the modTime given of indeterminate size
+ //
+ // May create the object even if it returns an error - if so
+ // will return the object and the error, otherwise will return
+ // nil and the error
+ PutStream func(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+
+ // MergeDirs merges the contents of all the directories passed
+ // in into the first one and rmdirs the other directories.
+ MergeDirs func([]Directory) error
+
+ // CleanUp the trash in the Fs
+ //
+ // Implement this if you have a way of emptying the trash or
+ // otherwise cleaning up old versions of files.
+ CleanUp func() error
+
+ // ListR lists the objects and directories of the Fs starting
+ // from dir recursively into out.
+ //
+ // dir should be "" to start from the root, and should not
+ // have trailing slashes.
+ //
+ // This should return ErrDirNotFound if the directory isn't
+ // found.
+ //
+ // It should call callback for each tranche of entries read.
+ // These need not be returned in any particular order. If
+ // callback returns an error then the listing will stop
+ // immediately.
+ //
+ // Don't implement this unless you have a more efficient way
+ // of listing recursively than doing a directory traversal.
+ ListR ListRFn
+
+ // About gets quota information from the Fs
+ About func() (*Usage, error)
+}
+
+// Disable nil's out the named feature. If it isn't found then it
+// will log a message.
+func (ft *Features) Disable(name string) *Features {
+ v := reflect.ValueOf(ft).Elem()
+ vType := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ vName := vType.Field(i).Name
+ field := v.Field(i)
+ if strings.EqualFold(name, vName) {
+ if !field.CanSet() {
+ Errorf(nil, "Can't set Feature %q", name)
+ } else {
+ zero := reflect.Zero(field.Type())
+ field.Set(zero)
+ Debugf(nil, "Reset feature %q", name)
+ }
+ }
+ }
+ return ft
+}
+
+// List returns a slice of all the possible feature names
+func (ft *Features) List() (out []string) {
+ v := reflect.ValueOf(ft).Elem()
+ vType := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ out = append(out, vType.Field(i).Name)
+ }
+ return out
+}
+
+// DisableList nil's out the comma separated list of named features.
+// If it isn't found then it will log a message.
+func (ft *Features) DisableList(list []string) *Features {
+ for _, feature := range list {
+ ft.Disable(strings.TrimSpace(feature))
+ }
+ return ft
+}
+
+// Fill fills in the function pointers in the Features struct from the
+// optional interfaces. It returns the original updated Features
+// struct passed in.
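+//
+// A backend constructor might use it like this (field names illustrative):
+//
+//	f := &Fs{name: name, root: root}
+//	f.features = (&Features{CaseInsensitive: true}).Fill(f)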
+func (ft *Features) Fill(f Fs) *Features {
+ if do, ok := f.(Purger); ok {
+ ft.Purge = do.Purge
+ }
+ if do, ok := f.(Copier); ok {
+ ft.Copy = do.Copy
+ }
+ if do, ok := f.(Mover); ok {
+ ft.Move = do.Move
+ }
+ if do, ok := f.(DirMover); ok {
+ ft.DirMove = do.DirMove
+ }
+ if do, ok := f.(ChangeNotifier); ok {
+ ft.ChangeNotify = do.ChangeNotify
+ }
+ if do, ok := f.(UnWrapper); ok {
+ ft.UnWrap = do.UnWrap
+ }
+ if do, ok := f.(Wrapper); ok {
+ ft.WrapFs = do.WrapFs
+ ft.SetWrapper = do.SetWrapper
+ }
+ if do, ok := f.(DirCacheFlusher); ok {
+ ft.DirCacheFlush = do.DirCacheFlush
+ }
+ if do, ok := f.(PublicLinker); ok {
+ ft.PublicLink = do.PublicLink
+ }
+ if do, ok := f.(PutUncheckeder); ok {
+ ft.PutUnchecked = do.PutUnchecked
+ }
+ if do, ok := f.(PutStreamer); ok {
+ ft.PutStream = do.PutStream
+ }
+ if do, ok := f.(MergeDirser); ok {
+ ft.MergeDirs = do.MergeDirs
+ }
+ if do, ok := f.(CleanUpper); ok {
+ ft.CleanUp = do.CleanUp
+ }
+ if do, ok := f.(ListRer); ok {
+ ft.ListR = do.ListR
+ }
+ if do, ok := f.(Abouter); ok {
+ ft.About = do.About
+ }
+ return ft.DisableList(Config.DisableFeatures)
+}
+
+// Mask the Features with the Fs passed in
+//
+// Only optional features which are implemented in both the original
+// Fs AND the one passed in will be advertised. Any features which
+// aren't in both will be set to false/nil, except for UnWrap/Wrap which
+// will be left untouched.
+func (ft *Features) Mask(f Fs) *Features {
+ mask := f.Features()
+ ft.CaseInsensitive = ft.CaseInsensitive && mask.CaseInsensitive
+ ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles
+ ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType
+ ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType
+ ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories
+ ft.BucketBased = ft.BucketBased && mask.BucketBased
+ if mask.Purge == nil {
+ ft.Purge = nil
+ }
+ if mask.Copy == nil {
+ ft.Copy = nil
+ }
+ if mask.Move == nil {
+ ft.Move = nil
+ }
+ if mask.DirMove == nil {
+ ft.DirMove = nil
+ }
+ if mask.ChangeNotify == nil {
+ ft.ChangeNotify = nil
+ }
+ // if mask.UnWrap == nil {
+ // ft.UnWrap = nil
+ // }
+ if mask.DirCacheFlush == nil {
+ ft.DirCacheFlush = nil
+ }
+ if mask.PutUnchecked == nil {
+ ft.PutUnchecked = nil
+ }
+ if mask.PutStream == nil {
+ ft.PutStream = nil
+ }
+ if mask.MergeDirs == nil {
+ ft.MergeDirs = nil
+ }
+ if mask.CleanUp == nil {
+ ft.CleanUp = nil
+ }
+ if mask.ListR == nil {
+ ft.ListR = nil
+ }
+ if mask.About == nil {
+ ft.About = nil
+ }
+ return ft.DisableList(Config.DisableFeatures)
+}
+
+// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
+// method only if available in f.
+func (ft *Features) Wrap(f Fs) *Features {
+ ftCopy := new(Features)
+ *ftCopy = *ft
+ if do, ok := f.(UnWrapper); ok {
+ ftCopy.UnWrap = do.UnWrap
+ }
+ if do, ok := f.(Wrapper); ok {
+ ftCopy.WrapFs = do.WrapFs
+ ftCopy.SetWrapper = do.SetWrapper
+ }
+ return ftCopy
+}
+
+// WrapsFs records that `f` wraps `w`, telling w about its wrapper
+func (ft *Features) WrapsFs(f Fs, w Fs) *Features {
+ wFeatures := w.Features()
+ if wFeatures.WrapFs != nil && wFeatures.SetWrapper != nil {
+ wFeatures.SetWrapper(f)
+ }
+ return ft
+}
+
+// Purger is an optional interface for Fs
+type Purger interface {
+ // Purge all files in the root and the root directory
+ //
+ // Implement this if you have a way of deleting all the files
+ // quicker than just running Remove() on the result of List()
+ //
+ // Return an error if it doesn't exist
+ Purge() error
+}
+
+// Copier is an optional interface for Fs
+type Copier interface {
+ // Copy src to this remote using server side copy operations.
+ //
+ // This is stored with the remote path given
+ //
+ // It returns the destination Object and a possible error
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantCopy
+ Copy(src Object, remote string) (Object, error)
+}
+
+// Mover is an optional interface for Fs
+type Mover interface {
+ // Move src to this remote using server side move operations.
+ //
+ // This is stored with the remote path given
+ //
+ // It returns the destination Object and a possible error
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantMove
+ Move(src Object, remote string) (Object, error)
+}
+
+// DirMover is an optional interface for Fs
+type DirMover interface {
+ // DirMove moves src, srcRemote to this remote at dstRemote
+ // using server side move operations.
+ //
+ // Will only be called if src.Fs().Name() == f.Name()
+ //
+ // If it isn't possible then return fs.ErrorCantDirMove
+ //
+ // If destination exists then return fs.ErrorDirExists
+ DirMove(src Fs, srcRemote, dstRemote string) error
+}
+
+// ChangeNotifier is an optional interface for Fs
+type ChangeNotifier interface {
+ // ChangeNotify calls the passed function with a path
+ // that has had changes. If the implementation
+ // uses polling, it should adhere to the given interval.
+ ChangeNotify(func(string, EntryType), time.Duration) chan bool
+}
+
+// UnWrapper is an optional interface for Fs
+type UnWrapper interface {
+ // UnWrap returns the Fs that this Fs is wrapping
+ UnWrap() Fs
+}
+
+// Wrapper is an optional interface for Fs
+type Wrapper interface {
+ // Wrap returns the Fs that is wrapping this Fs
+ WrapFs() Fs
+ // SetWrapper sets the Fs that is wrapping this Fs
+ SetWrapper(f Fs)
+}
+
+// DirCacheFlusher is an optional interface for Fs
+type DirCacheFlusher interface {
+ // DirCacheFlush resets the directory cache - used in testing
+ // as an optional interface
+ DirCacheFlush()
+}
+
+// PutUncheckeder is an optional interface for Fs
+type PutUncheckeder interface {
+ // Put in to the remote path with the modTime given of the given size
+ //
+ // May create the object even if it returns an error - if so
+ // will return the object and the error, otherwise will return
+ // nil and the error
+ //
+ // May create duplicates or return errors if src already
+ // exists.
+ PutUnchecked(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+}
+
+// PutStreamer is an optional interface for Fs
+type PutStreamer interface {
+ // PutStream uploads to the remote path with the modTime given of indeterminate size
+ //
+ // May create the object even if it returns an error - if so
+ // will return the object and the error, otherwise will return
+ // nil and the error
+ PutStream(in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+}
+
+// PublicLinker is an optional interface for Fs
+type PublicLinker interface {
+ // PublicLink generates a public link to the remote path (usually readable by anyone)
+ PublicLink(remote string) (string, error)
+}
+
+// MergeDirser is an optional interface for Fs
+type MergeDirser interface {
+ // MergeDirs merges the contents of all the directories passed
+ // in into the first one and rmdirs the other directories.
+ MergeDirs([]Directory) error
+}
+
+// CleanUpper is an optional interface for Fs
+type CleanUpper interface {
+ // CleanUp the trash in the Fs
+ //
+ // Implement this if you have a way of emptying the trash or
+ // otherwise cleaning up old versions of files.
+ CleanUp() error
+}
+
+// ListRer is an optional interface for Fs
+type ListRer interface {
+ // ListR lists the objects and directories of the Fs starting
+ // from dir recursively into out.
+ //
+ // dir should be "" to start from the root, and should not
+ // have trailing slashes.
+ //
+ // This should return ErrDirNotFound if the directory isn't
+ // found.
+ //
+ // It should call callback for each tranche of entries read.
+ // These need not be returned in any particular order. If
+ // callback returns an error then the listing will stop
+ // immediately.
+ //
+ // Don't implement this unless you have a more efficient way
+ // of listing recursively than doing a directory traversal.
+ ListR(dir string, callback ListRCallback) error
+}
+
+// RangeSeeker is the interface that wraps the RangeSeek method.
+//
+// Some of the returns from Object.Open() may optionally implement
+// this method for efficiency purposes.
+type RangeSeeker interface {
+ // RangeSeek behaves like a call to Seek(offset int64, whence
+ // int) with the output wrapped in an io.LimitedReader
+ // limiting the total length to limit.
+ //
+ // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
+ RangeSeek(offset int64, whence int, length int64) (int64, error)
+}
+
+// Abouter is an optional interface for Fs
+type Abouter interface {
+ // About gets quota information from the Fs
+ About() (*Usage, error)
+}
+
+// ObjectsChan is a channel of Objects
+type ObjectsChan chan Object
+
+// Objects is a slice of Object
+type Objects []Object
+
+// ObjectPair is a pair of Objects used to describe a potential copy
+// operation.
+type ObjectPair struct {
+ Src, Dst Object
+}
+
+// Find looks for a RegInfo object for the name passed in. The name
+// can be either the Name or the Prefix.
+//
+// Services are looked up in the config file
+func Find(name string) (*RegInfo, error) {
+ for _, item := range Registry {
+ if item.Name == name || item.Prefix == name {
+ return item, nil
+ }
+ }
+ return nil, errors.Errorf("didn't find backend called %q", name)
+}
+
+// MustFind looks for an Info object for the type name passed in
+//
+// Services are looked up in the config file
+//
+// Exits with a fatal error if not found
+func MustFind(name string) *RegInfo {
+ fs, err := Find(name)
+ if err != nil {
+ log.Fatalf("Failed to find remote: %v", err)
+ }
+ return fs
+}
+
+// ParseRemote deconstructs a path into configName, fsPath, looking up
+// the fsName in the config file (returning ErrorNotFoundInConfigFile if not found)
+func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
+ configName, fsPath = fspath.Parse(path)
+ var fsName string
+ var ok bool
+ if configName != "" {
+ if strings.HasPrefix(configName, ":") {
+ fsName = configName[1:]
+ } else {
+ m := ConfigMap(nil, configName)
+ fsName, ok = m.Get("type")
+ if !ok {
+ return nil, "", "", ErrorNotFoundInConfigFile
+ }
+ }
+ } else {
+ fsName = "local"
+ configName = "local"
+ }
+ fsInfo, err = Find(fsName)
+ return fsInfo, configName, fsPath, err
+}
+
+// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
+type configEnvVars string
+
+// Get a config item from the environment variables if possible
+func (configName configEnvVars) Get(key string) (value string, ok bool) {
+ return os.LookupEnv(ConfigToEnv(string(configName), key))
+}
+
+// A configmap.Getter to read from the environment RCLONE_option_name
+type optionEnvVars string
+
+// Get a config item from the option environment variables if possible
+func (prefix optionEnvVars) Get(key string) (value string, ok bool) {
+ return os.LookupEnv(OptionToEnv(string(prefix) + "-" + key))
+}
+
+// A configmap.Getter to read either the default value or the set
+// value from the RegInfo.Options
+type regInfoValues struct {
+ fsInfo *RegInfo
+ useDefault bool
+}
+
+// override the values in configMap with either the flag values or
+// the default values
+func (r *regInfoValues) Get(key string) (value string, ok bool) {
+ for i := range r.fsInfo.Options {
+ o := &r.fsInfo.Options[i]
+ if o.Name == key {
+ if r.useDefault || o.Value != nil {
+ return o.String(), true
+ }
+ break
+ }
+ }
+ return "", false
+}
+
+// A configmap.Setter to read from the config file
+type setConfigFile string
+
+// Set a config item into the config file
+func (section setConfigFile) Set(key, value string) {
+ Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
+ ConfigFileSet(string(section), key, value)
+}
+
+// A configmap.Getter to read from the config file
+type getConfigFile string
+
+// Get a config item from the config file
+func (section getConfigFile) Get(key string) (value string, ok bool) {
+ value, ok = ConfigFileGet(string(section), key)
+ // Ignore empty lines in the config file
+ if value == "" {
+ ok = false
+ }
+ return value, ok
+}
+
+// ConfigMap creates a configmap.Map from the *RegInfo and the
+// configName passed in.
+//
+// If fsInfo is nil then the returned configmap.Map should only be
+// used for reading non backend specific parameters, such as "type".
+func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) {
+ // Create the config
+ config = configmap.New()
+
+ // Read the config, more specific to least specific
+
+ // flag values
+ if fsInfo != nil {
+ config.AddGetter(®InfoValues{fsInfo, false})
+ }
+
+ // remote specific environment vars
+ config.AddGetter(configEnvVars(configName))
+
+ // backend specific environment vars
+ if fsInfo != nil {
+ config.AddGetter(optionEnvVars(fsInfo.Prefix))
+ }
+
+ // config file
+ config.AddGetter(getConfigFile(configName))
+
+ // default values
+ if fsInfo != nil {
+ config.AddGetter(®InfoValues{fsInfo, true})
+ }
+
+ // Set Config
+ config.AddSetter(setConfigFile(configName))
+ return config
+}
+
+// ConfigFs makes the config for calling NewFs with.
+//
+// It parses the path which is of the form remote:path
+//
+// Remotes are looked up in the config file. If the remote isn't
+// found then ErrorNotFoundInConfigFile will be returned.
+func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
+ // Parse the remote path
+ fsInfo, configName, fsPath, err = ParseRemote(path)
+ if err != nil {
+ return
+ }
+ config = ConfigMap(fsInfo, configName)
+ return
+}
+
+// NewFs makes a new Fs object from the path
+//
+// The path is of the form remote:path
+//
+// Remotes are looked up in the config file. If the remote isn't
+// found then ErrorNotFoundInConfigFile will be returned.
+//
+// On Windows avoid single character remote names as they can be mixed
+// up with drive letters.
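+//
+// Typical use (illustrative):
+//
+//	f, err := fs.NewFs("myremote:path/to/dir")
+//	if err != nil {
+//		return err
+//	}
+//	entries, err := f.List("")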
+func NewFs(path string) (Fs, error) {
+ fsInfo, configName, fsPath, config, err := ConfigFs(path)
+ if err != nil {
+ return nil, err
+ }
+ return fsInfo.NewFs(configName, fsPath, config)
+}
+
+// TemporaryLocalFs creates a local FS in the OS's temporary directory.
+//
+// No cleanup is performed, the caller must call Purge on the Fs themselves.
+func TemporaryLocalFs() (Fs, error) {
+ path, err := ioutil.TempDir("", "rclone-spool")
+ if err == nil {
+ err = os.Remove(path)
+ }
+ if err != nil {
+ return nil, err
+ }
+ path = filepath.ToSlash(path)
+ return NewFs(path)
+}
+
+// CheckClose is a utility function used to check the return from
+// Close in a defer statement.
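+//
+// Typical use with a named error return value (sketch):
+//
+//	func read(path string) (err error) {
+//		in, err := os.Open(path)
+//		if err != nil {
+//			return err
+//		}
+//		defer fs.CheckClose(in, &err)
+//		// ... use in ...
+//		return nil
+//	}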
+func CheckClose(c io.Closer, err *error) {
+ cerr := c.Close()
+ if *err == nil {
+ *err = cerr
+ }
+}
+
+// FileExists returns true if a file remote exists.
+// If remote is a directory, FileExists returns false.
+func FileExists(fs Fs, remote string) (bool, error) {
+ _, err := fs.NewObject(remote)
+ if err != nil {
+ if err == ErrorObjectNotFound || err == ErrorNotAFile || err == ErrorPermissionDenied {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
+// GetModifyWindow calculates the maximum modify window between the given Fses
+// and the Config.ModifyWindow parameter.
+func GetModifyWindow(fss ...Info) time.Duration {
+ window := Config.ModifyWindow
+ for _, f := range fss {
+ if f != nil {
+ precision := f.Precision()
+ if precision == ModTimeNotSupported {
+ return ModTimeNotSupported
+ }
+ if precision > window {
+ window = precision
+ }
+ }
+ }
+ return window
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fserrors/error.go b/vendor/github.com/ncw/rclone/fs/fserrors/error.go
new file mode 100755
index 0000000..4357f47
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fserrors/error.go
@@ -0,0 +1,292 @@
+// Package fserrors provides errors and error handling
+package fserrors
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// Retrier is an optional interface for errors to indicate whether the
+// operation should be retried at a high level.
+//
+// This should be returned from Update or Put methods as required
+type Retrier interface {
+ error
+ Retry() bool
+}
+
+// retryError is a type of error
+type retryError string
+
+// Error interface
+func (r retryError) Error() string {
+ return string(r)
+}
+
+// Retry interface
+func (r retryError) Retry() bool {
+ return true
+}
+
+// Check interface
+var _ Retrier = retryError("")
+
+// RetryErrorf makes an error which indicates it would like to be retried
+func RetryErrorf(format string, a ...interface{}) error {
+ return retryError(fmt.Sprintf(format, a...))
+}
+
+// wrappedRetryError is an error wrapped so it will satisfy the
+// Retrier interface and return true
+type wrappedRetryError struct {
+ error
+}
+
+// Retry interface
+func (err wrappedRetryError) Retry() bool {
+ return true
+}
+
+// Check interface
+var _ Retrier = wrappedRetryError{error(nil)}
+
+// RetryError makes an error which indicates it would like to be retried
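+//
+// For example, an Update method might do the following (isTemporary is
+// a hypothetical helper):
+//
+//	if isTemporary(err) {
+//		return fserrors.RetryError(err)
+//	}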
+func RetryError(err error) error {
+ if err == nil {
+ err = errors.New("needs retry")
+ }
+ return wrappedRetryError{err}
+}
+
+// IsRetryError returns true if err conforms to the Retry interface
+// and calling the Retry method returns true.
+func IsRetryError(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, err = Cause(err)
+ if r, ok := err.(Retrier); ok {
+ return r.Retry()
+ }
+ return false
+}
+
+// Fataler is an optional interface for errors to indicate whether the
+// operation should cause the entire operation to finish immediately.
+//
+// This should be returned from Update or Put methods as required
+type Fataler interface {
+ error
+ Fatal() bool
+}
+
+// wrappedFatalError is an error wrapped so it will satisfy the
+// Fataler interface and return true
+type wrappedFatalError struct {
+ error
+}
+
+// Fatal interface
+func (err wrappedFatalError) Fatal() bool {
+ return true
+}
+
+// Check interface
+var _ Fataler = wrappedFatalError{error(nil)}
+
+// FatalError makes an error which indicates it is a fatal error and
+// the sync should stop.
+func FatalError(err error) error {
+ if err == nil {
+ err = errors.New("fatal error")
+ }
+ return wrappedFatalError{err}
+}
+
+// IsFatalError returns true if err conforms to the Fatal interface
+// and calling the Fatal method returns true.
+func IsFatalError(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, err = Cause(err)
+ if r, ok := err.(Fataler); ok {
+ return r.Fatal()
+ }
+ return false
+}
+
+// NoRetrier is an optional interface for errors to indicate whether the
+// operation should not be retried at a high level.
+//
+// If only NoRetry errors are returned in a sync then the sync won't
+// be retried.
+//
+// This should be returned from Update or Put methods as required
+type NoRetrier interface {
+ error
+ NoRetry() bool
+}
+
+// wrappedNoRetryError is an error wrapped so it will satisfy the
+// NoRetrier interface and return true
+type wrappedNoRetryError struct {
+ error
+}
+
+// NoRetry interface
+func (err wrappedNoRetryError) NoRetry() bool {
+ return true
+}
+
+// Check interface
+var _ NoRetrier = wrappedNoRetryError{error(nil)}
+
+// NoRetryError makes an error which indicates the sync shouldn't be
+// retried.
+func NoRetryError(err error) error {
+ return wrappedNoRetryError{err}
+}
+
+// IsNoRetryError returns true if err conforms to the NoRetry
+// interface and calling the NoRetry method returns true.
+func IsNoRetryError(err error) bool {
+ if err == nil {
+ return false
+ }
+ _, err = Cause(err)
+ if r, ok := err.(NoRetrier); ok {
+ return r.NoRetry()
+ }
+ return false
+}
+
+// Cause is a souped up errors.Cause which can unwrap some standard
+// library errors too. It returns true if any of the intermediate
+// errors had a Timeout() or Temporary() method which returned true.
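+//
+// For example (illustrative):
+//
+//	retriable, root := Cause(&url.Error{Op: "Get", Err: io.ErrUnexpectedEOF})
+//	// root == io.ErrUnexpectedEOF, retriable == false - the url.Error's
+//	// Err field is unpacked but neither layer reports Timeout/Temporary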
+func Cause(cause error) (retriable bool, err error) {
+ err = cause
+ for prev := err; err != nil; prev = err {
+ // Check for net error Timeout()
+ if x, ok := err.(interface {
+ Timeout() bool
+ }); ok && x.Timeout() {
+ retriable = true
+ }
+
+ // Check for net error Temporary()
+ if x, ok := err.(interface {
+ Temporary() bool
+ }); ok && x.Temporary() {
+ retriable = true
+ }
+
+ // Unwrap 1 level if possible
+ err = errors.Cause(err)
+ if err == nil {
+ // errors.Cause can return nil which isn't
+ // desirable so pick the previous error in
+ // this case.
+ err = prev
+ }
+ if err == prev {
+ // Unpack any struct or *struct with a field
+ // of name Err which satisfies the error
+ // interface. This includes *url.Error,
+ // *net.OpError, *os.SyscallError and many
+ // others in the stdlib
+ errType := reflect.TypeOf(err)
+ errValue := reflect.ValueOf(err)
+ if errValue.IsValid() && errType.Kind() == reflect.Ptr {
+ errType = errType.Elem()
+ errValue = errValue.Elem()
+ }
+ if errValue.IsValid() && errType.Kind() == reflect.Struct {
+ if errField := errValue.FieldByName("Err"); errField.IsValid() {
+ errFieldValue := errField.Interface()
+ if newErr, ok := errFieldValue.(error); ok {
+ err = newErr
+ }
+ }
+ }
+ }
+ if err == prev {
+ break
+ }
+ }
+ return retriable, err
+}
+
+// retriableErrorStrings is a list of phrases which, when we find one
+// in an error, tell us it is a networking error which should be
+// retried.
+//
+// This is incredibly ugly - if only errors.Cause worked for all
+// errors and all errors were exported from the stdlib.
+var retriableErrorStrings = []string{
+ "use of closed network connection", // internal/poll/fd.go
+ "unexpected EOF reading trailer", // net/http/transfer.go
+ "transport connection broken", // net/http/transport.go
+ "http: ContentLength=", // net/http/transfer.go
+}
+
+// Errors which indicate networking errors which should be retried
+//
+// These are added to in retriable_errors*.go
+var retriableErrors = []error{
+ io.EOF,
+ io.ErrUnexpectedEOF,
+}
+
+// ShouldRetry looks at an error and tries to work out if retrying the
+// operation that caused it would be a good idea. It returns true if
+// the error implements Timeout() or Temporary() or if the error
+// indicates a premature closing of the connection.
+func ShouldRetry(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // Find root cause if available
+ retriable, err := Cause(err)
+ if retriable {
+ return true
+ }
+
+ // Check if it is a retriable error
+ for _, retriableErr := range retriableErrors {
+ if err == retriableErr {
+ return true
+ }
+ }
+
+ // Check error strings (yuch!) too
+ errString := err.Error()
+ for _, phrase := range retriableErrorStrings {
+ if strings.Contains(errString, phrase) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ShouldRetryHTTP returns a boolean as to whether this resp deserves a retry.
+// It checks to see if the HTTP response code is in the slice
+// retryErrorCodes.
+func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
+ if resp == nil {
+ return false
+ }
+ for _, e := range retryErrorCodes {
+ if resp.StatusCode == e {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors.go b/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors.go
new file mode 100755
index 0000000..9ec0b5b
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors.go
@@ -0,0 +1,21 @@
+// +build !plan9
+
+package fserrors
+
+import (
+ "syscall"
+)
+
+func init() {
+ retriableErrors = append(retriableErrors,
+ syscall.EPIPE,
+ syscall.ETIMEDOUT,
+ syscall.ECONNREFUSED,
+ syscall.EHOSTDOWN,
+ syscall.EHOSTUNREACH,
+ syscall.ECONNABORTED,
+ syscall.EAGAIN,
+ syscall.EWOULDBLOCK,
+ syscall.ECONNRESET,
+ )
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors_windows.go b/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors_windows.go
new file mode 100755
index 0000000..55c8c59
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors_windows.go
@@ -0,0 +1,31 @@
+// +build windows
+
+package fserrors
+
+import (
+ "syscall"
+)
+
+const (
+ WSAECONNABORTED syscall.Errno = 10053
+ WSAHOST_NOT_FOUND syscall.Errno = 11001
+ WSATRY_AGAIN syscall.Errno = 11002
+ WSAENETRESET syscall.Errno = 10052
+ WSAETIMEDOUT syscall.Errno = 10060
+)
+
+func init() {
+ // append some lower level errors since the standardized ones
+ // don't seem to happen
+ retriableErrors = append(retriableErrors,
+ syscall.WSAECONNRESET,
+ WSAECONNABORTED,
+ WSAHOST_NOT_FOUND,
+ WSATRY_AGAIN,
+ WSAENETRESET,
+ WSAETIMEDOUT,
+ syscall.ERROR_HANDLE_EOF,
+ syscall.ERROR_NETNAME_DELETED,
+ syscall.ERROR_BROKEN_PIPE,
+ )
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fshttp/http.go b/vendor/github.com/ncw/rclone/fs/fshttp/http.go
new file mode 100755
index 0000000..e8240c9
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fshttp/http.go
@@ -0,0 +1,312 @@
+// Package fshttp contains the common http parts of the config, Transport and Client
+package fshttp
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "golang.org/x/time/rate"
+)
+
+const (
+ separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
+ separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
+)
+
+var (
+ transport http.RoundTripper
+ noTransport sync.Once
+ tpsBucket *rate.Limiter // for limiting number of http transactions per second
+)
+
+// StartHTTPTokenBucket starts the token bucket if necessary
+func StartHTTPTokenBucket() {
+ if fs.Config.TPSLimit > 0 {
+ tpsBurst := fs.Config.TPSLimitBurst
+ if tpsBurst < 1 {
+ tpsBurst = 1
+ }
+ tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
+ fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
+ }
+}
+
+// A net.Conn that sets a deadline for every Read or Write operation
+type timeoutConn struct {
+ net.Conn
+ timeout time.Duration
+}
+
+// create a timeoutConn using the timeout
+func newTimeoutConn(conn net.Conn, timeout time.Duration) (c *timeoutConn, err error) {
+ c = &timeoutConn{
+ Conn: conn,
+ timeout: timeout,
+ }
+ err = c.nudgeDeadline()
+ return
+}
+
+// nudgeDeadline moves the idle timeout deadline on by c.timeout if non-zero
+func (c *timeoutConn) nudgeDeadline() (err error) {
+ if c.timeout == 0 {
+ return nil
+ }
+ when := time.Now().Add(c.timeout)
+ return c.Conn.SetDeadline(when)
+}
+
+// readOrWrite bytes doing idle timeouts
+func (c *timeoutConn) readOrWrite(f func([]byte) (int, error), b []byte) (n int, err error) {
+ n, err = f(b)
+ // Don't nudge if no bytes or an error
+ if n == 0 || err != nil {
+ return
+ }
+ // Nudge the deadline on successful Read or Write
+ err = c.nudgeDeadline()
+ return
+}
+
+// Read bytes doing idle timeouts
+func (c *timeoutConn) Read(b []byte) (n int, err error) {
+ return c.readOrWrite(c.Conn.Read, b)
+}
+
+// Write bytes doing idle timeouts
+func (c *timeoutConn) Write(b []byte) (n int, err error) {
+ return c.readOrWrite(c.Conn.Write, b)
+}
+
+// setDefaults for a from b
+//
+// Copy the public members from b to a. We can't just use a struct
+// copy as Transport contains a private mutex.
+func setDefaults(a, b interface{}) {
+ pt := reflect.TypeOf(a)
+ t := pt.Elem()
+ va := reflect.ValueOf(a).Elem()
+ vb := reflect.ValueOf(b).Elem()
+ for i := 0; i < t.NumField(); i++ {
+ aField := va.Field(i)
+ // Set a from b if it is public
+ if aField.CanSet() {
+ bField := vb.Field(i)
+ aField.Set(bField)
+ }
+ }
+}
+
+// dial with context and timeouts
+func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
+ dialer := NewDialer(ci)
+ c, err := dialer.DialContext(ctx, network, address)
+ if err != nil {
+ return c, err
+ }
+ return newTimeoutConn(c, ci.Timeout)
+}
+
+// NewTransport returns an http.RoundTripper with the correct timeouts
+func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
+ noTransport.Do(func() {
+ // Start with a sensible set of defaults then override.
+ // This also means we get new stuff when it gets added to go
+ t := new(http.Transport)
+ setDefaults(t, http.DefaultTransport.(*http.Transport))
+ t.Proxy = http.ProxyFromEnvironment
+ t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
+ t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
+ t.TLSHandshakeTimeout = ci.ConnectTimeout
+ t.ResponseHeaderTimeout = ci.Timeout
+ t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
+ t.DisableCompression = ci.NoGzip
+ t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return dialContextTimeout(ctx, network, addr, ci)
+ }
+ t.IdleConnTimeout = 60 * time.Second
+ t.ExpectContinueTimeout = ci.ConnectTimeout
+ // Wrap that http.Transport in our own transport
+ transport = newTransport(ci, t)
+ })
+ return transport
+}
+
+// NewClient returns an http.Client with the correct timeouts
+func NewClient(ci *fs.ConfigInfo) *http.Client {
+ return &http.Client{
+ Transport: NewTransport(ci),
+ }
+}
+
+// Transport is our http Transport which wraps an http.Transport
+// * Sets the User Agent
+// * Does logging
+type Transport struct {
+ *http.Transport
+ dump fs.DumpFlags
+ filterRequest func(req *http.Request)
+ userAgent string
+}
+
+// newTransport wraps the http.Transport passed in and logs all
+// roundtrips, including the bodies if the dump flags request them.
+func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
+ return &Transport{
+ Transport: transport,
+ dump: ci.Dump,
+ userAgent: ci.UserAgent,
+ }
+}
+
+// SetRequestFilter sets a filter to be used on each request
+func (t *Transport) SetRequestFilter(f func(req *http.Request)) {
+ t.filterRequest = f
+}
+
+// A mutex to protect this map
+var checkedHostMu sync.RWMutex
+
+// A map of servers we have checked for time
+var checkedHost = make(map[string]struct{}, 1)
+
+// Check the server time is the same as ours, once for each server
+func checkServerTime(req *http.Request, resp *http.Response) {
+ host := req.URL.Host
+ if req.Host != "" {
+ host = req.Host
+ }
+ checkedHostMu.RLock()
+ _, ok := checkedHost[host]
+ checkedHostMu.RUnlock()
+ if ok {
+ return
+ }
+ dateString := resp.Header.Get("Date")
+ if dateString == "" {
+ return
+ }
+ date, err := http.ParseTime(dateString)
+ if err != nil {
+ fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
+ return
+ }
+ dt := time.Since(date)
+ const window = 5 * 60 * time.Second
+ if dt > window || dt < -window {
+ fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
+ }
+ checkedHostMu.Lock()
+ checkedHost[host] = struct{}{}
+ checkedHostMu.Unlock()
+}
+
+// cleanAuth gets rid of one authBuf header within the first 4k
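+//
+// For example (illustrative):
+//
+//	cleanAuth([]byte("Authorization: Bearer secret\nHost: x\n"), []byte("Authorization: "))
+//	// -> "Authorization: XXXX\nHost: x\n"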
+func cleanAuth(buf, authBuf []byte) []byte {
+ // Find how much buffer to check
+ n := 4096
+ if len(buf) < n {
+ n = len(buf)
+ }
+ // See if there is an Authorization: header
+ i := bytes.Index(buf[:n], authBuf)
+ if i < 0 {
+ return buf
+ }
+ i += len(authBuf)
+ // Overwrite the next 4 chars with 'X'
+ for j := 0; i < len(buf) && j < 4; j++ {
+ if buf[i] == '\n' {
+ break
+ }
+ buf[i] = 'X'
+ i++
+ }
+ // Snip out to the next '\n'
+ j := bytes.IndexByte(buf[i:], '\n')
+ if j < 0 {
+ return buf[:i]
+ }
+ n = copy(buf[i:], buf[i+j:])
+ return buf[:i+n]
+}
+
+var authBufs = [][]byte{
+ []byte("Authorization: "),
+ []byte("X-Auth-Token: "),
+}
+
+// cleanAuths gets rid of all the possible Auth headers
+func cleanAuths(buf []byte) []byte {
+ for _, authBuf := range authBufs {
+ buf = cleanAuth(buf, authBuf)
+ }
+ return buf
+}
+
+// RoundTrip implements the RoundTripper interface.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ // Get transactions per second token first if limiting
+ if tpsBucket != nil {
+ tbErr := tpsBucket.Wait(req.Context())
+ if tbErr != nil {
+ fs.Errorf(nil, "HTTP token bucket error: %v", err)
+ }
+ }
+ // Force user agent
+ req.Header.Set("User-Agent", t.userAgent)
+ // Filter the request if required
+ if t.filterRequest != nil {
+ t.filterRequest(req)
+ }
+ // Logf request
+ if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+ buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
+ if t.dump&fs.DumpAuth == 0 {
+ buf = cleanAuths(buf)
+ }
+ fs.Debugf(nil, "%s", separatorReq)
+ fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
+ fs.Debugf(nil, "%s", string(buf))
+ fs.Debugf(nil, "%s", separatorReq)
+ }
+ // Do round trip
+ resp, err = t.Transport.RoundTrip(req)
+ // Logf response
+ if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+ fs.Debugf(nil, "%s", separatorResp)
+ fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
+ if err != nil {
+ fs.Debugf(nil, "Error: %v", err)
+ } else {
+ buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
+ fs.Debugf(nil, "%s", string(buf))
+ }
+ fs.Debugf(nil, "%s", separatorResp)
+ }
+ if err == nil {
+ checkServerTime(req, resp)
+ }
+ return resp, err
+}
+
+// NewDialer creates a net.Dialer structure with Timeout, Keepalive
+// and LocalAddr set from rclone flags.
+func NewDialer(ci *fs.ConfigInfo) *net.Dialer {
+ dialer := &net.Dialer{
+ Timeout: ci.ConnectTimeout,
+ KeepAlive: 30 * time.Second,
+ }
+ if ci.BindAddr != nil {
+ dialer.LocalAddr = &net.TCPAddr{IP: ci.BindAddr}
+ }
+ return dialer
+}
diff --git a/vendor/github.com/ncw/rclone/fs/fspath/path.go b/vendor/github.com/ncw/rclone/fs/fspath/path.go
new file mode 100755
index 0000000..baa89bb
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/fspath/path.go
@@ -0,0 +1,50 @@
+// Package fspath contains routines for fspath manipulation
+package fspath
+
+import (
+ "path"
+ "path/filepath"
+ "regexp"
+
+ "github.com/ncw/rclone/fs/driveletter"
+)
+
+// Matcher is a pattern to match an rclone URL
+var Matcher = regexp.MustCompile(`^(:?[\w_ -]+):(.*)$`)
+
+// Parse deconstructs a remote path into configName and fsPath
+//
+// If the path is a local path then configName will be returned as "".
+//
+// So "remote:path/to/dir" will return "remote", "path/to/dir"
+// and "/path/to/local" will return ("", "/path/to/local")
+//
+// Note that this will turn \ into / in the fsPath on Windows
+func Parse(path string) (configName, fsPath string) {
+ parts := Matcher.FindStringSubmatch(path)
+ configName, fsPath = "", path
+ if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
+ configName, fsPath = parts[1], parts[2]
+ }
+ // change native directory separators to / if there are any
+ fsPath = filepath.ToSlash(fsPath)
+ return configName, fsPath
+}
+
+// Split splits a remote into a parent and a leaf
+//
+// if it returns leaf as an empty string then remote is a directory
+//
+// if it returns parent as an empty string then that means the current directory
+//
+// The returned values have the property that parent + leaf == remote
+// (except under Windows where \ will be translated into /)
+func Split(remote string) (parent string, leaf string) {
+ remoteName, remotePath := Parse(remote)
+ if remoteName != "" {
+ remoteName += ":"
+ }
+ // Construct new remote name without last segment
+ parent, leaf = path.Split(remotePath)
+ return remoteName + parent, leaf
+}
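
A minimal usage sketch for Parse and Split (hypothetical example, not vendored code):

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs/fspath"
    )

    func main() {
        configName, fsPath := fspath.Parse("remote:path/to/dir")
        fmt.Println(configName, fsPath) // remote path/to/dir

        configName, fsPath = fspath.Parse("/path/to/local")
        fmt.Println(configName, fsPath) // "" /path/to/local

        // parent + leaf == remote, as documented above
        parent, leaf := fspath.Split("remote:path/to/file.txt")
        fmt.Println(parent, leaf) // remote:path/to/ file.txt
    }
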
diff --git a/vendor/github.com/ncw/rclone/fs/hash/hash.go b/vendor/github.com/ncw/rclone/fs/hash/hash.go
new file mode 100755
index 0000000..57b206b
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/hash/hash.go
@@ -0,0 +1,308 @@
+package hash
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "io"
+ "strings"
+
+ "github.com/ncw/rclone/backend/dropbox/dbhash"
+ "github.com/ncw/rclone/backend/onedrive/quickxorhash"
+ "github.com/pkg/errors"
+)
+
+// Type indicates a standard hashing algorithm
+type Type int
+
+// ErrUnsupported should be returned by a filesystem
+// if it is requested to deliver an unsupported hash type.
+var ErrUnsupported = errors.New("hash type not supported")
+
+const (
+ // MD5 indicates MD5 support
+ MD5 Type = 1 << iota
+
+ // SHA1 indicates SHA-1 support
+ SHA1
+
+ // Dropbox indicates Dropbox special hash
+ // https://www.dropbox.com/developers/reference/content-hash
+ Dropbox
+
+ // QuickXorHash indicates Microsoft onedrive hash
+ // https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
+ QuickXorHash
+
+ // None indicates no hashes are supported
+ None Type = 0
+)
+
+// Supported returns a set of all the supported hashes by
+// HashStream and MultiHasher.
+var Supported = NewHashSet(MD5, SHA1, Dropbox, QuickXorHash)
+
+// Width returns the width in characters for any HashType
+var Width = map[Type]int{
+ MD5: 32,
+ SHA1: 40,
+ Dropbox: 64,
+ QuickXorHash: 40,
+}
+
+// Stream will calculate hashes of all supported hash types.
+func Stream(r io.Reader) (map[Type]string, error) {
+ return StreamTypes(r, Supported)
+}
+
+// StreamTypes will calculate hashes of the requested hash types.
+func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
+ hashers, err := fromTypes(set)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = io.Copy(toMultiWriter(hashers), r)
+ if err != nil {
+ return nil, err
+ }
+ var ret = make(map[Type]string)
+ for k, v := range hashers {
+ ret[k] = hex.EncodeToString(v.Sum(nil))
+ }
+ return ret, nil
+}
+
+// String returns a string representation of the hash type.
+// The function will panic if the hash type is unknown.
+func (h Type) String() string {
+ switch h {
+ case None:
+ return "None"
+ case MD5:
+ return "MD5"
+ case SHA1:
+ return "SHA-1"
+ case Dropbox:
+ return "DropboxHash"
+ case QuickXorHash:
+ return "QuickXorHash"
+ default:
+ err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
+ panic(err)
+ }
+}
+
+// Set a Type from a flag
+func (h *Type) Set(s string) error {
+ switch s {
+ case "None":
+ *h = None
+ case "MD5":
+ *h = MD5
+ case "SHA-1":
+ *h = SHA1
+ case "DropboxHash":
+ *h = Dropbox
+ case "QuickXorHash":
+ *h = QuickXorHash
+ default:
+ return errors.Errorf("Unknown hash type %q", s)
+ }
+ return nil
+}
+
+// Type of the value
+func (h Type) Type() string {
+ return "string"
+}
+
+// fromTypes will return hashers for all the requested types.
+// The types must be a subset of Supported,
+// and this function must support all types.
+func fromTypes(set Set) (map[Type]hash.Hash, error) {
+ if !set.SubsetOf(Supported) {
+ return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
+ }
+ var hashers = make(map[Type]hash.Hash)
+ types := set.Array()
+ for _, t := range types {
+ switch t {
+ case MD5:
+ hashers[t] = md5.New()
+ case SHA1:
+ hashers[t] = sha1.New()
+ case Dropbox:
+ hashers[t] = dbhash.New()
+ case QuickXorHash:
+ hashers[t] = quickxorhash.New()
+ default:
+ err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
+ panic(err)
+ }
+ }
+ return hashers, nil
+}
+
+// toMultiWriter combines a set of hashers into a
+// single multiwriter, where one write will update all
+// the hashers.
+func toMultiWriter(h map[Type]hash.Hash) io.Writer {
+ // Convert the map to a slice
+ var w = make([]io.Writer, 0, len(h))
+ for _, v := range h {
+ w = append(w, v)
+ }
+ return io.MultiWriter(w...)
+}
+
+// A MultiHasher will construct various hashes on
+// all incoming writes.
+type MultiHasher struct {
+ w io.Writer
+ size int64
+ h map[Type]hash.Hash // Hashes
+}
+
+// NewMultiHasher will return a hash writer that will write all
+// supported hash types.
+func NewMultiHasher() *MultiHasher {
+ h, err := NewMultiHasherTypes(Supported)
+ if err != nil {
+ panic("internal error: could not create multihasher")
+ }
+ return h
+}
+
+// NewMultiHasherTypes will return a hash writer that will write
+// the requested hash types.
+func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
+ hashers, err := fromTypes(set)
+ if err != nil {
+ return nil, err
+ }
+ m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
+ return &m, nil
+}
+
+func (m *MultiHasher) Write(p []byte) (n int, err error) {
+ n, err = m.w.Write(p)
+ m.size += int64(n)
+ return n, err
+}
+
+// Sums returns the sums of all accumulated hashes as hex encoded
+// strings.
+func (m *MultiHasher) Sums() map[Type]string {
+ dst := make(map[Type]string)
+ for k, v := range m.h {
+ dst[k] = hex.EncodeToString(v.Sum(nil))
+ }
+ return dst
+}
+
+// Size returns the number of bytes written
+func (m *MultiHasher) Size() int64 {
+ return m.size
+}
+
+// A Set Indicates one or more hash types.
+type Set int
+
+// NewHashSet will create a new hash set with the hash types supplied
+func NewHashSet(t ...Type) Set {
+ h := Set(None)
+ return h.Add(t...)
+}
+
+// Add one or more hash types to the set.
+// Returns the modified hash set.
+func (h *Set) Add(t ...Type) Set {
+ for _, v := range t {
+ *h |= Set(v)
+ }
+ return *h
+}
+
+// Contains returns true if the set contains the given hash type
+func (h Set) Contains(t Type) bool {
+ return int(h)&int(t) != 0
+}
+
+// Overlap returns the overlapping hash types
+func (h Set) Overlap(t Set) Set {
+ return Set(int(h) & int(t))
+}
+
+// SubsetOf will return true if all types of h
+// are present in the set c
+func (h Set) SubsetOf(c Set) bool {
+ return int(h)|int(c) == int(c)
+}
+
+// GetOne will return a hash type.
+// Currently the first is returned, but it could be
+// improved to return the strongest.
+func (h Set) GetOne() Type {
+ v := int(h)
+ i := uint(0)
+ for v != 0 {
+ if v&1 != 0 {
+ return Type(1 << i)
+ }
+ i++
+ v >>= 1
+ }
+ return Type(None)
+}
+
+// Array returns an array of all hash types in the set
+func (h Set) Array() (ht []Type) {
+ v := int(h)
+ i := uint(0)
+ for v != 0 {
+ if v&1 != 0 {
+ ht = append(ht, Type(1 << i))
+ }
+ i++
+ v >>= 1
+ }
+ return ht
+}
+
+// Count returns the number of hash types in the set
+func (h Set) Count() int {
+ if int(h) == 0 {
+ return 0
+ }
+ // credit: https://code.google.com/u/arnehormann/
+ x := uint64(h)
+ x -= (x >> 1) & 0x5555555555555555
+ x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
+ x += x >> 4
+ x &= 0x0f0f0f0f0f0f0f0f
+ x *= 0x0101010101010101
+ return int(x >> 56)
+}
+
+// String returns a string representation of the hash set.
+// The function will panic if it contains an unknown type.
+func (h Set) String() string {
+ a := h.Array()
+ var r []string
+ for _, v := range a {
+ r = append(r, v.String())
+ }
+ return "[" + strings.Join(r, ", ") + "]"
+}
+
+// Equals checks to see if src == dst, but ignores empty strings
+// and returns true if either is empty.
+func Equals(src, dst string) bool {
+ if src == "" || dst == "" {
+ return true
+ }
+ return src == dst
+}
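
A short sketch of the two main entry points above, StreamTypes for one-shot hashing and MultiHasher for incremental hashing (hypothetical example):

    package main

    import (
        "fmt"
        "strings"

        "github.com/ncw/rclone/fs/hash"
    )

    func main() {
        // One-shot: hash a stream with a restricted set of types.
        set := hash.NewHashSet(hash.MD5, hash.SHA1)
        sums, err := hash.StreamTypes(strings.NewReader("hello"), set)
        if err != nil {
            panic(err)
        }
        fmt.Println(sums[hash.MD5], sums[hash.SHA1])

        // Incremental: write data as it arrives, then read the sums.
        mh, _ := hash.NewMultiHasherTypes(set)
        _, _ = mh.Write([]byte("hello"))
        fmt.Println(mh.Size(), mh.Sums()[hash.MD5])
    }
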
diff --git a/vendor/github.com/ncw/rclone/fs/list/list.go b/vendor/github.com/ncw/rclone/fs/list/list.go
new file mode 100755
index 0000000..4e213ad
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/list/list.go
@@ -0,0 +1,102 @@
+// Package list contains list functions
+package list
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/filter"
+ "github.com/pkg/errors"
+)
+
+// DirSorted reads Object and *Dir into entries for the given Fs.
+//
+// dir is the start directory, "" for root
+//
+// If includeAll is specified all files will be added, otherwise only
+// files and directories passing the filter will be added.
+//
+// Files will be returned in sorted order
+func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
+ // Get unfiltered entries from the fs
+ entries, err = f.List(dir)
+ if err != nil {
+ return nil, err
+ }
+ // This should happen only if an exclude file lives in the
+ // starting directory, otherwise DirSorted should not be
+ // called.
+ if !includeAll && filter.Active.ListContainsExcludeFile(entries) {
+ fs.Debugf(dir, "Excluded from sync (and deletion)")
+ return nil, nil
+ }
+ return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f))
+}
+
+// filter (if required) and check the entries, then sort them
+func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string,
+ IncludeObject func(o fs.Object) bool,
+ IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
+ newEntries = entries[:0] // in place filter
+ prefix := ""
+ if dir != "" {
+ prefix = dir + "/"
+ }
+ for _, entry := range entries {
+ ok := true
+ // check includes and types
+ switch x := entry.(type) {
+ case fs.Object:
+ // Make sure we don't delete excluded files if not required
+ if !includeAll && !IncludeObject(x) {
+ ok = false
+ fs.Debugf(x, "Excluded from sync (and deletion)")
+ }
+ case fs.Directory:
+ if !includeAll {
+ include, err := IncludeDirectory(x.Remote())
+ if err != nil {
+ return nil, err
+ }
+ if !include {
+ ok = false
+ fs.Debugf(x, "Excluded from sync (and deletion)")
+ }
+ }
+ default:
+ return nil, errors.Errorf("unknown object type %T", entry)
+ }
+ // check that the remote name belongs in this directory
+ remote := entry.Remote()
+ switch {
+ case !ok:
+ // ignore
+ case !strings.HasPrefix(remote, prefix):
+ ok = false
+ fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
+ case remote == prefix:
+ ok = false
+ fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
+ case strings.ContainsRune(remote[len(prefix):], '/'):
+ ok = false
+ fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
+ default:
+ // ok
+ }
+ if ok {
+ newEntries = append(newEntries, entry)
+ }
+ }
+ entries = newEntries
+
+ // Sort the directory entries by Remote
+ //
+ // We use a stable sort here just in case there are
+ // duplicates. Assuming the remote delivers the entries in a
+ // consistent order, this will give the best user experience
+ // in syncing as it will use the first entry for the sync
+ // comparison.
+ sort.Stable(entries)
+ return entries, nil
+}
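
DirSorted is the per-directory primitive used by the walker further on in this vendor tree; a sketch of calling it directly (f stands for any opened fs.Fs, hypothetical here):

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
        "github.com/ncw/rclone/fs/list"
    )

    // printDir lists a single directory of f, filtered and sorted.
    func printDir(f fs.Fs, dir string) error {
        entries, err := list.DirSorted(f, false, dir)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            fmt.Println(entry.Remote())
        }
        return nil
    }
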
diff --git a/vendor/github.com/ncw/rclone/fs/log.go b/vendor/github.com/ncw/rclone/fs/log.go
new file mode 100755
index 0000000..94f61b9
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/log.go
@@ -0,0 +1,135 @@
+package fs
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/pkg/errors"
+)
+
+// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
+type LogLevel byte
+
+// Log levels. These are the syslog levels of which we only use a
+// subset.
+//
+// LOG_EMERG system is unusable
+// LOG_ALERT action must be taken immediately
+// LOG_CRIT critical conditions
+// LOG_ERR error conditions
+// LOG_WARNING warning conditions
+// LOG_NOTICE normal, but significant, condition
+// LOG_INFO informational message
+// LOG_DEBUG debug-level message
+const (
+ LogLevelEmergency LogLevel = iota
+ LogLevelAlert
+ LogLevelCritical
+ LogLevelError // Error - can't be suppressed
+ LogLevelWarning
+ LogLevelNotice // Normal logging, -q suppresses
+ LogLevelInfo // Transfers, needs -v
+ LogLevelDebug // Debug level, needs -vv
+)
+
+var logLevelToString = []string{
+ LogLevelEmergency: "EMERGENCY",
+ LogLevelAlert: "ALERT",
+ LogLevelCritical: "CRITICAL",
+ LogLevelError: "ERROR",
+ LogLevelWarning: "WARNING",
+ LogLevelNotice: "NOTICE",
+ LogLevelInfo: "INFO",
+ LogLevelDebug: "DEBUG",
+}
+
+// String turns a LogLevel into a string
+func (l LogLevel) String() string {
+ if l >= LogLevel(len(logLevelToString)) {
+ return fmt.Sprintf("LogLevel(%d)", l)
+ }
+ return logLevelToString[l]
+}
+
+// Set a LogLevel
+func (l *LogLevel) Set(s string) error {
+ for n, name := range logLevelToString {
+ if s != "" && name == s {
+ *l = LogLevel(n)
+ return nil
+ }
+ }
+ return errors.Errorf("Unknown log level %q", s)
+}
+
+// Type of the value
+func (l *LogLevel) Type() string {
+ return "string"
+}
+
+// LogPrint sends the text to the logger of level
+var LogPrint = func(level LogLevel, text string) {
+ text = fmt.Sprintf("%-6s: %s", level, text)
+ log.Print(text)
+}
+
+// LogPrintf produces a log string from the arguments passed in
+func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
+ out := fmt.Sprintf(text, args...)
+ if o != nil {
+ out = fmt.Sprintf("%v: %s", o, out)
+ }
+ LogPrint(level, out)
+}
+
+// LogLevelPrintf writes logs at the given level
+func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
+ if Config.LogLevel >= level {
+ LogPrintf(level, o, text, args...)
+ }
+}
+
+// Errorf writes error log output for this Object or Fs. It
+// should always be seen by the user.
+func Errorf(o interface{}, text string, args ...interface{}) {
+ if Config.LogLevel >= LogLevelError {
+ LogPrintf(LogLevelError, o, text, args...)
+ }
+}
+
+// Logf writes log output for this Object or Fs. This should be
+// considered to be Info level logging. It is the default level. By
+// default rclone should not log very much so only use this for
+// important things the user should see. The user can filter these
+// out with the -q flag.
+func Logf(o interface{}, text string, args ...interface{}) {
+ if Config.LogLevel >= LogLevelNotice {
+ LogPrintf(LogLevelNotice, o, text, args...)
+ }
+}
+
+// Infof writes info on transfers for this Object or Fs. Use this
+// level for logging transfers, deletions and things which should
+// appear with the -v flag.
+func Infof(o interface{}, text string, args ...interface{}) {
+ if Config.LogLevel >= LogLevelInfo {
+ LogPrintf(LogLevelInfo, o, text, args...)
+ }
+}
+
+// Debugf writes debugging output for this Object or Fs. Use this for
+// debug only. The user must have to specify -vv to see this.
+func Debugf(o interface{}, text string, args ...interface{}) {
+ if Config.LogLevel >= LogLevelDebug {
+ LogPrintf(LogLevelDebug, o, text, args...)
+ }
+}
+
+// LogDirName returns an object for the logger, logging a root
+// directory which would normally be "" as the Fs
+func LogDirName(f Fs, dir string) interface{} {
+ if dir != "" {
+ return dir
+ }
+ return f
+}
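
The helpers above map directly onto rclone's verbosity flags; a sketch of typical call sites (o is any Object or Fs, hypothetical):

    package main

    import "github.com/ncw/rclone/fs"

    // logExamples shows which flag reveals each level.
    func logExamples(o interface{}, err error) {
        fs.Errorf(o, "failed: %v", err) // always shown
        fs.Logf(o, "important notice")  // default level, -q suppresses
        fs.Infof(o, "transferred")      // needs -v
        fs.Debugf(o, "internal detail") // needs -vv
    }
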
diff --git a/vendor/github.com/ncw/rclone/fs/mimetype.go b/vendor/github.com/ncw/rclone/fs/mimetype.go
new file mode 100755
index 0000000..7323adf
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/mimetype.go
@@ -0,0 +1,44 @@
+package fs
+
+import (
+ "mime"
+ "path"
+ "strings"
+)
+
+// MimeTypeFromName returns a guess at the mime type from the name
+func MimeTypeFromName(remote string) (mimeType string) {
+ mimeType = mime.TypeByExtension(path.Ext(remote))
+ if !strings.ContainsRune(mimeType, '/') {
+ mimeType = "application/octet-stream"
+ }
+ return mimeType
+}
+
+// MimeType returns the MimeType from the object, either by calling
+// the MimeTyper interface or using MimeTypeFromName
+func MimeType(o ObjectInfo) (mimeType string) {
+ // Read the MimeType from the optional interface if available
+ if do, ok := o.(MimeTyper); ok {
+ mimeType = do.MimeType()
+ // Debugf(o, "Read MimeType as %q", mimeType)
+ if mimeType != "" {
+ return mimeType
+ }
+ }
+ return MimeTypeFromName(o.Remote())
+}
+
+// MimeTypeDirEntry returns the MimeType of a DirEntry
+//
+// It returns "inode/directory" for directories, or uses
+// MimeType(Object)
+func MimeTypeDirEntry(item DirEntry) string {
+ switch x := item.(type) {
+ case Object:
+ return MimeType(x)
+ case Directory:
+ return "inode/directory"
+ }
+ return ""
+}
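
MimeTypeFromName defers to Go's mime package and falls back to application/octet-stream for unknown extensions; a sketch:

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    func main() {
        fmt.Println(fs.MimeTypeFromName("photo.png"))   // image/png
        fmt.Println(fs.MimeTypeFromName("notes.weird")) // application/octet-stream
    }
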
diff --git a/vendor/github.com/ncw/rclone/fs/object/object.go b/vendor/github.com/ncw/rclone/fs/object/object.go
new file mode 100755
index 0000000..e11827b
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/object/object.go
@@ -0,0 +1,239 @@
+// Package object defines some useful Objects
+package object
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/hash"
+)
+
+// NewStaticObjectInfo returns a static ObjectInfo
+// If hashes is nil and fs is not nil, the hash map will be replaced with
+// empty hashes of the types supported by the fs.
+func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo {
+ info := &staticObjectInfo{
+ remote: remote,
+ modTime: modTime,
+ size: size,
+ storable: storable,
+ hashes: hashes,
+ fs: fs,
+ }
+ if fs != nil && hashes == nil {
+ set := fs.Hashes().Array()
+ info.hashes = make(map[hash.Type]string)
+ for _, ht := range set {
+ info.hashes[ht] = ""
+ }
+ }
+ return info
+}
+
+type staticObjectInfo struct {
+ remote string
+ modTime time.Time
+ size int64
+ storable bool
+ hashes map[hash.Type]string
+ fs fs.Info
+}
+
+func (i *staticObjectInfo) Fs() fs.Info { return i.fs }
+func (i *staticObjectInfo) Remote() string { return i.remote }
+func (i *staticObjectInfo) String() string { return i.remote }
+func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
+func (i *staticObjectInfo) Size() int64 { return i.size }
+func (i *staticObjectInfo) Storable() bool { return i.storable }
+func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
+ if len(i.hashes) == 0 {
+ return "", hash.ErrUnsupported
+ }
+ if hash, ok := i.hashes[h]; ok {
+ return hash, nil
+ }
+ return "", hash.ErrUnsupported
+}
+
+// MemoryFs is an in-memory Fs; it only supports FsInfo and Put
+var MemoryFs memoryFs
+
+// memoryFs is an in memory fs
+type memoryFs struct{}
+
+// Name of the remote (as passed into NewFs)
+func (memoryFs) Name() string { return "memory" }
+
+// Root of the remote (as passed into NewFs)
+func (memoryFs) Root() string { return "" }
+
+// String returns a description of the FS
+func (memoryFs) String() string { return "memory" }
+
+// Precision of the ModTimes in this Fs
+func (memoryFs) Precision() time.Duration { return time.Nanosecond }
+
+// Returns the supported hash types of the filesystem
+func (memoryFs) Hashes() hash.Set { return hash.Supported }
+
+// Features returns the optional features of this Fs
+func (memoryFs) Features() *fs.Features { return &fs.Features{} }
+
+// List the objects and directories in dir into entries. The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (memoryFs) List(dir string) (entries fs.DirEntries, err error) {
+ return nil, nil
+}
+
+// NewObject finds the Object at remote. If it can't be found
+// it returns the error ErrorObjectNotFound.
+func (memoryFs) NewObject(remote string) (fs.Object, error) {
+ return nil, fs.ErrorObjectNotFound
+}
+
+// Put in to the remote path with the modTime given of the given size
+//
+// May create the object even if it returns an error - if so
+// will return the object and the error, otherwise will return
+// nil and the error
+func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+ o := NewMemoryObject(src.Remote(), src.ModTime(), nil)
+ return o, o.Update(in, src, options...)
+}
+
+// Mkdir makes the directory (container, bucket)
+//
+// Shouldn't return an error if it already exists
+func (memoryFs) Mkdir(dir string) error {
+ return errors.New("memoryFs: can't make directory")
+}
+
+// Rmdir removes the directory (container, bucket) if empty
+//
+// Return an error if it doesn't exist or isn't empty
+func (memoryFs) Rmdir(dir string) error {
+ return fs.ErrorDirNotFound
+}
+
+var _ fs.Fs = MemoryFs
+
+// MemoryObject is an in memory object
+type MemoryObject struct {
+ remote string
+ modTime time.Time
+ content []byte
+}
+
+// NewMemoryObject returns an in memory Object with the modTime and content passed in
+func NewMemoryObject(remote string, modTime time.Time, content []byte) *MemoryObject {
+ return &MemoryObject{
+ remote: remote,
+ modTime: modTime,
+ content: content,
+ }
+}
+
+// Content returns the underlying buffer
+func (o *MemoryObject) Content() []byte {
+ return o.content
+}
+
+// Fs returns read only access to the Fs that this object is part of
+func (o *MemoryObject) Fs() fs.Info {
+ return MemoryFs
+}
+
+// Remote returns the remote path
+func (o *MemoryObject) Remote() string {
+ return o.remote
+}
+
+// String returns a description of the Object
+func (o *MemoryObject) String() string {
+ return o.remote
+}
+
+// ModTime returns the modification date of the file
+func (o *MemoryObject) ModTime() time.Time {
+ return o.modTime
+}
+
+// Size returns the size of the file
+func (o *MemoryObject) Size() int64 {
+ return int64(len(o.content))
+}
+
+// Storable says whether this object can be stored
+func (o *MemoryObject) Storable() bool {
+ return true
+}
+
+// Hash returns the requested hash of the contents
+func (o *MemoryObject) Hash(h hash.Type) (string, error) {
+ hash, err := hash.NewMultiHasherTypes(hash.Set(h))
+ if err != nil {
+ return "", err
+ }
+ _, err = hash.Write(o.content)
+ if err != nil {
+ return "", err
+ }
+ return hash.Sums()[h], nil
+}
+
+// SetModTime sets the metadata on the object to set the modification date
+func (o *MemoryObject) SetModTime(modTime time.Time) error {
+ o.modTime = modTime
+ return nil
+}
+
+// Open opens the file for read. Call Close() on the returned io.ReadCloser
+func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+ content := o.content
+ for _, option := range options {
+ switch x := option.(type) {
+ case *fs.RangeOption:
+ content = o.content[x.Start:x.End]
+ case *fs.SeekOption:
+ content = o.content[x.Offset:]
+ default:
+ if option.Mandatory() {
+ fs.Logf(o, "Unsupported mandatory option: %v", option)
+ }
+ }
+ }
+ return ioutil.NopCloser(bytes.NewBuffer(content)), nil
+}
+
+// Update in to the object with the modTime given of the given size
+//
+// This re-uses the internal buffer if at all possible.
+func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+ size := src.Size()
+ if size == 0 {
+ o.content = nil
+ } else if size < 0 || int64(cap(o.content)) < size {
+ o.content, err = ioutil.ReadAll(in)
+ } else {
+ o.content = o.content[:size]
+ _, err = io.ReadFull(in, o.content)
+ }
+ o.modTime = src.ModTime()
+ return err
+}
+
+// Remove this object
+func (o *MemoryObject) Remove() error {
+ return errors.New("memoryObject.Remove not supported")
+}
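
MemoryObject is convenient for tests and for synthesizing small objects; a sketch of creating one and reading a byte range (note that MemoryObject.Open above uses RangeOption.End as an exclusive slice bound, unlike the inclusive HTTP Range header):

    package main

    import (
        "fmt"
        "io/ioutil"
        "time"

        "github.com/ncw/rclone/fs"
        "github.com/ncw/rclone/fs/object"
    )

    func main() {
        o := object.NewMemoryObject("file.txt", time.Now(), []byte("hello world"))

        rc, err := o.Open(&fs.RangeOption{Start: 0, End: 5})
        if err != nil {
            panic(err)
        }
        defer func() { _ = rc.Close() }()

        data, _ := ioutil.ReadAll(rc)
        fmt.Printf("%s\n", data) // hello
    }
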
diff --git a/vendor/github.com/ncw/rclone/fs/options.go b/vendor/github.com/ncw/rclone/fs/options.go
new file mode 100755
index 0000000..c277b11
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/options.go
@@ -0,0 +1,262 @@
+// Define the options for Open
+
+package fs
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/ncw/rclone/fs/hash"
+ "github.com/pkg/errors"
+)
+
+// OpenOption is an interface describing options for Open
+type OpenOption interface {
+ fmt.Stringer
+
+ // Header returns the option as an HTTP header
+ Header() (key string, value string)
+
+ // Mandatory returns whether this option can be ignored or not
+ Mandatory() bool
+}
+
+// RangeOption defines an HTTP Range option with start and end. If
+// either start or end is < 0 then it will be omitted.
+//
+// End may be bigger than the Size of the object in which case it will
+// be capped to the size of the object.
+//
+// Note that the End is inclusive, so to fetch 100 bytes you would use
+// RangeOption{Start: 0, End: 99}
+//
+// If Start is specified but End is not then it will fetch from Start
+// to the end of the file.
+//
+// If End is specified, but Start is not then it will fetch the last
+// End bytes.
+//
+// Examples:
+//
+// RangeOption{Start: 0, End: 99} - fetch the first 100 bytes
+// RangeOption{Start: 100, End: 199} - fetch the second 100 bytes
+// RangeOption{Start: 100} - fetch bytes from offset 100 to the end
+// RangeOption{End: 100} - fetch the last 100 bytes
+//
+// A RangeOption implements a single byte-range-spec from
+// https://tools.ietf.org/html/rfc7233#section-2.1
+type RangeOption struct {
+ Start int64
+ End int64
+}
+
+// Header formats the option as an http header
+func (o *RangeOption) Header() (key string, value string) {
+ key = "Range"
+ value = "bytes="
+ if o.Start >= 0 {
+ value += strconv.FormatInt(o.Start, 10)
+
+ }
+ value += "-"
+ if o.End >= 0 {
+ value += strconv.FormatInt(o.End, 10)
+ }
+ return key, value
+}
+
+// ParseRangeOption parses a RangeOption from a Range: header.
+// It only accepts single ranges.
+func ParseRangeOption(s string) (po *RangeOption, err error) {
+ const preamble = "bytes="
+ if !strings.HasPrefix(s, preamble) {
+ return nil, errors.New("Range: header invalid: doesn't start with " + preamble)
+ }
+ s = s[len(preamble):]
+ if strings.IndexRune(s, ',') >= 0 {
+ return nil, errors.New("Range: header invalid: contains multiple ranges which isn't supported")
+ }
+ dash := strings.IndexRune(s, '-')
+ if dash < 0 {
+ return nil, errors.New("Range: header invalid: contains no '-'")
+ }
+ start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:])
+ o := RangeOption{Start: -1, End: -1}
+ if start != "" {
+ o.Start, err = strconv.ParseInt(start, 10, 64)
+ if err != nil || o.Start < 0 {
+ return nil, errors.New("Range: header invalid: bad start")
+ }
+ }
+ if end != "" {
+ o.End, err = strconv.ParseInt(end, 10, 64)
+ if err != nil || o.End < 0 {
+ return nil, errors.New("Range: header invalid: bad end")
+ }
+ }
+ return &o, nil
+}
+
+// String formats the option into human readable form
+func (o *RangeOption) String() string {
+ return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End)
+}
+
+// Mandatory returns whether the option must be parsed or can be ignored
+func (o *RangeOption) Mandatory() bool {
+ return true
+}
+
+// Decode interprets the RangeOption into an offset and a limit
+//
+// The offset is the start of the stream and the limit is how many
+// bytes should be read from it. If the limit is -1 then the stream
+// should be read to the end.
+func (o *RangeOption) Decode(size int64) (offset, limit int64) {
+ if o.Start >= 0 {
+ offset = o.Start
+ if o.End >= 0 {
+ limit = o.End - o.Start + 1
+ } else {
+ limit = -1
+ }
+ } else {
+ if o.End >= 0 {
+ offset = size - o.End
+ } else {
+ offset = 0
+ }
+ limit = -1
+ }
+ return offset, limit
+}
+
+// FixRangeOption looks through the slice of options and adjusts any
+// RangeOptions found that request a fetch from the end into an
+// absolute fetch using the size passed in and makes sure the range does
+// not exceed filesize. Some remotes (eg Onedrive, Box) don't support
+// range requests which index from the end.
+func FixRangeOption(options []OpenOption, size int64) {
+ for i := range options {
+ option := options[i]
+ if x, ok := option.(*RangeOption); ok {
+ // If start is < 0 then fetch from the end
+ if x.Start < 0 {
+ x = &RangeOption{Start: size - x.End, End: -1}
+ options[i] = x
+ }
+ if x.End > size {
+ x = &RangeOption{Start: x.Start, End: size}
+ options[i] = x
+ }
+ }
+ }
+}
+
+// SeekOption defines an HTTP Range option with start only.
+type SeekOption struct {
+ Offset int64
+}
+
+// Header formats the option as an http header
+func (o *SeekOption) Header() (key string, value string) {
+ key = "Range"
+ value = fmt.Sprintf("bytes=%d-", o.Offset)
+ return key, value
+}
+
+// String formats the option into human readable form
+func (o *SeekOption) String() string {
+ return fmt.Sprintf("SeekOption(%d)", o.Offset)
+}
+
+// Mandatory returns whether the option must be parsed or can be ignored
+func (o *SeekOption) Mandatory() bool {
+ return true
+}
+
+// HTTPOption defines a general purpose HTTP option
+type HTTPOption struct {
+ Key string
+ Value string
+}
+
+// Header formats the option as an http header
+func (o *HTTPOption) Header() (key string, value string) {
+ return o.Key, o.Value
+}
+
+// String formats the option into human readable form
+func (o *HTTPOption) String() string {
+ return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value)
+}
+
+// Mandatory returns whether the option must be parsed or can be ignored
+func (o *HTTPOption) Mandatory() bool {
+ return false
+}
+
+// HashesOption defines an option used to tell the local fs to limit
+// the number of hashes it calculates.
+type HashesOption struct {
+ Hashes hash.Set
+}
+
+// Header formats the option as an http header
+func (o *HashesOption) Header() (key string, value string) {
+ return "", ""
+}
+
+// String formats the option into human readable form
+func (o *HashesOption) String() string {
+ return fmt.Sprintf("HashesOption(%v)", o.Hashes)
+}
+
+// Mandatory returns whether the option must be parsed or can be ignored
+func (o *HashesOption) Mandatory() bool {
+ return false
+}
+
+// OpenOptionAddHeaders adds each header found in options to the
+// headers map provided the key was non empty.
+func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {
+ for _, option := range options {
+ key, value := option.Header()
+ if key != "" && value != "" {
+ headers[key] = value
+ }
+ }
+}
+
+// OpenOptionHeaders adds each header found in options to the
+// headers map provided the key was non-empty.
+//
+// It returns a nil map if options was empty
+func OpenOptionHeaders(options []OpenOption) (headers map[string]string) {
+ if len(options) == 0 {
+ return nil
+ }
+ headers = make(map[string]string, len(options))
+ OpenOptionAddHeaders(options, headers)
+ return headers
+}
+
+// OpenOptionAddHTTPHeaders sets each header found in options to the
+// http.Header map provided the key was non-empty.
+func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {
+ for _, option := range options {
+ key, value := option.Header()
+ if key != "" && value != "" {
+ headers.Set(key, value)
+ }
+ }
+}
+
+// check interface
+var (
+ _ OpenOption = (*RangeOption)(nil)
+ _ OpenOption = (*SeekOption)(nil)
+ _ OpenOption = (*HTTPOption)(nil)
+)
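
ParseRangeOption and Decode round-trip a Range header into an (offset, limit) pair; a sketch:

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    func main() {
        o, err := fs.ParseRangeOption("bytes=100-199")
        if err != nil {
            panic(err)
        }
        offset, limit := o.Decode(1000) // 1000 = object size
        fmt.Println(offset, limit)      // 100 100 (End is inclusive)

        o, _ = fs.ParseRangeOption("bytes=-100") // the last 100 bytes
        offset, limit = o.Decode(1000)
        fmt.Println(offset, limit) // 900 -1 (read to the end)
    }
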
diff --git a/vendor/github.com/ncw/rclone/fs/parseduration.go b/vendor/github.com/ncw/rclone/fs/parseduration.go
new file mode 100755
index 0000000..4da162c
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/parseduration.go
@@ -0,0 +1,103 @@
+package fs
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Duration is a time.Duration with some more parsing options
+type Duration time.Duration
+
+// DurationOff is the default value for flags which can be turned off
+const DurationOff = Duration((1 << 63) - 1)
+
+// Turn Duration into a string
+func (d Duration) String() string {
+ if d == DurationOff {
+ return "off"
+ }
+ for i := len(ageSuffixes) - 2; i >= 0; i-- {
+ ageSuffix := &ageSuffixes[i]
+ if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) {
+ timeUnits := float64(d) / float64(ageSuffix.Multiplier)
+ return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix
+ }
+ }
+ return time.Duration(d).String()
+}
+
+// IsSet returns if the duration is != DurationOff
+func (d Duration) IsSet() bool {
+ return d != DurationOff
+}
+
+// We use time conventions
+var ageSuffixes = []struct {
+ Suffix string
+ Multiplier time.Duration
+}{
+ {Suffix: "d", Multiplier: time.Hour * 24},
+ {Suffix: "w", Multiplier: time.Hour * 24 * 7},
+ {Suffix: "M", Multiplier: time.Hour * 24 * 30},
+ {Suffix: "y", Multiplier: time.Hour * 24 * 365},
+
+ // Default to second
+ {Suffix: "", Multiplier: time.Second},
+}
+
+// ParseDuration parses a duration string. Accepts ms|s|m|h|d|w|M|y suffixes. Defaults to seconds if no suffix is supplied.
+func ParseDuration(age string) (time.Duration, error) {
+ var period float64
+
+ if age == "off" {
+ return time.Duration(DurationOff), nil
+ }
+
+ // Attempt to parse as a time.Duration first
+ d, err := time.ParseDuration(age)
+ if err == nil {
+ return d, nil
+ }
+
+ for _, ageSuffix := range ageSuffixes {
+ if strings.HasSuffix(age, ageSuffix.Suffix) {
+ numberString := age[:len(age)-len(ageSuffix.Suffix)]
+ var err error
+ period, err = strconv.ParseFloat(numberString, 64)
+ if err != nil {
+ return time.Duration(0), err
+ }
+ period *= float64(ageSuffix.Multiplier)
+ break
+ }
+ }
+
+ return time.Duration(period), nil
+}
+
+// Set a Duration
+func (d *Duration) Set(s string) error {
+ duration, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = Duration(duration)
+ return nil
+}
+
+// Type of the value
+func (d Duration) Type() string {
+ return "duration"
+}
+
+// Scan implements the fmt.Scanner interface
+func (d *Duration) Scan(s fmt.ScanState, ch rune) error {
+ token, err := s.Token(true, nil)
+ if err != nil {
+ return err
+ }
+ return d.Set(string(token))
+}
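
ParseDuration tries time.ParseDuration first and then the age suffixes above, defaulting to seconds; a sketch:

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    func main() {
        d, _ := fs.ParseDuration("90s") // handled by time.ParseDuration
        fmt.Println(d)                  // 1m30s

        d, _ = fs.ParseDuration("1.5d") // age suffix: 1.5 * 24h
        fmt.Println(d)                  // 36h0m0s

        d, _ = fs.ParseDuration("7") // bare number defaults to seconds
        fmt.Println(d)               // 7s
    }
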
diff --git a/vendor/github.com/ncw/rclone/fs/rc/internal.go b/vendor/github.com/ncw/rclone/fs/rc/internal.go
new file mode 100755
index 0000000..33228d2
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/rc/internal.go
@@ -0,0 +1,131 @@
+// Define the internal rc functions
+
+package rc
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func init() {
+ Add(Call{
+ Path: "rc/noop",
+ Fn: rcNoop,
+ Title: "Echo the input to the output parameters",
+ Help: `
+This echoes the input parameters to the output parameters for testing
+purposes. It can be used to check that rclone is still alive and to
+check that parameter passing is working properly.`,
+ })
+ Add(Call{
+ Path: "rc/error",
+ Fn: rcError,
+ Title: "This returns an error",
+ Help: `
+This returns an error with the input as part of its error string.
+Useful for testing error handling.`,
+ })
+ Add(Call{
+ Path: "rc/list",
+ Fn: rcList,
+ Title: "List all the registered remote control commands",
+ Help: `
+This lists all the registered remote control commands as a JSON map in
+the commands response.`,
+ })
+ Add(Call{
+ Path: "core/pid",
+ Fn: rcPid,
+ Title: "Return PID of current process",
+ Help: `
+This returns the PID of the current process.
+Useful for stopping the rclone process.
+ })
+ Add(Call{
+ Path: "core/memstats",
+ Fn: rcMemStats,
+ Title: "Returns the memory statistics",
+ Help: `
+This returns the memory statistics of the running program. What the values
+mean is explained in the Go docs: https://golang.org/pkg/runtime/#MemStats
+
+The most interesting values for most people are:
+
+* HeapAlloc: This is the amount of memory rclone is actually using
+* HeapSys: This is the amount of memory rclone has obtained from the OS
+* Sys: this is the total amount of memory requested from the OS
+ * It is virtual memory so may include unused memory
+`,
+ })
+ Add(Call{
+ Path: "core/gc",
+ Fn: rcGc,
+ Title: "Runs a garbage collection.",
+ Help: `
+This tells the go runtime to do a garbage collection run. It isn't
+necessary to call this normally, but it can be useful for debugging
+memory problems.
+`,
+ })
+}
+
+// Echo the input to the output parameters
+func rcNoop(in Params) (out Params, err error) {
+ return in, nil
+}
+
+// Return an error regardless
+func rcError(in Params) (out Params, err error) {
+ return nil, errors.Errorf("arbitrary error on input %+v", in)
+}
+
+// List the registered commands
+func rcList(in Params) (out Params, err error) {
+ out = make(Params)
+ out["commands"] = registry.list()
+ return out, nil
+}
+
+// Return PID of current process
+func rcPid(in Params) (out Params, err error) {
+ out = make(Params)
+ out["pid"] = os.Getpid()
+ return out, nil
+}
+
+// Return the memory statistics
+func rcMemStats(in Params) (out Params, err error) {
+ out = make(Params)
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+ out["Alloc"] = m.Alloc
+ out["TotalAlloc"] = m.TotalAlloc
+ out["Sys"] = m.Sys
+ out["Mallocs"] = m.Mallocs
+ out["Frees"] = m.Frees
+ out["HeapAlloc"] = m.HeapAlloc
+ out["HeapSys"] = m.HeapSys
+ out["HeapIdle"] = m.HeapIdle
+ out["HeapInuse"] = m.HeapInuse
+ out["HeapReleased"] = m.HeapReleased
+ out["HeapObjects"] = m.HeapObjects
+ out["StackInuse"] = m.StackInuse
+ out["StackSys"] = m.StackSys
+ out["MSpanInuse"] = m.MSpanInuse
+ out["MSpanSys"] = m.MSpanSys
+ out["MCacheInuse"] = m.MCacheInuse
+ out["MCacheSys"] = m.MCacheSys
+ out["BuckHashSys"] = m.BuckHashSys
+ out["GCSys"] = m.GCSys
+ out["OtherSys"] = m.OtherSys
+ return out, nil
+}
+
+// Do a garbage collection run
+func rcGc(in Params) (out Params, err error) {
+ out = make(Params)
+ runtime.GC()
+ return out, nil
+}
diff --git a/vendor/github.com/ncw/rclone/fs/rc/rc.go b/vendor/github.com/ncw/rclone/fs/rc/rc.go
new file mode 100755
index 0000000..32e0633
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/rc/rc.go
@@ -0,0 +1,146 @@
+// Package rc implements a remote control server and registry for rclone
+//
+// To register your internal calls, call rc.Add with a Call struct. Your
+// function should take and return Params. It can also return an
+// error. Use rc.NewError to wrap an existing error along with an
+// http response type if a response other than 500 internal server
+// error is required on error.
+package rc
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ _ "net/http/pprof" // install the pprof http handlers
+
+ "strings"
+
+ "github.com/ncw/rclone/cmd/serve/httplib"
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+// Options contains options for the remote control server
+type Options struct {
+ HTTPOptions httplib.Options
+ Enabled bool
+}
+
+// DefaultOpt is the default values used for Options
+var DefaultOpt = Options{
+ HTTPOptions: httplib.DefaultOpt,
+ Enabled: false,
+}
+
+func init() {
+ DefaultOpt.HTTPOptions.ListenAddr = "localhost:5572"
+}
+
+// Start the remote control server if configured
+func Start(opt *Options) {
+ if opt.Enabled {
+ s := newServer(opt)
+ go s.serve()
+ }
+}
+
+// server contains everything to run the server
+type server struct {
+ srv *httplib.Server
+}
+
+func newServer(opt *Options) *server {
+ // Serve on the DefaultServeMux so that global registrations appear
+ mux := http.DefaultServeMux
+ s := &server{
+ srv: httplib.NewServer(mux, &opt.HTTPOptions),
+ }
+ mux.HandleFunc("/", s.handler)
+ return s
+}
+
+// serve runs the http server - doesn't return
+func (s *server) serve() {
+ err := s.srv.Serve()
+ if err != nil {
+ fs.Errorf(nil, "Opening listener: %v", err)
+ }
+ fs.Logf(nil, "Serving remote control on %s", s.srv.URL())
+ s.srv.Wait()
+}
+
+// WriteJSON writes JSON in out to w
+func WriteJSON(w io.Writer, out Params) error {
+ enc := json.NewEncoder(w)
+ enc.SetIndent("", "\t")
+ return enc.Encode(out)
+}
+
+// handler reads incoming requests and dispatches them
+func (s *server) handler(w http.ResponseWriter, r *http.Request) {
+ path := strings.Trim(r.URL.Path, "/")
+ in := make(Params)
+
+ writeError := func(err error, status int) {
+ fs.Errorf(nil, "rc: %q: error: %v", path, err)
+ w.WriteHeader(status)
+ err = WriteJSON(w, Params{
+ "error": err.Error(),
+ "input": in,
+ })
+ if err != nil {
+ // can't return the error at this point
+ fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
+ }
+ }
+
+ if r.Method != "POST" {
+ writeError(errors.Errorf("method %q not allowed - POST required", r.Method), http.StatusMethodNotAllowed)
+ return
+ }
+
+ // Find the call
+ call := registry.get(path)
+ if call == nil {
+ writeError(errors.Errorf("couldn't find method %q", path), http.StatusMethodNotAllowed)
+ return
+ }
+
+ // Parse the POST and URL parameters into r.Form
+ err := r.ParseForm()
+ if err != nil {
+ writeError(errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
+ return
+ }
+
+ // Read the POST and URL parameters into in
+ for k, vs := range r.Form {
+ if len(vs) > 0 {
+ in[k] = vs[len(vs)-1]
+ }
+ }
+ fs.Debugf(nil, "form = %+v", r.Form)
+
+ // Parse a JSON blob from the input
+ if r.Header.Get("Content-Type") == "application/json" {
+ err := json.NewDecoder(r.Body).Decode(&in)
+ if err != nil {
+ writeError(errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
+ return
+ }
+ }
+
+ fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
+ out, err := call.Fn(in)
+ if err != nil {
+ writeError(errors.Wrap(err, "remote control command failed"), http.StatusInternalServerError)
+ return
+ }
+
+ fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
+ err = WriteJSON(w, out)
+ if err != nil {
+ // can't return the error at this point
+ fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
+ }
+}
diff --git a/vendor/github.com/ncw/rclone/fs/rc/registry.go b/vendor/github.com/ncw/rclone/fs/rc/registry.go
new file mode 100755
index 0000000..54a4dc9
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/rc/registry.go
@@ -0,0 +1,79 @@
+// Define the registry
+
+package rc
+
+import (
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// Params is the input and output type for the Func
+type Params map[string]interface{}
+
+// Func defines a type for a remote control function
+type Func func(in Params) (out Params, err error)
+
+// Call defines info about a remote control function and is used in
+// the Add function to create new entry points.
+type Call struct {
+ Path string // path to activate this RC
+ Fn Func `json:"-"` // function to call
+ Title string // help for the function
+ Help string // multi-line markdown formatted help
+}
+
+// Registry holds the list of all the registered remote control functions
+type Registry struct {
+ mu sync.RWMutex
+ call map[string]*Call
+}
+
+// NewRegistry makes a new registry for remote control functions
+func NewRegistry() *Registry {
+ return &Registry{
+ call: make(map[string]*Call),
+ }
+}
+
+// Add a call to the registry
+func (r *Registry) add(call Call) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ call.Path = strings.Trim(call.Path, "/")
+ call.Help = strings.TrimSpace(call.Help)
+ fs.Debugf(nil, "Adding path %q to remote control registry", call.Path)
+ r.call[call.Path] = &call
+}
+
+// get a Call from a path or nil
+func (r *Registry) get(path string) *Call {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.call[path]
+}
+
+// get a list of all calls in alphabetical order
+func (r *Registry) list() (out []*Call) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ var keys []string
+ for key := range r.call {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ out = append(out, r.call[key])
+ }
+ return out
+}
+
+// The global registry
+var registry = NewRegistry()
+
+// Add a function to the global registry
+func Add(call Call) {
+ registry.add(call)
+}
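
New entry points are registered by calling Add, typically from an init function; a sketch with a hypothetical path:

    package main

    import "github.com/ncw/rclone/fs/rc"

    func init() {
        rc.Add(rc.Call{
            Path:  "custom/echo", // hypothetical path
            Title: "Echo the input",
            Help:  "Returns the input parameters unchanged.",
            Fn: func(in rc.Params) (rc.Params, error) {
                return in, nil
            },
        })
    }
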
diff --git a/vendor/github.com/ncw/rclone/fs/sizesuffix.go b/vendor/github.com/ncw/rclone/fs/sizesuffix.go
new file mode 100755
index 0000000..e3fe3fd
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/sizesuffix.go
@@ -0,0 +1,132 @@
+package fs
+
+// SizeSuffix is parsed by flag with k/M/G suffixes
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// SizeSuffix is an int64 with a friendly way of printing and setting
+type SizeSuffix int64
+
+// Common multipliers for SizeSuffix
+const (
+ Byte SizeSuffix = 1 << (iota * 10)
+ KibiByte
+ MebiByte
+ GibiByte
+ TebiByte
+ PebiByte
+ ExbiByte
+)
+
+// Turn SizeSuffix into a string and a suffix
+func (x SizeSuffix) string() (string, string) {
+ scaled := float64(0)
+ suffix := ""
+ switch {
+ case x < 0:
+ return "off", ""
+ case x == 0:
+ return "0", ""
+ case x < 1<<10:
+ scaled = float64(x)
+ suffix = ""
+ case x < 1<<20:
+ scaled = float64(x) / (1 << 10)
+ suffix = "k"
+ case x < 1<<30:
+ scaled = float64(x) / (1 << 20)
+ suffix = "M"
+ case x < 1<<40:
+ scaled = float64(x) / (1 << 30)
+ suffix = "G"
+ case x < 1<<50:
+ scaled = float64(x) / (1 << 40)
+ suffix = "T"
+ default:
+ scaled = float64(x) / (1 << 50)
+ suffix = "P"
+ }
+ if math.Floor(scaled) == scaled {
+ return fmt.Sprintf("%.0f", scaled), suffix
+ }
+ return fmt.Sprintf("%.3f", scaled), suffix
+}
+
+// String turns SizeSuffix into a string
+func (x SizeSuffix) String() string {
+ val, suffix := x.string()
+ return val + suffix
+}
+
+// Unit turns SizeSuffix into a string with a unit
+func (x SizeSuffix) Unit(unit string) string {
+ val, suffix := x.string()
+ if val == "off" {
+ return val
+ }
+ return val + " " + suffix + unit
+}
+
+// Set a SizeSuffix
+func (x *SizeSuffix) Set(s string) error {
+ if len(s) == 0 {
+ return errors.New("empty string")
+ }
+ if strings.ToLower(s) == "off" {
+ *x = -1
+ return nil
+ }
+ suffix := s[len(s)-1]
+ suffixLen := 1
+ var multiplier float64
+ switch suffix {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
+ suffixLen = 0
+ multiplier = 1 << 10
+ case 'b', 'B':
+ multiplier = 1
+ case 'k', 'K':
+ multiplier = 1 << 10
+ case 'm', 'M':
+ multiplier = 1 << 20
+ case 'g', 'G':
+ multiplier = 1 << 30
+ case 't', 'T':
+ multiplier = 1 << 40
+ case 'p', 'P':
+ multiplier = 1 << 50
+ default:
+ return errors.Errorf("bad suffix %q", suffix)
+ }
+ s = s[:len(s)-suffixLen]
+ value, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ if value < 0 {
+ return errors.Errorf("size can't be negative %q", s)
+ }
+ value *= multiplier
+ *x = SizeSuffix(value)
+ return nil
+}
+
+// Type of the value
+func (x *SizeSuffix) Type() string {
+ return "int64"
+}
+
+// Scan implements the fmt.Scanner interface
+func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
+ token, err := s.Token(true, nil)
+ if err != nil {
+ return err
+ }
+ return x.Set(string(token))
+}
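
SizeSuffix parses and prints using binary (1024-based) multipliers; a sketch:

    package main

    import (
        "fmt"

        "github.com/ncw/rclone/fs"
    )

    func main() {
        var s fs.SizeSuffix
        _ = s.Set("10M")
        fmt.Println(int64(s)) // 10485760
        fmt.Println(s)        // 10M

        _ = s.Set("1.5G")
        fmt.Println(int64(s)) // 1610612736
    }
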
diff --git a/vendor/github.com/ncw/rclone/fs/version.go b/vendor/github.com/ncw/rclone/fs/version.go
new file mode 100755
index 0000000..b584919
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/version.go
@@ -0,0 +1,4 @@
+package fs
+
+// Version of rclone
+var Version = "v1.43.1"
diff --git a/vendor/github.com/ncw/rclone/fs/versioncheck.go b/vendor/github.com/ncw/rclone/fs/versioncheck.go
new file mode 100755
index 0000000..00ce2d9
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/versioncheck.go
@@ -0,0 +1,7 @@
+//+build !go1.7
+
+package fs
+
+// Upgrade to Go version 1.7 to compile rclone - latest stable go
+// compiler recommended.
+func init() { Go_version_1_7_required_for_compilation() }
diff --git a/vendor/github.com/ncw/rclone/fs/walk/walk.go b/vendor/github.com/ncw/rclone/fs/walk/walk.go
new file mode 100755
index 0000000..d92a21e
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/fs/walk/walk.go
@@ -0,0 +1,552 @@
+// Package walk walks directories
+package walk
+
+import (
+ "bytes"
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/filter"
+ "github.com/ncw/rclone/fs/list"
+ "github.com/pkg/errors"
+)
+
+// ErrorSkipDir is used as a return value from Walk to indicate that the
+// directory named in the call is to be skipped. It is not returned as
+// an error by any function.
+var ErrorSkipDir = errors.New("skip this directory")
+
+// ErrorCantListR is returned by WalkR if the underlying Fs isn't
+// capable of doing a recursive listing.
+var ErrorCantListR = errors.New("recursive directory listing not available")
+
+// Func is the type of the function called for each directory
+// visited by Walk. The path argument contains the remote path to the directory.
+//
+// If there was a problem walking to the directory named by path, the
+// incoming error will describe the problem and the function can
+// decide how to handle that error (and Walk will not descend into
+// that directory). If an error is returned, processing stops. The
+// sole exception is when the function returns the special value
+// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
+// directory's contents entirely.
+type Func func(path string, entries fs.DirEntries, err error) error
+
+// Walk lists the directory.
+//
+// If includeAll is not set it will use the filters defined.
+//
+// If maxLevel is < 0 then it will recurse indefinitely, else it will
+// only do maxLevel levels.
+//
+// It calls fn for each tranche of DirEntries read.
+//
+// Note that fn will not be called concurrently whereas the directory
+// listing will proceed concurrently.
+//
+// Parent directories are always listed before their children
+//
+// This is implemented by WalkR if Config.UseRecursiveListing is true
+// and f supports it and level > 1, or WalkN otherwise.
+//
+// NB (f, path) to be replaced by fs.Dir at some point
+func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+ if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
+ return walkListR(f, path, includeAll, maxLevel, fn)
+ }
+ return walkListDirSorted(f, path, includeAll, maxLevel, fn)
+}
+
+// walkListDirSorted lists the directory.
+//
+// It implements Walk using non recursive directory listing.
+func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+ return walk(f, path, includeAll, maxLevel, fn, list.DirSorted)
+}
+
+// walkListR lists the directory.
+//
+// It implements Walk using recursive directory listing if
+// available, or returns ErrorCantListR if not.
+func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+ listR := f.Features().ListR
+ if listR == nil {
+ return ErrorCantListR
+ }
+ return walkR(f, path, includeAll, maxLevel, fn, listR)
+}
+
+type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
+
+func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
+ var (
+ wg sync.WaitGroup // sync closing of go routines
+ traversing sync.WaitGroup // running directory traversals
+ doClose sync.Once // close the channel once
+ mu sync.Mutex // stop fn being called concurrently
+ )
+ // listJob describe a directory listing that needs to be done
+ type listJob struct {
+ remote string
+ depth int
+ }
+
+ in := make(chan listJob, fs.Config.Checkers)
+ errs := make(chan error, 1)
+ quit := make(chan struct{})
+ closeQuit := func() {
+ doClose.Do(func() {
+ close(quit)
+ go func() {
+ for range in {
+ traversing.Done()
+ }
+ }()
+ })
+ }
+ for i := 0; i < fs.Config.Checkers; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case job, ok := <-in:
+ if !ok {
+ return
+ }
+ entries, err := listDir(f, includeAll, job.remote)
+ var jobs []listJob
+ if err == nil && job.depth != 0 {
+ entries.ForDir(func(dir fs.Directory) {
+ // Recurse for the directory
+ jobs = append(jobs, listJob{
+ remote: dir.Remote(),
+ depth: job.depth - 1,
+ })
+ })
+ }
+ mu.Lock()
+ err = fn(job.remote, entries, err)
+ mu.Unlock()
+ // NB once we have passed entries to fn we mustn't touch it again
+ if err != nil && err != ErrorSkipDir {
+ traversing.Done()
+ fs.CountError(err)
+ fs.Errorf(job.remote, "error listing: %v", err)
+ closeQuit()
+ // Send error to error channel if space
+ select {
+ case errs <- err:
+ default:
+ }
+ continue
+ }
+ if err == nil && len(jobs) > 0 {
+ traversing.Add(len(jobs))
+ go func() {
+ // Now we have traversed this directory, send these
+ // jobs off for traversal in the background
+ for _, newJob := range jobs {
+ in <- newJob
+ }
+ }()
+ }
+ traversing.Done()
+ case <-quit:
+ return
+ }
+ }
+ }()
+ }
+ // Start the process
+ traversing.Add(1)
+ in <- listJob{
+ remote: path,
+ depth: maxLevel - 1,
+ }
+ traversing.Wait()
+ close(in)
+ wg.Wait()
+ close(errs)
+ // return the first error returned or nil
+ return <-errs
+}
+
+// DirTree is a map of directories to entries
+type DirTree map[string]fs.DirEntries
+
+// parentDir finds the parent directory of path
+func parentDir(entryPath string) string {
+ dirPath := path.Dir(entryPath)
+ if dirPath == "." {
+ dirPath = ""
+ }
+ return dirPath
+}
+
+// add an entry to the tree
+func (dt DirTree) add(entry fs.DirEntry) {
+ dirPath := parentDir(entry.Remote())
+ dt[dirPath] = append(dt[dirPath], entry)
+}
+
+// add a directory entry to the tree
+func (dt DirTree) addDir(entry fs.DirEntry) {
+ dt.add(entry)
+ // create the directory itself if it doesn't exist already
+ dirPath := entry.Remote()
+ if _, ok := dt[dirPath]; !ok {
+ dt[dirPath] = nil
+ }
+}
+
+// Find returns the DirEntry for filePath or nil if not found
+func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
+ parentPath = parentDir(filePath)
+ for _, entry := range dt[parentPath] {
+ if entry.Remote() == filePath {
+ return parentPath, entry
+ }
+ }
+ return parentPath, nil
+}
+
+// check that dirPath has a *Dir in its parent
+func (dt DirTree) checkParent(root, dirPath string) {
+ if dirPath == root {
+ return
+ }
+ parentPath, entry := dt.Find(dirPath)
+ if entry != nil {
+ return
+ }
+ dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
+ dt.checkParent(root, parentPath)
+}
+
+// check every directory in the tree has *Dir in its parent
+func (dt DirTree) checkParents(root string) {
+ for dirPath := range dt {
+ dt.checkParent(root, dirPath)
+ }
+}
+
+// Sort sorts all the Entries
+func (dt DirTree) Sort() {
+ for _, entries := range dt {
+ sort.Stable(entries)
+ }
+}
+
+// Dirs returns the directories in sorted order
+func (dt DirTree) Dirs() (dirNames []string) {
+ for dirPath := range dt {
+ dirNames = append(dirNames, dirPath)
+ }
+ sort.Strings(dirNames)
+ return dirNames
+}
+
+// Prune removes directories from a directory tree. dirNames contains
+// all directories to remove as keys, with true as values. dirNames
+// will be modified in the function.
+func (dt DirTree) Prune(dirNames map[string]bool) error {
+ // We use map[string]bool to avoid recursion (and potential
+ // stack exhaustion).
+
+ // First we need to delete directories from their parents.
+ for dName, remove := range dirNames {
+ if !remove {
+ // Currently all values should be
+ // true, therefore this should not
+ // happen. But this makes the function
+ // more predictable.
+ fs.Infof(dName, "Directory in the map for prune, but the value is false")
+ continue
+ }
+ if dName == "" {
+ // if dName is root, do nothing (no parent exist)
+ continue
+ }
+ parent := parentDir(dName)
+ // It may happen that dt does not have a dName key,
+ // since the directory was excluded based on a filter. In
+ // such a case the loop will be skipped.
+ loop:
+ for i, entry := range dt[parent] {
+ switch x := entry.(type) {
+ case fs.Directory:
+ if x.Remote() == dName {
+ // the slice is not sorted yet -
+ // to delete an item:
+ // a) replace it with the last one
+ dt[parent][i] = dt[parent][len(dt[parent])-1]
+ // b) remove the last one
+ dt[parent] = dt[parent][:len(dt[parent])-1]
+ // we modify the slice within the loop, so stop
+ // iterating immediately - a plain break would
+ // only exit the switch
+ break loop
+ }
+ case fs.Object:
+ // do nothing
+ default:
+ return errors.Errorf("unknown object type %T", entry)
+
+ }
+ }
+ }
+
+ for len(dirNames) > 0 {
+ // According to the Go spec, if new keys are added
+ // during range iteration, they may be skipped.
+ for dName, remove := range dirNames {
+ if !remove {
+ fs.Infof(dName, "Directory in the map for prune, but the value is false")
+ continue
+ }
+ // First, add all subdirectories to dirNames.
+
+ // It may happen that dt[dName] does not exist.
+ // If so, the loop will be skipped.
+ for _, entry := range dt[dName] {
+ switch x := entry.(type) {
+ case fs.Directory:
+ excludeDir := x.Remote()
+ dirNames[excludeDir] = true
+ case fs.Object:
+ // do nothing
+ default:
+ return errors.Errorf("unknown object type %T", entry)
+
+ }
+ }
+ // Then remove current directory from DirTree
+ delete(dt, dName)
+ // and from dirNames
+ delete(dirNames, dName)
+ }
+ }
+ return nil
+}
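+
+// Minimal usage sketch, assuming dt is a populated DirTree: to drop a
+// subtree, seed the map with the directory to remove and Prune walks
+// its children iteratively via the map:
+//
+//	toPrune := map[string]bool{"path/to/dir": true}
+//	if err := dt.Prune(toPrune); err != nil {
+//		return err
+//	}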
+
+// String emits a simple representation of the DirTree
+func (dt DirTree) String() string {
+ out := new(bytes.Buffer)
+ for _, dir := range dt.Dirs() {
+ _, _ = fmt.Fprintf(out, "%s/\n", dir)
+ for _, entry := range dt[dir] {
+ flag := ""
+ if _, ok := entry.(fs.Directory); ok {
+ flag = "/"
+ }
+ _, _ = fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag)
+ }
+ }
+ return out.String()
+}
+
+func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) {
+ dirs := make(DirTree)
+ // Entries can come in arbitrary order. We use toPrune to keep
+ // all directories to exclude later.
+ toPrune := make(map[string]bool)
+ includeDirectory := filter.Active.IncludeDirectory(f)
+ var mu sync.Mutex
+ err := listR(startPath, func(entries fs.DirEntries) error {
+ mu.Lock()
+ defer mu.Unlock()
+ for _, entry := range entries {
+ slashes := strings.Count(entry.Remote(), "/")
+ switch x := entry.(type) {
+ case fs.Object:
+ // Make sure we don't delete excluded files if not required
+ if includeAll || filter.Active.IncludeObject(x) {
+ if maxLevel < 0 || slashes <= maxLevel-1 {
+ dirs.add(x)
+ } else {
+ // Make sure we include any parent directories of excluded objects
+ dirPath := x.Remote()
+ for ; slashes > maxLevel-1; slashes-- {
+ dirPath = parentDir(dirPath)
+ }
+ dirs.checkParent(startPath, dirPath)
+ }
+ } else {
+ fs.Debugf(x, "Excluded from sync (and deletion)")
+ }
+ // Check if we need to prune a directory later.
+ if !includeAll && len(filter.Active.Opt.ExcludeFile) > 0 {
+ basename := path.Base(x.Remote())
+ if basename == filter.Active.Opt.ExcludeFile {
+ excludeDir := parentDir(x.Remote())
+ toPrune[excludeDir] = true
+ fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
+ }
+ }
+ case fs.Directory:
+ inc, err := includeDirectory(x.Remote())
+ if err != nil {
+ return err
+ }
+ if includeAll || inc {
+ if maxLevel < 0 || slashes <= maxLevel-1 {
+ if slashes == maxLevel-1 {
+ // Just add the object if at maxLevel
+ dirs.add(x)
+ } else {
+ dirs.addDir(x)
+ }
+ }
+ } else {
+ fs.Debugf(x, "Excluded from sync (and deletion)")
+ }
+ default:
+ return errors.Errorf("unknown object type %T", entry)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ dirs.checkParents(startPath)
+ if len(dirs) == 0 {
+ dirs[startPath] = nil
+ }
+ err = dirs.Prune(toPrune)
+ if err != nil {
+ return nil, err
+ }
+ dirs.Sort()
+ return dirs, nil
+}
+
+// Create a DirTree using List
+func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
+ dirs := make(DirTree)
+ fn := func(dirPath string, entries fs.DirEntries, err error) error {
+ if err == nil {
+ dirs[dirPath] = entries
+ }
+ return err
+ }
+ err := walk(f, path, includeAll, maxLevel, fn, listDir)
+ if err != nil {
+ return nil, err
+ }
+ return dirs, nil
+}
+
+// NewDirTree returns a DirTree filled with the directory listing
+// using the parameters supplied.
+//
+// If includeAll is not set it will use the filters defined.
+//
+// If maxLevel is < 0 then it will recurse indefinitely, else it will
+// only do maxLevel levels.
+//
+// This is implemented by WalkR if Config.UseRecursiveListing is true
+// and f supports it and level > 1, or WalkN otherwise.
+//
+// NB (f, path) to be replaced by fs.Dir at some point
+func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
+ if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
+ return walkRDirTree(f, path, includeAll, maxLevel, ListR)
+ }
+ return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted)
+}
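+
+// Usage sketch, assuming f is an initialised fs.Fs and this package is
+// imported as walk: list the whole remote into a tree and print it.
+//
+//	tree, err := walk.NewDirTree(f, "", false, -1)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Print(tree)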
+
+func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
+ dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR)
+ if err != nil {
+ return err
+ }
+ skipping := false
+ skipPrefix := ""
+ emptyDir := fs.DirEntries{}
+ for _, dirPath := range dirs.Dirs() {
+ if skipping {
+ // Skip over directories as required
+ if strings.HasPrefix(dirPath, skipPrefix) {
+ continue
+ }
+ skipping = false
+ }
+ entries := dirs[dirPath]
+ if entries == nil {
+ entries = emptyDir
+ }
+ err = fn(dirPath, entries, nil)
+ if err == ErrorSkipDir {
+ skipping = true
+ skipPrefix = dirPath
+ if skipPrefix != "" {
+ skipPrefix += "/"
+ }
+ } else if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetAll runs Walk getting all the results
+func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
+ err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
+ if err != nil {
+ return err
+ }
+ for _, entry := range entries {
+ switch x := entry.(type) {
+ case fs.Object:
+ objs = append(objs, x)
+ case fs.Directory:
+ dirs = append(dirs, x)
+ }
+ }
+ return nil
+ })
+ return
+}
+
+// ListRHelper is used in the implementation of ListR to accumulate DirEntries
+type ListRHelper struct {
+ callback fs.ListRCallback
+ entries fs.DirEntries
+}
+
+// NewListRHelper should be called from ListR with the callback passed in
+func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
+ return &ListRHelper{
+ callback: callback,
+ }
+}
+
+// send sends the stored entries to the callback if there are >= max
+// entries.
+func (lh *ListRHelper) send(max int) (err error) {
+ if len(lh.entries) >= max {
+ err = lh.callback(lh.entries)
+ lh.entries = lh.entries[:0]
+ }
+ return err
+}
+
+// Add an entry to the stored entries and send them if there are more
+// than a certain amount
+func (lh *ListRHelper) Add(entry fs.DirEntry) error {
+ if entry == nil {
+ return nil
+ }
+ lh.entries = append(lh.entries, entry)
+ return lh.send(100)
+}
+
+// Flush the stored entries (if any) sending them to the callback
+func (lh *ListRHelper) Flush() error {
+ return lh.send(1)
+}
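+
+// Sketch of a backend ListR built on the helper; f.listAll is a
+// hypothetical enumerator, everything else is from this file:
+//
+//	func (f *Fs) ListR(dir string, callback fs.ListRCallback) error {
+//		list := walk.NewListRHelper(callback)
+//		err := f.listAll(dir, func(entry fs.DirEntry) error {
+//			return list.Add(entry)
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		return list.Flush()
+//	}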
diff --git a/vendor/github.com/ncw/rclone/lib/dircache/dircache.go b/vendor/github.com/ncw/rclone/lib/dircache/dircache.go
new file mode 100755
index 0000000..62564bc
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/dircache/dircache.go
@@ -0,0 +1,313 @@
+// Package dircache provides a simple cache for caching directory to path lookups
+package dircache
+
+// methods with a _ prefix do not take the lock themselves - the caller must hold mu where required
+
+import (
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+// DirCache caches paths to directory IDs and vice versa
+type DirCache struct {
+ cacheMu sync.RWMutex
+ cache map[string]string
+ invCache map[string]string
+ mu sync.Mutex
+ fs DirCacher // Interface to find and make stuff
+ trueRootID string // ID of the absolute root
+ root string // the path we are working on
+ rootID string // ID of the root directory
+ rootParentID string // ID of the root's parent directory
+ foundRoot bool // Whether we have found the root or not
+}
+
+// DirCacher describes an interface for doing the low level directory work
+type DirCacher interface {
+ FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
+ CreateDir(pathID, leaf string) (newID string, err error)
+}
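+
+// Sketch of a backend satisfying DirCacher, mapping (parent ID, leaf)
+// to directory IDs; f.api.LookupChild is a hypothetical client call:
+//
+//	func (f *Fs) FindLeaf(pathID, leaf string) (string, bool, error) {
+//		item, err := f.api.LookupChild(pathID, leaf)
+//		if err != nil {
+//			return "", false, err
+//		}
+//		if item == nil {
+//			return "", false, nil // not found is not an error
+//		}
+//		return item.ID, true, nil
+//	}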
+
+// New makes a DirCache
+//
+// The cache is safe for concurrent use
+func New(root string, trueRootID string, fs DirCacher) *DirCache {
+ d := &DirCache{
+ trueRootID: trueRootID,
+ root: root,
+ fs: fs,
+ }
+ d.Flush()
+ d.ResetRoot()
+ return d
+}
+
+// Get an ID given a path
+func (dc *DirCache) Get(path string) (id string, ok bool) {
+ dc.cacheMu.RLock()
+ id, ok = dc.cache[path]
+ dc.cacheMu.RUnlock()
+ return
+}
+
+// GetInv gets a path given an ID
+func (dc *DirCache) GetInv(id string) (path string, ok bool) {
+ dc.cacheMu.RLock()
+ path, ok = dc.invCache[id]
+ dc.cacheMu.RUnlock()
+ return
+}
+
+// Put a path, id into the map
+func (dc *DirCache) Put(path, id string) {
+ dc.cacheMu.Lock()
+ dc.cache[path] = id
+ dc.invCache[id] = path
+ dc.cacheMu.Unlock()
+}
+
+// Flush the map of all data
+func (dc *DirCache) Flush() {
+ dc.cacheMu.Lock()
+ dc.cache = make(map[string]string)
+ dc.invCache = make(map[string]string)
+ dc.cacheMu.Unlock()
+}
+
+// FlushDir flushes the map of all data starting with dir
+//
+// If dir is empty then this is equivalent to calling ResetRoot
+func (dc *DirCache) FlushDir(dir string) {
+ if dir == "" {
+ dc.ResetRoot()
+ return
+ }
+ dc.cacheMu.Lock()
+
+ // Delete the root dir
+ ID, ok := dc.cache[dir]
+ if ok {
+ delete(dc.cache, dir)
+ delete(dc.invCache, ID)
+ }
+
+ // And any sub directories
+ dir += "/"
+ for key, ID := range dc.cache {
+ if strings.HasPrefix(key, dir) {
+ delete(dc.cache, key)
+ delete(dc.invCache, ID)
+ }
+ }
+
+ dc.cacheMu.Unlock()
+}
+
+// SplitPath splits a path into directory, leaf
+//
+// Path shouldn't start or end with a /
+//
+// If there are no slashes then directory will be "" and leaf = path
+func SplitPath(path string) (directory, leaf string) {
+ lastSlash := strings.LastIndex(path, "/")
+ if lastSlash >= 0 {
+ directory = path[:lastSlash]
+ leaf = path[lastSlash+1:]
+ } else {
+ directory = ""
+ leaf = path
+ }
+ return
+}
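+
+// For example:
+//
+//	SplitPath("a/b/c") // -> "a/b", "c"
+//	SplitPath("file")  // -> "", "file"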
+
+// FindDir finds the directory passed in returning the directory ID
+// starting from pathID
+//
+// Path shouldn't start or end with a /
+//
+// If create is set it will make the directory if not found
+//
+// Algorithm:
+// Look in the cache for the path, if found return the pathID
+// If not found strip the last path off the path and recurse
+// Now have a parent directory id, so look in the parent for self and return it
+func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ return dc._findDir(path, create)
+}
+
+// Look for the path in the cache, handling the root specially - safe to call without the mu
+func (dc *DirCache) _findDirInCache(path string) string {
+ // fmt.Println("Finding",path,"create",create,"cache",cache)
+ // If it is the root, then return it
+ if path == "" {
+ // fmt.Println("Root")
+ return dc.rootID
+ }
+
+ // If it is in the cache then return it
+ pathID, ok := dc.Get(path)
+ if ok {
+ // fmt.Println("Cache hit on", path)
+ return pathID
+ }
+
+ return ""
+}
+
+// Unlocked findDir - must have mu
+func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
+ pathID = dc._findDirInCache(path)
+ if pathID != "" {
+ return pathID, nil
+ }
+
+ // Split the path into directory, leaf
+ directory, leaf := SplitPath(path)
+
+ // Recurse and find pathID for parent directory
+ parentPathID, err := dc._findDir(directory, create)
+ if err != nil {
+ return "", err
+
+ }
+
+ // Find the leaf in parentPathID
+ pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
+ if err != nil {
+ return "", err
+ }
+
+ // If not found create the directory if required or return an error
+ if !found {
+ if create {
+ pathID, err = dc.fs.CreateDir(parentPathID, leaf)
+ if err != nil {
+ return "", errors.Wrap(err, "failed to make directory")
+ }
+ } else {
+ return "", fs.ErrorDirNotFound
+ }
+ }
+
+ // Store the leaf directory in the cache
+ dc.Put(path, pathID)
+
+ // fmt.Println("Dir", path, "is", pathID)
+ return pathID, nil
+}
+
+// FindPath finds the leaf and directoryID from a path
+//
+// Do not call FindPath with the root directory - it will return an error
+//
+// If create is set parent directories will be created if they don't exist
+func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
+ if path == "" {
+ err = errors.New("internal error: can't call FindPath with root directory")
+ return
+ }
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ directory, leaf := SplitPath(path)
+ directoryID, err = dc._findDir(directory, create)
+ return
+}
+
+// FindRoot finds the root directory if not already found
+//
+// Resets the root directory
+//
+// If create is set it will make the directory if not found
+func (dc *DirCache) FindRoot(create bool) error {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ if dc.foundRoot {
+ return nil
+ }
+ rootID, err := dc._findDir(dc.root, create)
+ if err != nil {
+ return err
+ }
+ dc.foundRoot = true
+ dc.rootID = rootID
+
+ // Find the parent of the root while we still have the root
+ // directory tree cached
+ rootParentPath, _ := SplitPath(dc.root)
+ dc.rootParentID, _ = dc.Get(rootParentPath)
+
+ // Reset the tree based on dc.root
+ dc.Flush()
+ // Put the root directory in
+ dc.Put("", dc.rootID)
+ return nil
+}
+
+// FindRootAndPath finds the root first if it hasn't been found yet,
+// then finds the leaf and directoryID from a path
+//
+// If create is set parent directories will be created if they don't exist
+func (dc *DirCache) FindRootAndPath(path string, create bool) (leaf, directoryID string, err error) {
+ err = dc.FindRoot(create)
+ if err != nil {
+ return
+ }
+ return dc.FindPath(path, create)
+}
+
+// FoundRoot returns whether the root directory has been found yet
+//
+// Call this from FindLeaf or CreateDir only
+func (dc *DirCache) FoundRoot() bool {
+ return dc.foundRoot
+}
+
+// RootID returns the ID of the root directory
+//
+// This should be called after FindRoot
+func (dc *DirCache) RootID() string {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ if !dc.foundRoot {
+ log.Fatalf("Internal Error: RootID() called before FindRoot")
+ }
+ return dc.rootID
+}
+
+// RootParentID returns the ID of the parent of the root directory
+//
+// This should be called after FindRoot
+func (dc *DirCache) RootParentID() (string, error) {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ if !dc.foundRoot {
+ return "", errors.New("internal error: RootID() called before FindRoot")
+ }
+ if dc.rootParentID == "" {
+ return "", errors.New("internal error: didn't find rootParentID")
+ }
+ if dc.rootID == dc.trueRootID {
+ return "", errors.New("is root directory")
+ }
+ return dc.rootParentID, nil
+}
+
+// ResetRoot resets the root directory to the absolute root and clears
+// the DirCache
+func (dc *DirCache) ResetRoot() {
+ dc.mu.Lock()
+ defer dc.mu.Unlock()
+ dc.foundRoot = false
+ dc.Flush()
+
+ // Put the true root in
+ dc.rootID = dc.trueRootID
+
+ // Put the root directory in
+ dc.Put("", dc.rootID)
+}
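+
+// Typical backend flow, a sketch assuming dc was built with New over a
+// DirCacher implementation:
+//
+//	if err := dc.FindRoot(false); err != nil {
+//		return err
+//	}
+//	leaf, dirID, err := dc.FindPath("some/dir/file.txt", true)
+//	if err != nil {
+//		return err
+//	}
+//	// upload the object named leaf into the directory with ID dirID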
diff --git a/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil.go b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil.go
new file mode 100755
index 0000000..777297b
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil.go
@@ -0,0 +1,545 @@
+package oauthutil
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "html/template"
+ "log"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/config"
+ "github.com/ncw/rclone/fs/config/configmap"
+ "github.com/ncw/rclone/fs/fshttp"
+ "github.com/pkg/errors"
+ "github.com/skratchdot/open-golang/open"
+ "golang.org/x/oauth2"
+)
+
+const (
+ // TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
+ // code should be returned in the title bar of the browser, with the page text
+ // prompting the user to copy the code and paste it in the application.
+ TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
+
+ // bindPort is the port that we bind the local webserver to
+ bindPort = "53682"
+
+ // bindAddress is binding for local webserver when active
+ bindAddress = "127.0.0.1:" + bindPort
+
+ // RedirectURL is redirect to local webserver when active
+ RedirectURL = "http://" + bindAddress + "/"
+
+ // RedirectPublicURL is redirect to local webserver when active with public name
+ RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
+
+ // RedirectLocalhostURL is redirect to local webserver when active with localhost
+ RedirectLocalhostURL = "http://localhost:" + bindPort + "/"
+
+ // AuthResponse is a template to handle the redirect URL for oauth requests
+ AuthResponse = `<!DOCTYPE html>
+<html lang="en">
+<head>
+<title>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</title>
+</head>
+<body>
+<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
+<hr>
+{{ if eq .OK false }}
+<p>Error: {{ .AuthError.Name }}</p>
+{{ if .AuthError.Description }}<p>Description: {{ .AuthError.Description }}</p>{{ end }}
+{{ if .AuthError.Code }}<p>Code: {{ .AuthError.Code }}</p>{{ end }}
+{{ if .AuthError.HelpURL }}<p>Look here for help: <a href="{{ .AuthError.HelpURL }}">{{ .AuthError.HelpURL }}</a></p>{{ end }}
+{{ else }}
+{{ if .Code }}
+<p>Please copy this code into rclone:</p>
+<p><code>{{ .Code }}</code></p>
+{{ else }}
+<p>All done. Please go back to rclone.</p>
+{{ end }}
+{{ end }}
+</body>
+</html>
+`
+)
+
+// oldToken contains an end-user's tokens.
+// This is the data you must store to persist authentication.
+//
+// From the original code.google.com/p/goauth2/oauth package - used
+// for backwards compatibility in the rclone config file
+type oldToken struct {
+ AccessToken string
+ RefreshToken string
+ Expiry time.Time
+}
+
+// GetToken returns the token saved in the config file under
+// section name.
+func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
+ tokenString, ok := m.Get(config.ConfigToken)
+ if !ok || tokenString == "" {
+ return nil, errors.New("empty token found - please run rclone config again")
+ }
+ token := new(oauth2.Token)
+ err := json.Unmarshal([]byte(tokenString), token)
+ if err != nil {
+ return nil, err
+ }
+ // if it has data then return it
+ if token.AccessToken != "" {
+ return token, nil
+ }
+ // otherwise try parsing as oldToken
+ oldtoken := new(oldToken)
+ err = json.Unmarshal([]byte(tokenString), oldtoken)
+ if err != nil {
+ return nil, err
+ }
+ // Fill in result into new token
+ token.AccessToken = oldtoken.AccessToken
+ token.RefreshToken = oldtoken.RefreshToken
+ token.Expiry = oldtoken.Expiry
+ // Save new format in config file
+ err = PutToken(name, m, token, false)
+ if err != nil {
+ return nil, err
+ }
+ return token, nil
+}
+
+// PutToken stores the token in the config file
+//
+// This saves the config file if it changes
+func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
+ tokenBytes, err := json.Marshal(token)
+ if err != nil {
+ return err
+ }
+ tokenString := string(tokenBytes)
+ old, ok := m.Get(config.ConfigToken)
+ if !ok || tokenString != old {
+ err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
+ if newSection && err != nil {
+ fs.Debugf(name, "Added new token to config, still needs to be saved")
+ } else if err != nil {
+ fs.Errorf(nil, "Failed to save new token in config file: %v", err)
+ } else {
+ fs.Debugf(name, "Saved new token in config file")
+ }
+ }
+ return nil
+}
+
+// TokenSource stores updated tokens in the config file
+type TokenSource struct {
+ mu sync.Mutex
+ name string
+ m configmap.Mapper
+ tokenSource oauth2.TokenSource
+ token *oauth2.Token
+ config *oauth2.Config
+ ctx context.Context
+ expiryTimer *time.Timer // signals whenever the token expires
+}
+
+// Token returns a token or an error.
+// Token must be safe for concurrent use by multiple goroutines.
+// The returned Token must not be modified.
+//
+// This saves the token in the config file if it has changed
+func (ts *TokenSource) Token() (*oauth2.Token, error) {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+
+ // Make a new token source if required
+ if ts.tokenSource == nil {
+ ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
+ }
+
+ token, err := ts.tokenSource.Token()
+ if err != nil {
+ return nil, err
+ }
+ changed := *token != *ts.token
+ ts.token = token
+ if changed {
+ // Bump on the expiry timer if it is set
+ if ts.expiryTimer != nil {
+ ts.expiryTimer.Reset(ts.timeToExpiry())
+ }
+ err = PutToken(ts.name, ts.m, token, false)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return token, nil
+}
+
+// Invalidate invalidates the token
+func (ts *TokenSource) Invalidate() {
+ ts.mu.Lock()
+ ts.token.AccessToken = ""
+ ts.mu.Unlock()
+}
+
+// timeToExpiry returns how long until the token expires
+//
+// Call with the lock held
+func (ts *TokenSource) timeToExpiry() time.Duration {
+ t := ts.token
+ if t == nil {
+ return 0
+ }
+ if t.Expiry.IsZero() {
+ return 3E9 * time.Second // ~95 years
+ }
+ return t.Expiry.Sub(time.Now())
+}
+
+// OnExpiry returns a channel which has the time written to it when
+// the token expires. Note that there is only one channel, so if
+// multiple goroutines are attached it will only signal one of them.
+func (ts *TokenSource) OnExpiry() <-chan time.Time {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+ if ts.expiryTimer == nil {
+ ts.expiryTimer = time.NewTimer(ts.timeToExpiry())
+ }
+ return ts.expiryTimer.C
+}
+
+// Check interface satisfied
+var _ oauth2.TokenSource = (*TokenSource)(nil)
+
+// Context returns a context with our HTTP Client baked in for oauth2
+func Context(client *http.Client) context.Context {
+ return context.WithValue(context.Background(), oauth2.HTTPClient, client)
+}
+
+// overrideCredentials sets the ClientID and ClientSecret from the
+// config file if they are not blank.
+// If any value is overridden, true is returned.
+// origConfig is copied, not modified.
+func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
+ newConfig = new(oauth2.Config)
+ *newConfig = *origConfig
+ changed = false
+ ClientID, ok := m.Get(config.ConfigClientID)
+ if ok && ClientID != "" {
+ newConfig.ClientID = ClientID
+ changed = true
+ }
+ ClientSecret, ok := m.Get(config.ConfigClientSecret)
+ if ok && ClientSecret != "" {
+ newConfig.ClientSecret = ClientSecret
+ changed = true
+ }
+ AuthURL, ok := m.Get(config.ConfigAuthURL)
+ if ok && AuthURL != "" {
+ newConfig.Endpoint.AuthURL = AuthURL
+ changed = true
+ }
+ TokenURL, ok := m.Get(config.ConfigTokenURL)
+ if ok && TokenURL != "" {
+ newConfig.Endpoint.TokenURL = TokenURL
+ changed = true
+ }
+ return newConfig, changed
+}
+
+// NewClientWithBaseClient gets a token from the config file and
+// configures a Client with it. It returns the client and a
+// TokenSource which Invalidate may need to be called on. It uses the
+// httpClient passed in as the base client.
+func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
+ config, _ = overrideCredentials(name, m, config)
+ token, err := GetToken(name, m)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Set our own http client in the context
+ ctx := Context(baseClient)
+
+ // Wrap the TokenSource in our TokenSource which saves changed
+ // tokens in the config file
+ ts := &TokenSource{
+ name: name,
+ m: m,
+ token: token,
+ config: config,
+ ctx: ctx,
+ }
+ return oauth2.NewClient(ctx, ts), ts, nil
+
+}
+
+// NewClient gets a token from the config file and configures a Client
+// with it. It returns the client and a TokenSource which Invalidate may need to be called on
+func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
+ return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config))
+}
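+
+// Sketch of backend usage, assuming oauthConfig is the backend's
+// *oauth2.Config and name/m identify its config section:
+//
+//	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
+//	if err != nil {
+//		return nil, err
+//	}
+//	// use oAuthClient for API calls; call ts.Invalidate() if the
+//	// server reports the access token as invalid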
+
+// Config does the initial creation of the token
+//
+// It may run an internal webserver to receive the results
+func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
+ return doConfig(id, name, m, nil, config, true, opts)
+}
+
+// ConfigNoOffline does the same as Config but does not pass the
+// "access_type=offline" parameter.
+func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
+ return doConfig(id, name, m, nil, config, false, opts)
+}
+
+// ConfigErrorCheck does the same as Config, but allows the backend to pass an error-handling function.
+// This function is called with the request made to rclone if no authorization code was found in it.
+func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
+ return doConfig(id, name, m, errorHandler, config, true, opts)
+}
+
+func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
+ oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
+ auto, ok := m.Get(config.ConfigAutomatic)
+ automatic := ok && auto != ""
+
+ // See if already have a token
+ tokenString, ok := m.Get("token")
+ if ok && tokenString != "" {
+ fmt.Printf("Already have a token - refresh?\n")
+ if !config.Confirm() {
+ return nil
+ }
+ }
+
+ // Detect whether we should use internal web server
+ useWebServer := false
+ switch oauthConfig.RedirectURL {
+ case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
+ if changed {
+ fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
+ }
+ useWebServer = true
+ if automatic {
+ break
+ }
+ fmt.Printf("Use auto config?\n")
+ fmt.Printf(" * Say Y if not sure\n")
+ fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
+ auto := config.Confirm()
+ if !auto {
+ fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
+ fmt.Printf("Execute the following on your machine:\n")
+ if changed {
+ fmt.Printf("\trclone authorize %q %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
+ } else {
+ fmt.Printf("\trclone authorize %q\n", id)
+ }
+ fmt.Println("Then paste the result below:")
+ code := ""
+ for code == "" {
+ fmt.Printf("result> ")
+ code = strings.TrimSpace(config.ReadLine())
+ }
+ token := &oauth2.Token{}
+ err := json.Unmarshal([]byte(code), token)
+ if err != nil {
+ return err
+ }
+ return PutToken(name, m, token, false)
+ }
+ case TitleBarRedirectURL:
+ useWebServer = automatic
+ if !automatic {
+ fmt.Printf("Use auto config?\n")
+ fmt.Printf(" * Say Y if not sure\n")
+ fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
+ useWebServer = config.Confirm()
+ }
+ if useWebServer {
+ // copy the config and set to use the internal webserver
+ configCopy := *oauthConfig
+ oauthConfig = &configCopy
+ oauthConfig.RedirectURL = RedirectURL
+ }
+ }
+
+ // Make random state
+ stateBytes := make([]byte, 16)
+ _, err := rand.Read(stateBytes)
+ if err != nil {
+ return err
+ }
+ state := fmt.Sprintf("%x", stateBytes)
+ if offline {
+ opts = append(opts, oauth2.AccessTypeOffline)
+ }
+ authURL := oauthConfig.AuthCodeURL(state, opts...)
+
+ // Prepare webserver
+ server := authServer{
+ state: state,
+ bindAddress: bindAddress,
+ authURL: authURL,
+ errorHandler: errorHandler,
+ }
+ if useWebServer {
+ server.code = make(chan string, 1)
+ server.err = make(chan error, 1)
+ go server.Start()
+ defer server.Stop()
+ authURL = "http://" + bindAddress + "/auth"
+ }
+
+ // Open the URL for the user to visit for authorization.
+ _ = open.Start(authURL)
+ fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
+ fmt.Printf("Log in and authorize rclone for access\n")
+
+ var authCode string
+ if useWebServer {
+ // Read the code, and exchange it for a token.
+ fmt.Printf("Waiting for code...\n")
+ authCode = <-server.code
+ authError := <-server.err
+ if authCode != "" {
+ fmt.Printf("Got code\n")
+ } else {
+ if authError != nil {
+ return authError
+ }
+ return errors.New("failed to get code")
+ }
+ } else {
+ // Read the code, and exchange it for a token.
+ fmt.Printf("Enter verification code> ")
+ authCode = config.ReadLine()
+ }
+ token, err := oauthConfig.Exchange(oauth2.NoContext, authCode)
+ if err != nil {
+ return errors.Wrap(err, "failed to get token")
+ }
+
+ // Print code if we do automatic retrieval
+ if automatic {
+ result, err := json.Marshal(token)
+ if err != nil {
+ return errors.Wrap(err, "failed to marshal token")
+ }
+ fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
+ }
+ return PutToken(name, m, token, true)
+}
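+
+// Backends typically call Config from their fs.RegInfo Config hook.
+// A sketch, with "drive" standing in for the backend id:
+//
+//	if err := oauthutil.Config("drive", name, m, oauthConfig); err != nil {
+//		log.Fatalf("Failed to configure token: %v", err)
+//	}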
+
+// Local web server for collecting auth
+type authServer struct {
+ state string
+ listener net.Listener
+ bindAddress string
+ code chan string
+ err chan error
+ authURL string
+ server *http.Server
+ errorHandler func(*http.Request) AuthError
+}
+
+// AuthError gets returned by the backend's errorHandler function
+type AuthError struct {
+ Name string
+ Description string
+ Code string
+ HelpURL string
+}
+
+// AuthResponseData can fill the AuthResponse template
+type AuthResponseData struct {
+ OK bool // Failure or Success?
+ Code string // code to paste into rclone config
+ AuthError
+}
+
+// Start runs an internal web server to receive config details
+func (s *authServer) Start() {
+ fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
+ mux := http.NewServeMux()
+ s.server = &http.Server{
+ Addr: s.bindAddress,
+ Handler: mux,
+ }
+ s.server.SetKeepAlivesEnabled(false)
+ mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
+ http.Error(w, "", 404)
+ return
+ })
+ mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
+ http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
+ return
+ })
+ mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fs.Debugf(nil, "Received request on auth server")
+ code := req.FormValue("code")
+ var err error
+ var t = template.Must(template.New("authResponse").Parse(AuthResponse))
+ resp := AuthResponseData{AuthError: AuthError{}}
+ if code != "" {
+ state := req.FormValue("state")
+ if state != s.state {
+ fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
+ resp.OK = false
+ resp.AuthError = AuthError{
+ Name: "Auth State doesn't match",
+ }
+ } else {
+ fs.Debugf(nil, "Successfully got code")
+ resp.OK = true
+ if s.code == nil {
+ resp.Code = code
+ }
+ }
+ } else {
+ fs.Debugf(nil, "No code found on request")
+ var authError AuthError
+ if s.errorHandler == nil {
+ authError = AuthError{
+ Name: "Auth Error",
+ Description: "No code found returned by remote server.",
+ }
+ } else {
+ authError = s.errorHandler(req)
+ }
+ err = fmt.Errorf("Error: %s\nCode: %s\nDescription: %s\nHelp: %s",
+ authError.Name, authError.Code, authError.Description, authError.HelpURL)
+ resp.OK = false
+ resp.AuthError = authError
+ w.WriteHeader(500)
+ }
+ if err := t.Execute(w, resp); err != nil {
+ fs.Debugf(nil, "Could not execute template for web response.")
+ }
+ if s.code != nil {
+ s.code <- code
+ s.err <- err
+ }
+ })
+
+ var err error
+ s.listener, err = net.Listen("tcp", s.bindAddress)
+ if err != nil {
+ log.Fatalf("Failed to start auth webserver: %v", err)
+ }
+ err = s.server.Serve(s.listener)
+ fs.Debugf(nil, "Closed auth server with error: %v", err)
+}
diff --git a/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_new.go b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_new.go
new file mode 100755
index 0000000..9b9acbc
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_new.go
@@ -0,0 +1,19 @@
+// oauthutil parts go1.8+
+
+//+build go1.8
+
+package oauthutil
+
+import "github.com/ncw/rclone/fs"
+
+func (s *authServer) Stop() {
+ fs.Debugf(nil, "Closing auth server")
+ if s.code != nil {
+ close(s.code)
+ s.code = nil
+ }
+ _ = s.listener.Close()
+
+ // close the server
+ _ = s.server.Close()
+}
diff --git a/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_old.go b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_old.go
new file mode 100755
index 0000000..d1a9e45
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_old.go
@@ -0,0 +1,16 @@
+// oauthutil parts for go versions before go1.8
+
+//+build !go1.8
+
+package oauthutil
+
+import "github.com/ncw/rclone/fs"
+
+func (s *authServer) Stop() {
+ fs.Debugf(nil, "Closing auth server")
+ if s.code != nil {
+ close(s.code)
+ s.code = nil
+ }
+ _ = s.listener.Close()
+}
diff --git a/vendor/github.com/ncw/rclone/lib/oauthutil/renew.go b/vendor/github.com/ncw/rclone/lib/oauthutil/renew.go
new file mode 100755
index 0000000..3af1218
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/oauthutil/renew.go
@@ -0,0 +1,69 @@
+package oauthutil
+
+import (
+ "sync/atomic"
+
+ "github.com/ncw/rclone/fs"
+)
+
+// Renew allows tokens to be renewed on expiry if uploads are in progress.
+type Renew struct {
+ name string // name to use in logs
+ ts *TokenSource // token source that needs renewing
+ uploads int32 // number of uploads in progress - atomic access required
+ run func() error // a transaction to run to renew the token on
+}
+
+// NewRenew creates a new Renew struct and starts a background process
+// which renews the token whenever it expires. It uses the run() call
+// to run a transaction to do this.
+//
+// It will only renew the token if the number of uploads > 0
+func NewRenew(name string, ts *TokenSource, run func() error) *Renew {
+ r := &Renew{
+ name: name,
+ ts: ts,
+ run: run,
+ }
+ go r.renewOnExpiry()
+ return r
+}
+
+// renewOnExpiry renews the token whenever it expires. Useful when there
+// are lots of uploads in progress and the token doesn't get renewed.
+// Amazon seems to cancel your uploads if you don't renew your token
+// for 2 hours.
+func (r *Renew) renewOnExpiry() {
+ expiry := r.ts.OnExpiry()
+ for {
+ <-expiry
+ uploads := atomic.LoadInt32(&r.uploads)
+ if uploads != 0 {
+ fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
+ // Do a transaction
+ err := r.run()
+ if err == nil {
+ fs.Debugf(r.name, "Token refresh successful")
+ } else {
+ fs.Errorf(r.name, "Token refresh failed: %v", err)
+ }
+ } else {
+ fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
+ }
+ }
+}
+
+// Start should be called before starting an upload
+func (r *Renew) Start() {
+ atomic.AddInt32(&r.uploads, 1)
+}
+
+// Stop should be called after finishing an upload
+func (r *Renew) Stop() {
+ atomic.AddInt32(&r.uploads, -1)
+}
+
+// Invalidate invalidates the token source
+func (r *Renew) Invalidate() {
+ r.ts.Invalidate()
+}
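+
+// Sketch of wrapping an upload so the token keeps getting refreshed
+// while it runs, assuming f.renew was built with NewRenew:
+//
+//	f.renew.Start()
+//	defer f.renew.Stop()
+//	// ... perform the long-running upload ...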
diff --git a/vendor/github.com/ncw/rclone/lib/pacer/pacer.go b/vendor/github.com/ncw/rclone/lib/pacer/pacer.go
new file mode 100755
index 0000000..fb9ae99
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/pacer/pacer.go
@@ -0,0 +1,368 @@
+// Package pacer makes pacing and retrying API calls easy
+package pacer
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/ncw/rclone/fs/fserrors"
+)
+
+// Pacer state
+type Pacer struct {
+ mu sync.Mutex // Protecting read/writes
+ minSleep time.Duration // minimum sleep time
+ maxSleep time.Duration // maximum sleep time
+ decayConstant uint // decay constant
+ attackConstant uint // attack constant
+ pacer chan struct{} // To pace the operations
+ sleepTime time.Duration // Time to sleep for each transaction
+ retries int // Max number of retries
+ maxConnections int // Maximum number of concurrent connections
+ connTokens chan struct{} // Connection tokens
+ calculatePace func(bool) // switchable pacing algorithm - call with mu held
+ consecutiveRetries int // number of consecutive retries
+}
+
+// Type is for selecting different pacing algorithms
+type Type int
+
+const (
+ // DefaultPacer is a truncated exponential attack and decay.
+ //
+ // On retries the sleep time is doubled, on non errors then
+ // sleeptime decays according to the decay constant as set
+ // with SetDecayConstant.
+ //
+ // The sleep never goes below that set with SetMinSleep or
+ // above that set with SetMaxSleep.
+ DefaultPacer = Type(iota)
+
+ // AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
+ //
+ // It implements a truncated exponential backoff strategy with
+ // randomization. Normally operations are paced at the
+ // interval set with SetMinSleep. On errors the sleep timer
+ // is set to 0..2**retries seconds.
+ //
+ // See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
+ AmazonCloudDrivePacer
+
+ // GoogleDrivePacer is a specialised pacer for Google Drive
+ //
+ // It implements a truncated exponential backoff strategy with
+ // randomization. Normally operations are paced at the
+ // interval set with SetMinSleep. On errors the sleep timer
+ // is set to (2 ^ n) + random_number_milliseconds seconds
+ //
+ // See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
+ GoogleDrivePacer
+)
+
+// Paced is a function which is called by the Call and CallNoRetry
+// methods. It should return a boolean, true if it would like to be
+// retried, and an error. This error may be returned or returned
+// wrapped in a RetryError.
+type Paced func() (bool, error)
+
+// New returns a Pacer with sensible defaults
+func New() *Pacer {
+ p := &Pacer{
+ minSleep: 10 * time.Millisecond,
+ maxSleep: 2 * time.Second,
+ decayConstant: 2,
+ attackConstant: 1,
+ retries: fs.Config.LowLevelRetries,
+ pacer: make(chan struct{}, 1),
+ }
+ p.sleepTime = p.minSleep
+ p.SetPacer(DefaultPacer)
+ p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
+
+ // Put the first pacing token in
+ p.pacer <- struct{}{}
+
+ return p
+}
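+
+// Sketch of backend setup, tightening the defaults for a stricter API:
+//
+//	p := pacer.New().
+//		SetMinSleep(100 * time.Millisecond).
+//		SetPacer(pacer.GoogleDrivePacer)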
+
+// SetSleep sets the current sleep time
+func (p *Pacer) SetSleep(t time.Duration) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.sleepTime = t
+ return p
+}
+
+// GetSleep gets the current sleep time
+func (p *Pacer) GetSleep() time.Duration {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.sleepTime
+}
+
+// SetMinSleep sets the minimum sleep time for the pacer
+func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.minSleep = t
+ p.sleepTime = p.minSleep
+ return p
+}
+
+// SetMaxSleep sets the maximum sleep time for the pacer
+func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.maxSleep = t
+ p.sleepTime = p.minSleep
+ return p
+}
+
+// SetMaxConnections sets the maximum number of concurrent connections.
+// Setting the value to 0 will allow unlimited number of connections.
+// Should not be changed once you have started calling the pacer.
+// By default this will be set to fs.Config.Checkers.
+func (p *Pacer) SetMaxConnections(n int) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.maxConnections = n
+ if n <= 0 {
+ p.connTokens = nil
+ } else {
+ p.connTokens = make(chan struct{}, n)
+ for i := 0; i < n; i++ {
+ p.connTokens <- struct{}{}
+ }
+ }
+ return p
+}
+
+// SetDecayConstant sets the decay constant for the pacer
+//
+// This is the speed the time falls back to the minimum after errors
+// have occurred.
+//
+// bigger for slower decay, exponential. 1 is halve, 0 is go straight to minimum
+func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.decayConstant = decay
+ return p
+}
+
+// SetAttackConstant sets the attack constant for the pacer
+//
+// This is the speed the time grows from the minimum after errors have
+// occurred.
+//
+// bigger for slower attack, 1 is double, 0 is go straight to maximum
+func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.attackConstant = attack
+ return p
+}
+
+// SetRetries sets the max number of tries for Call
+func (p *Pacer) SetRetries(retries int) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.retries = retries
+ return p
+}
+
+// SetPacer sets the pacing algorithm
+//
+// It will choose the default algorithm if an incorrect value is
+// passed in.
+func (p *Pacer) SetPacer(t Type) *Pacer {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ switch t {
+ case AmazonCloudDrivePacer:
+ p.calculatePace = p.acdPacer
+ case GoogleDrivePacer:
+ p.calculatePace = p.drivePacer
+ default:
+ p.calculatePace = p.defaultPacer
+ }
+ return p
+}
+
+// Start a call to the API
+//
+// This must be called as a pair with endCall
+//
+// This waits for the pacer token
+func (p *Pacer) beginCall() {
+ // pacer starts with a token in and whenever we take one out
+ // XXX ms later we put another in. We could do this with a
+ // Ticker more accurately, but then we'd have to work out how
+ // not to run it when it wasn't needed
+ <-p.pacer
+ if p.maxConnections > 0 {
+ <-p.connTokens
+ }
+
+ p.mu.Lock()
+ // Restart the timer
+ go func(t time.Duration) {
+ // fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
+ time.Sleep(t)
+ p.pacer <- struct{}{}
+ }(p.sleepTime)
+ p.mu.Unlock()
+}
+
+// defaultPacer implements a truncated exponential up and down
+// pacing algorithm
+//
+// See the description for DefaultPacer
+//
+// This should calculate a new sleepTime. It takes a boolean as to
+// whether the operation should be retried or not.
+//
+// Call with p.mu held
+func (p *Pacer) defaultPacer(retry bool) {
+ oldSleepTime := p.sleepTime
+ if retry {
+ if p.attackConstant == 0 {
+ p.sleepTime = p.maxSleep
+ } else {
+ p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
+ }
+ if p.sleepTime > p.maxSleep {
+ p.sleepTime = p.maxSleep
+ }
+ if p.sleepTime != oldSleepTime {
+ fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
+ }
+ } else {
+ p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
+ if p.sleepTime < p.minSleep {
+ p.sleepTime = p.minSleep
+ }
+ if p.sleepTime != oldSleepTime {
+ fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
+ }
+ }
+}
+
+// acdPacer implements a truncated exponential backoff
+// strategy with randomization for Amazon Drive
+//
+// See the description for AmazonCloudDrivePacer
+//
+// This should calculate a new sleepTime. It takes a boolean as to
+// whether the operation should be retried or not.
+//
+// Call with p.mu held
+func (p *Pacer) acdPacer(retry bool) {
+ consecutiveRetries := p.consecutiveRetries
+ if consecutiveRetries == 0 {
+ if p.sleepTime != p.minSleep {
+ p.sleepTime = p.minSleep
+ fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
+ }
+ } else {
+ if consecutiveRetries > 9 {
+ consecutiveRetries = 9
+ }
+ // consecutiveRetries starts at 1 so
+ // maxSleep is 2**(consecutiveRetries-1) seconds
+ maxSleep := time.Second << uint(consecutiveRetries-1)
+ // actual sleep is random from 0..maxSleep
+ p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
+ if p.sleepTime < p.minSleep {
+ p.sleepTime = p.minSleep
+ }
+ fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
+ }
+}
+
+// drivePacer implements a truncated exponential backoff strategy with
+// randomization for Google Drive
+//
+// See the description for GoogleDrivePacer
+//
+// This should calculate a new sleepTime. It takes a boolean as to
+// whether the operation should be retried or not.
+//
+// Call with p.mu held
+func (p *Pacer) drivePacer(retry bool) {
+ consecutiveRetries := p.consecutiveRetries
+ if consecutiveRetries == 0 {
+ if p.sleepTime != p.minSleep {
+ p.sleepTime = p.minSleep
+ fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
+ }
+ } else {
+ if consecutiveRetries > 5 {
+ consecutiveRetries = 5
+ }
+ // consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
+ // maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
+ p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
+ fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
+ }
+}
+
+// endCall is called at the end of a call
+//
+// It returns the connection token and recalculates the sleep time
+// based on whether the call needs to be retried.
+func (p *Pacer) endCall(retry bool) {
+ if p.maxConnections > 0 {
+ p.connTokens <- struct{}{}
+ }
+ p.mu.Lock()
+ if retry {
+ p.consecutiveRetries++
+ } else {
+ p.consecutiveRetries = 0
+ }
+ p.calculatePace(retry)
+ p.mu.Unlock()
+}
+
+// call implements Call but with settable retries
+func (p *Pacer) call(fn Paced, retries int) (err error) {
+ var retry bool
+ for i := 1; i <= retries; i++ {
+ p.beginCall()
+ retry, err = fn()
+ p.endCall(retry)
+ if !retry {
+ break
+ }
+ fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
+ }
+ if retry {
+ err = fserrors.RetryError(err)
+ }
+ return err
+}
+
+// Call paces the remote operations to not exceed the limits and retry
+// on rate limit exceeded
+//
+// This calls fn, expecting it to return a retry flag and an
+// error. This error may be returned wrapped in a RetryError if the
+// number of retries is exceeded.
+func (p *Pacer) Call(fn Paced) (err error) {
+ p.mu.Lock()
+ retries := p.retries
+ p.mu.Unlock()
+ return p.call(fn, retries)
+}
+
+// CallNoRetry paces the remote operations to not exceed the limits
+// and return a retry error on rate limit exceeded
+//
+// This calls fn and wraps the output in a RetryError if it would like
+// it to be retried
+func (p *Pacer) CallNoRetry(fn Paced) error {
+ return p.call(fn, 1)
+}
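+
+// Sketch of pacing an API call; shouldRetry is a hypothetical helper
+// classifying retryable errors (e.g. 429s and 5xx responses) and
+// f.api.GetItem is a hypothetical client call:
+//
+//	var info *Item
+//	err := f.pacer.Call(func() (bool, error) {
+//		var err error
+//		info, err = f.api.GetItem(id)
+//		return shouldRetry(err), err
+//	})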
diff --git a/vendor/github.com/ncw/rclone/lib/pacer/tokens.go b/vendor/github.com/ncw/rclone/lib/pacer/tokens.go
new file mode 100755
index 0000000..b4f905b
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/pacer/tokens.go
@@ -0,0 +1,31 @@
+// Tokens for controlling concurrency
+
+package pacer
+
+// TokenDispenser is for controlling concurrency
+type TokenDispenser struct {
+ tokens chan struct{}
+}
+
+// NewTokenDispenser makes a pool of n tokens
+func NewTokenDispenser(n int) *TokenDispenser {
+ td := &TokenDispenser{
+ tokens: make(chan struct{}, n),
+ }
+ // Fill up the upload tokens
+ for i := 0; i < n; i++ {
+ td.tokens <- struct{}{}
+ }
+ return td
+}
+
+// Get gets a token from the pool - don't forget to return it with Put
+func (td *TokenDispenser) Get() {
+ <-td.tokens
+ return
+}
+
+// Put returns a token
+func (td *TokenDispenser) Put() {
+ td.tokens <- struct{}{}
+}
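+
+// Sketch limiting concurrency to 4 with a dispenser (Job and process
+// are hypothetical); Get blocks until a token is free, Put returns it:
+//
+//	td := pacer.NewTokenDispenser(4)
+//	for _, job := range jobs {
+//		td.Get()
+//		go func(job Job) {
+//			defer td.Put()
+//			process(job)
+//		}(job)
+//	}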
diff --git a/vendor/github.com/ncw/rclone/lib/readers/counting_reader.go b/vendor/github.com/ncw/rclone/lib/readers/counting_reader.go
new file mode 100755
index 0000000..872b4c5
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/readers/counting_reader.go
@@ -0,0 +1,28 @@
+package readers
+
+import "io"
+
+// NewCountingReader returns a CountingReader, which will read from the given
+// reader while keeping track of how many bytes were read.
+func NewCountingReader(in io.Reader) *CountingReader {
+ return &CountingReader{in: in}
+}
+
+// CountingReader holds a reader and a read count of how many bytes were read
+// so far.
+type CountingReader struct {
+ in io.Reader
+ read uint64
+}
+
+// Read reads from the underlying reader.
+func (cr *CountingReader) Read(b []byte) (int, error) {
+ n, err := cr.in.Read(b)
+ cr.read += uint64(n)
+ return n, err
+}
+
+// BytesRead returns how many bytes were read from the underlying reader so far.
+func (cr *CountingReader) BytesRead() uint64 {
+ return cr.read
+}
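+
+// For example, measuring how much of a stream was consumed, assuming
+// src is an io.Reader and dst an io.Writer:
+//
+//	cr := readers.NewCountingReader(src)
+//	_, err := io.Copy(dst, cr)
+//	// cr.BytesRead() now reports the bytes read from src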
diff --git a/vendor/github.com/ncw/rclone/lib/readers/limited.go b/vendor/github.com/ncw/rclone/lib/readers/limited.go
new file mode 100755
index 0000000..218dd66
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/readers/limited.go
@@ -0,0 +1,22 @@
+package readers
+
+import "io"
+
+// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
+type LimitedReadCloser struct {
+ *io.LimitedReader
+ io.Closer
+}
+
+// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
+// limit it to reading limit bytes. If limit < 0 then it does not
+// wrap rc, it just returns it.
+func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
+ if limit < 0 {
+ return rc
+ }
+ return &LimitedReadCloser{
+ LimitedReader: &io.LimitedReader{R: rc, N: limit},
+ Closer: rc,
+ }
+}
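+
+// For example, reading only the first 1024 bytes of a response body:
+//
+//	lrc := readers.NewLimitedReadCloser(resp.Body, 1024)
+//	defer func() { _ = lrc.Close() }()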
diff --git a/vendor/github.com/ncw/rclone/lib/readers/readfill.go b/vendor/github.com/ncw/rclone/lib/readers/readfill.go
new file mode 100755
index 0000000..64b5de4
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/readers/readfill.go
@@ -0,0 +1,18 @@
+package readers
+
+import "io"
+
+// ReadFill reads as much data from r into buf as it can
+//
+// It reads until the buffer is full or r.Read returned an error.
+//
+// This is like io.ReadFull but for when you just want as much data as
+// possible, not an exact size of block.
+func ReadFill(r io.Reader, buf []byte) (n int, err error) {
+ var nn int
+ for n < len(buf) && err == nil {
+ nn, err = r.Read(buf[n:])
+ n += nn
+ }
+ return n, err
+}
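+
+// For example, filling a chunk buffer from a stream that may return
+// short reads; err is io.EOF once the stream is exhausted:
+//
+//	buf := make([]byte, 1024*1024)
+//	n, err := readers.ReadFill(r, buf)
+//	// buf[:n] holds whatever data was available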
diff --git a/vendor/github.com/ncw/rclone/lib/readers/repeatable.go b/vendor/github.com/ncw/rclone/lib/readers/repeatable.go
new file mode 100755
index 0000000..f851363
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/readers/repeatable.go
@@ -0,0 +1,96 @@
+package readers
+
+import (
+ "io"
+
+ "github.com/pkg/errors"
+)
+
+// A RepeatableReader implements io.ReadSeeker. It allows seeking cached data
+// back and forth within the reader, but only reads data from the internal Reader
+// as necessary, and plays nicely with the Account and io.LimitedReader to reflect
+// the current speed
+type RepeatableReader struct {
+ in io.Reader // Input reader
+ i int64 // current reading index
+ b []byte // internal cache buffer
+}
+
+var _ io.ReadSeeker = (*RepeatableReader)(nil)
+
+// Seek implements the io.Seeker interface.
+// If the seek position is past the cached buffer length, the function returns
+// the maximum usable offset and an "fs.RepeatableReader.Seek: offset is unavailable" error
+func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
+ var abs int64
+ cacheLen := int64(len(r.b))
+ switch whence {
+ case io.SeekStart:
+ abs = offset
+ case io.SeekCurrent:
+ abs = r.i + offset
+ case io.SeekEnd:
+ abs = cacheLen + offset
+ default:
+ return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
+ }
+ if abs < 0 {
+ return 0, errors.New("fs.RepeatableReader.Seek: negative position")
+ }
+ if abs > cacheLen {
+ return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
+ }
+ r.i = abs
+ return abs, nil
+}
+
+// Read reads data from the original Reader into b.
+// Data is served from the cache if it was already read, otherwise from the underlying Reader
+func (r *RepeatableReader) Read(b []byte) (n int, err error) {
+ cacheLen := int64(len(r.b))
+ if r.i == cacheLen {
+ n, err = r.in.Read(b)
+ if n > 0 {
+ r.b = append(r.b, b[:n]...)
+ }
+ } else {
+ n = copy(b, r.b[r.i:])
+ }
+ r.i += int64(n)
+ return n, err
+}
+
+// NewRepeatableReader creates a new repeatable reader from Reader r
+func NewRepeatableReader(r io.Reader) *RepeatableReader {
+ return &RepeatableReader{in: r}
+}
+
+// NewRepeatableReaderSized creates a new repeatable reader from Reader r
+// with an initial buffer of size.
+func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
+ return &RepeatableReader{
+ in: r,
+ b: make([]byte, 0, size),
+ }
+}
+
+// NewRepeatableLimitReader creates a new repeatable reader from Reader r
+// with an initial buffer of size, wrapped in an io.LimitReader to read
+// only size bytes.
+func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
+ return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
+}
+
+// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
+// using the buffer passed in.
+func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
+ return &RepeatableReader{
+ in: r,
+ b: buf[:0],
+ }
+}
+
+// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
+// Reader r and buf, wrapped in an io.LimitReader to read only size bytes.
+func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
+ return NewRepeatableReaderBuffer(io.LimitReader(r, int64(size)), buf)
+}
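+
+// Sketch of the retry pattern this enables - rewind and resend a body
+// after a failed attempt; upload is a hypothetical function:
+//
+//	body := readers.NewRepeatableReader(in)
+//	for try := 1; try <= 3; try++ {
+//		if _, err := body.Seek(0, io.SeekStart); err != nil {
+//			return err
+//		}
+//		if err := upload(body); err == nil {
+//			break
+//		}
+//	}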
diff --git a/vendor/github.com/ncw/rclone/lib/rest/rest.go b/vendor/github.com/ncw/rclone/lib/rest/rest.go
new file mode 100755
index 0000000..76eda84
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/rest/rest.go
@@ -0,0 +1,421 @@
+// Package rest implements a simple REST wrapper
+//
+// All methods are safe for concurrent calling.
+package rest
+
+import (
+ "bytes"
+ "encoding/json"
+ "encoding/xml"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "github.com/ncw/rclone/fs"
+ "github.com/pkg/errors"
+)
+
+// Client contains the info to sustain the API
+type Client struct {
+ mu sync.RWMutex
+ c *http.Client
+ rootURL string
+ errorHandler func(resp *http.Response) error
+ headers map[string]string
+ signer SignerFn
+}
+
+// NewClient takes an oauth http.Client and makes a new api instance
+func NewClient(c *http.Client) *Client {
+ api := &Client{
+ c: c,
+ errorHandler: defaultErrorHandler,
+ headers: make(map[string]string),
+ }
+ return api
+}
+
+// ReadBody reads resp.Body into result, closing the body
+func ReadBody(resp *http.Response) (result []byte, err error) {
+ defer fs.CheckClose(resp.Body, &err)
+ return ioutil.ReadAll(resp.Body)
+}
+
+// defaultErrorHandler doesn't attempt to parse the http body, just
+// returns it in the error message
+func defaultErrorHandler(resp *http.Response) (err error) {
+ body, err := ReadBody(resp)
+ if err != nil {
+ return errors.Wrap(err, "error reading error out of body")
+ }
+ return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+}
+
+// SetErrorHandler sets the handler to decode an error response when
+// the HTTP status code is not 2xx. The handler should close resp.Body.
+func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
+ api.mu.Lock()
+ defer api.mu.Unlock()
+ api.errorHandler = fn
+ return api
+}
+
+// SetRoot sets the default RootURL. You can override this on a per
+// call basis using the RootURL field in Opts.
+func (api *Client) SetRoot(RootURL string) *Client {
+ api.mu.Lock()
+ defer api.mu.Unlock()
+ api.rootURL = RootURL
+ return api
+}
+
+// SetHeader sets a header for all requests
+func (api *Client) SetHeader(key, value string) *Client {
+ api.mu.Lock()
+ defer api.mu.Unlock()
+ api.headers[key] = value
+ return api
+}
+
+// RemoveHeader unsets a header for all requests
+func (api *Client) RemoveHeader(key string) *Client {
+ api.mu.Lock()
+ defer api.mu.Unlock()
+ delete(api.headers, key)
+ return api
+}
+
+// SignerFn is used to sign an outgoing request
+type SignerFn func(*http.Request) error
+
+// SetSigner sets a signer for all requests
+func (api *Client) SetSigner(signer SignerFn) *Client {
+ api.mu.Lock()
+ defer api.mu.Unlock()
+ api.signer = signer
+ return api
+}
+
+// SetUserPass creates an Authorization header for all requests with
+// the UserName and Password passed in
+func (api *Client) SetUserPass(UserName, Password string) *Client {
+ req, _ := http.NewRequest("GET", "http://example.com", nil)
+ req.SetBasicAuth(UserName, Password)
+ api.SetHeader("Authorization", req.Header.Get("Authorization"))
+ return api
+}
+
+// SetCookie creates a Cookie header for all requests with the supplied
+// cookies passed in.
+// All cookies have to be supplied at once; all cookies will be overwritten
+// on a new call to the method
+func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
+ req, _ := http.NewRequest("GET", "http://example.com", nil)
+ for _, ck := range cks {
+ req.AddCookie(ck)
+ }
+ api.SetHeader("Cookie", req.Header.Get("Cookie"))
+ return api
+}
+
+// Opts contains parameters for Call, CallJSON etc
+type Opts struct {
+ Method string // GET, POST etc
+ Path string // relative to RootURL
+ RootURL string // override RootURL passed into SetRoot()
+ Body io.Reader
+ NoResponse bool // set to close Body
+ ContentType string
+ ContentLength *int64
+ ContentRange string
+ ExtraHeaders map[string]string
+ UserName string // username for Basic Auth
+ Password string // password for Basic Auth
+ Options []fs.OpenOption
+ IgnoreStatus bool // if set then we don't check error status or parse error body
+ MultipartParams url.Values // if set do multipart form upload with attached file
+ MultipartMetadataName string // ..this is used for the name of the metadata form part if set
+ MultipartContentName string // ..name of the parameter which is the attached file
+ MultipartFileName string // ..name of the file for the attached file
+ Parameters url.Values // any parameters for the final URL
+ TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
+ Close bool // set to close the connection after this transaction
+ NoRedirect bool // if this is set then the client won't follow redirects
+}
+
+// Copy creates a copy of the options
+func (o *Opts) Copy() *Opts {
+ newOpts := *o
+ return &newOpts
+}
+
+// DecodeJSON decodes resp.Body into result
+func DecodeJSON(resp *http.Response, result interface{}) (err error) {
+ defer fs.CheckClose(resp.Body, &err)
+ decoder := json.NewDecoder(resp.Body)
+ return decoder.Decode(result)
+}
+
+// DecodeXML decodes resp.Body into result
+func DecodeXML(resp *http.Response, result interface{}) (err error) {
+ defer fs.CheckClose(resp.Body, &err)
+ decoder := xml.NewDecoder(resp.Body)
+ return decoder.Decode(result)
+}
+
+// ClientWithHeaderReset makes a new http client which resets the
+// headers passed in on redirect
+//
+// FIXME This is now unnecessary with go1.8
+func ClientWithHeaderReset(c *http.Client, headers map[string]string) *http.Client {
+ if len(headers) == 0 {
+ return c
+ }
+ clientCopy := *c
+ clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ if len(via) >= 10 {
+ return errors.New("stopped after 10 redirects")
+ }
+ // Reset the headers in the new request
+ for k, v := range headers {
+ if v != "" {
+ req.Header.Set(k, v)
+ }
+ }
+ return nil
+ }
+ return &clientCopy
+}
+
+// ClientWithNoRedirects makes a new http client which won't follow redirects
+func ClientWithNoRedirects(c *http.Client) *http.Client {
+ clientCopy := *c
+ clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ }
+ return &clientCopy
+}
+
+// Call makes the call and returns the http.Response
+//
+// if err == nil then resp.Body will need to be closed
+//
+// it will return resp if at all possible, even if err is set
+func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
+ api.mu.RLock()
+ defer api.mu.RUnlock()
+ if opts == nil {
+ return nil, errors.New("call() called with nil opts")
+ }
+ url := api.rootURL
+ if opts.RootURL != "" {
+ url = opts.RootURL
+ }
+ if url == "" {
+ return nil, errors.New("RootURL not set")
+ }
+ url += opts.Path
+ if len(opts.Parameters) > 0 {
+ url += "?" + opts.Parameters.Encode()
+ }
+ req, err := http.NewRequest(opts.Method, url, opts.Body)
+ if err != nil {
+ return
+ }
+ headers := make(map[string]string)
+ // Set default headers
+ for k, v := range api.headers {
+ headers[k] = v
+ }
+ if opts.ContentType != "" {
+ headers["Content-Type"] = opts.ContentType
+ }
+ if opts.ContentLength != nil {
+ req.ContentLength = *opts.ContentLength
+ }
+ if opts.ContentRange != "" {
+ headers["Content-Range"] = opts.ContentRange
+ }
+ if len(opts.TransferEncoding) != 0 {
+ req.TransferEncoding = opts.TransferEncoding
+ }
+ if opts.Close {
+ req.Close = true
+ }
+ // Set any extra headers
+ if opts.ExtraHeaders != nil {
+ for k, v := range opts.ExtraHeaders {
+ headers[k] = v
+ }
+ }
+ // add any options to the headers
+ fs.OpenOptionAddHeaders(opts.Options, headers)
+ // Now set the headers
+ for k, v := range headers {
+ if v != "" {
+ req.Header.Add(k, v)
+ }
+ }
+ if opts.UserName != "" || opts.Password != "" {
+ req.SetBasicAuth(opts.UserName, opts.Password)
+ }
+ var c *http.Client
+ if opts.NoRedirect {
+ c = ClientWithNoRedirects(api.c)
+ } else {
+ c = ClientWithHeaderReset(api.c, headers)
+ }
+ if api.signer != nil {
+ err = api.signer(req)
+ if err != nil {
+ return nil, errors.Wrap(err, "signer failed")
+ }
+ }
+ api.mu.RUnlock()
+ resp, err = c.Do(req)
+ api.mu.RLock()
+ if err != nil {
+ return nil, err
+ }
+ if !opts.IgnoreStatus {
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ err = api.errorHandler(resp)
+ if err.Error() == "" {
+ // replace empty errors with something
+ err = errors.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
+ }
+ return resp, err
+ }
+ }
+ if opts.NoResponse {
+ return resp, resp.Body.Close()
+ }
+ return resp, nil
+}
+
+// MultipartUpload creates an io.Reader which produces an encoded
+// multipart form upload from the params and file passed in
+//
+// in - the body of the file
+// params - the form parameters
+// fileName - is the name of the attached file
+// contentName - the name of the parameter for the file
+//
+// NB This doesn't allow setting the content type of the attachment
+func MultipartUpload(in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, error) {
+ bodyReader, bodyWriter := io.Pipe()
+ writer := multipart.NewWriter(bodyWriter)
+ contentType := writer.FormDataContentType()
+
+ // Pump the data in the background
+ go func() {
+ var err error
+
+ for key, vals := range params {
+ for _, val := range vals {
+ err = writer.WriteField(key, val)
+ if err != nil {
+ _ = bodyWriter.CloseWithError(errors.Wrap(err, "create metadata part"))
+ return
+ }
+ }
+ }
+
+ part, err := writer.CreateFormFile(contentName, fileName)
+ if err != nil {
+ _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to create form file"))
+ return
+ }
+
+ _, err = io.Copy(part, in)
+ if err != nil {
+ _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to copy data"))
+ return
+ }
+
+ err = writer.Close()
+ if err != nil {
+ _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to close form"))
+ return
+ }
+
+ _ = bodyWriter.Close()
+ }()
+
+ return bodyReader, contentType, nil
+}
+
+// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
+//
+// If request is not nil then it will be JSON encoded as the body of the request
+//
+// If (opts.MultipartParams or opts.MultipartContentName) and
+// opts.Body are set then CallJSON will do a multipart upload with a
+// file attached. opts.MultipartContentName is the name of the
+// parameter and opts.MultipartFileName is the name of the file. If
+// MultipartContentName is set, and request != nil is supplied, then
+// the request will be marshalled into JSON and added to the form with
+// parameter name MultipartMetadataName.
+//
+// It will return resp if at all possible, even if err is set
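+//
+// An illustrative sketch of a plain JSON GET (the Opts values here are
+// hypothetical):
+//
+//	opts := Opts{Method: "GET", Path: "/status"}
+//	var result map[string]interface{}
+//	_, err := api.CallJSON(&opts, nil, &result)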
+func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+ return api.callCodec(opts, request, response, json.Marshal, DecodeJSON, "application/json")
+}
+
+// CallXML runs Call and decodes the body as an XML object into response (if not nil)
+//
+// If request is not nil then it will be XML encoded as the body of the request
+//
+// See CallJSON for a description of MultipartParams and related opts
+//
+// It will return resp if at all possible, even if err is set
+func (api *Client) CallXML(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+ return api.callCodec(opts, request, response, xml.Marshal, DecodeXML, "application/xml")
+}
+
+type marshalFn func(v interface{}) ([]byte, error)
+type decodeFn func(resp *http.Response, result interface{}) (err error)
+
+func (api *Client) callCodec(opts *Opts, request interface{}, response interface{}, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) {
+ var requestBody []byte
+ // Marshal the request if given
+ if request != nil {
+ requestBody, err = marshal(request)
+ if err != nil {
+ return nil, err
+ }
+ // Set the body up as a marshalled object if no body passed in
+ if opts.Body == nil {
+ opts = opts.Copy()
+ opts.ContentType = contentType
+ opts.Body = bytes.NewBuffer(requestBody)
+ }
+ }
+ isMultipart := (opts.MultipartParams != nil || opts.MultipartMetadataName != "") && opts.Body != nil
+ if isMultipart {
+ params := opts.MultipartParams
+ if params == nil {
+ params = url.Values{}
+ }
+ if opts.MultipartMetadataName != "" {
+ params.Add(opts.MultipartMetadataName, string(requestBody))
+ }
+ opts = opts.Copy()
+ opts.Body, opts.ContentType, err = MultipartUpload(opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
+ if err != nil {
+ return nil, err
+ }
+ }
+ resp, err = api.Call(opts)
+ if err != nil {
+ return resp, err
+ }
+ if response == nil || opts.NoResponse {
+ return resp, nil
+ }
+ err = decode(resp, response)
+ return resp, err
+}
diff --git a/vendor/github.com/ncw/rclone/lib/rest/url.go b/vendor/github.com/ncw/rclone/lib/rest/url.go
new file mode 100755
index 0000000..07ce159
--- /dev/null
+++ b/vendor/github.com/ncw/rclone/lib/rest/url.go
@@ -0,0 +1,27 @@
+package rest
+
+import (
+ "net/url"
+
+ "github.com/pkg/errors"
+)
+
+// URLJoin joins a URL and a path returning a new URL
+//
+// path should be URL escaped
+func URLJoin(base *url.URL, path string) (*url.URL, error) {
+ rel, err := url.Parse(path)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing %q as URL", path)
+ }
+ return base.ResolveReference(rel), nil
+}
+
+// URLPathEscape escapes the URL path in the string using URL escaping rules
+//
+// This mimics url.PathEscape which is only available from go 1.8
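+//
+// For example (illustrative): URLPathEscape("a/b c") returns "a/b%20c"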
+func URLPathEscape(in string) string {
+ var u url.URL
+ u.Path = in
+ return u.String()
+}
diff --git a/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/ncw/swift/COPYING
new file mode 100755
index 0000000..8c27c67
--- /dev/null
+++ b/vendor/github.com/ncw/swift/COPYING
@@ -0,0 +1,20 @@
+Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md
new file mode 100755
index 0000000..57fc459
--- /dev/null
+++ b/vendor/github.com/ncw/swift/README.md
@@ -0,0 +1,158 @@
+Swift
+=====
+
+This package provides an easy to use library for interfacing with
+Swift / Openstack Object Storage / Rackspace cloud files from the Go
+Language
+
+See here for package docs
+
+ http://godoc.org/github.com/ncw/swift
+
+[![Build Status](https://travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)
+
+Install
+-------
+
+Use go to install the library
+
+ go get github.com/ncw/swift
+
+Usage
+-----
+
+See here for full package docs
+
+- http://godoc.org/github.com/ncw/swift
+
+Here is a short example from the docs
+```go
+import "github.com/ncw/swift"
+
+// Create a connection
+c := swift.Connection{
+ UserName: "user",
+ ApiKey: "key",
+ AuthUrl: "auth_url",
+ Domain: "domain", // Name of the domain (v3 auth only)
+ Tenant: "tenant", // Name of the tenant (v2 auth only)
+}
+// Authenticate
+err := c.Authenticate()
+if err != nil {
+ panic(err)
+}
+// List all the containers
+containers, err := c.ContainerNames(nil)
+fmt.Println(containers)
+// etc...
+```
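+
+Building on that connection, here is a minimal sketch of creating a
+container and round-tripping an object (the container and object names
+are illustrative):
+```go
+// Create a container, upload an object, then read it back
+err = c.ContainerCreate("example-container", nil)
+if err != nil {
+	panic(err)
+}
+err = c.ObjectPutBytes("example-container", "hello.txt", []byte("hello"), "text/plain")
+if err != nil {
+	panic(err)
+}
+contents, err := c.ObjectGetString("example-container", "hello.txt")
+if err != nil {
+	panic(err)
+}
+fmt.Println(contents)
+```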
+
+Additions
+---------
+
+The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.
+
+Testing
+-------
+
+To run the tests you can either use an embedded fake Swift server
+or use a real Openstack Swift server or a Rackspace Cloud files account.
+
+When using a real Swift server, you need to set these environment variables
+before running the tests
+
+ export SWIFT_API_USER='user'
+ export SWIFT_API_KEY='key'
+ export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'
+
+And optionally these if using v2 authentication
+
+ export SWIFT_TENANT='TenantName'
+ export SWIFT_TENANT_ID='TenantId'
+
+And optionally these if using v3 authentication
+
+ export SWIFT_TENANT='TenantName'
+ export SWIFT_TENANT_ID='TenantId'
+ export SWIFT_API_DOMAIN_ID='domain id'
+ export SWIFT_API_DOMAIN='domain name'
+
+And optionally these if using v3 trust
+
+ export SWIFT_TRUST_ID='TrustId'
+
+And optionally this if you want to skip server certificate validation
+
+ export SWIFT_AUTH_INSECURE=1
+
+And optionally this to configure the connect channel timeout, in seconds
+
+ export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60
+
+And optionally this to configure the data channel timeout, in seconds
+
+ export SWIFT_DATA_CHANNEL_TIMEOUT=60
+
+Then run the tests with `go test`
+
+License
+-------
+
+This is free software under the terms of MIT license (check COPYING file
+included in this package).
+
+Contact and support
+-------------------
+
+The project website is at:
+
+- https://github.com/ncw/swift
+
+There you can file bug reports, ask for help or contribute patches.
+
+Authors
+-------
+
+- Nick Craig-Wood
+
+Contributors
+------------
+
+- Brian "bojo" Jones
+- Janika Liiv
+- Yamamoto, Hirotaka
+- Stephen
+- platformpurple
+- Paul Querna
+- Livio Soares
+- thesyncim
+- lsowen
+- Sylvain Baubeau
+- Chris Kastorff
+- Dai HaoJun
+- Hua Wang
+- Fabian Ruff
+- Arturo Reuschenbach Puncernau
+- Petr Kotek
+- Stefan Majewsky
+- Cezar Sa Espinola
+- Sam Gunaratne
+- Richard Scothern
+- Michel Couillard
+- Christopher Waldon
+- dennis
+- hag
+- Alexander Neumann
+- eclipseo <30413512+eclipseo@users.noreply.github.com>
+- Yuri Per
+- Falk Reimann
+- Arthur Paim Arnold
+- Bruno Michel
+- Charles Hsu
+- Omar Ali
+- Andreas Andersen
+- kayrus
+- CodeLingo Bot
+- Jérémy Clerc
+- 4xicom <37339705+4xicom@users.noreply.github.com>
diff --git a/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/ncw/swift/auth.go
new file mode 100755
index 0000000..25654f4
--- /dev/null
+++ b/vendor/github.com/ncw/swift/auth.go
@@ -0,0 +1,335 @@
+package swift
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// Authenticator defines the operations needed to authenticate with swift
+//
+// This encapsulates the different authentication schemes in use
+type Authenticator interface {
+ // Request creates an http.Request for the auth - return nil if not needed
+ Request(*Connection) (*http.Request, error)
+ // Response parses the http.Response
+ Response(resp *http.Response) error
+ // The public storage URL - set Internal to true to read
+ // internal/service net URL
+ StorageUrl(Internal bool) string
+ // The access token
+ Token() string
+ // The CDN url if available
+ CdnUrl() string
+}
+
+// Expireser is an optional interface to read the expiration time of the token
+type Expireser interface {
+ Expires() time.Time
+}
+
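+// CustomEndpointAuthenticator is an optional interface for
+// authenticators which can read the storage URL for a specific
+// endpoint type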
+type CustomEndpointAuthenticator interface {
+ StorageUrlForEndpoint(endpointType EndpointType) string
+}
+
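+// EndpointType is the type of storage endpoint to read from the
+// service catalog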
+type EndpointType string
+
+const (
+ // Use public URL as storage URL
+ EndpointTypePublic = EndpointType("public")
+
+ // Use internal URL as storage URL
+ EndpointTypeInternal = EndpointType("internal")
+
+ // Use admin URL as storage URL
+ EndpointTypeAdmin = EndpointType("admin")
+)
+
+// newAuth - create a new Authenticator from the AuthUrl
+//
+// A hint for AuthVersion can be provided
+func newAuth(c *Connection) (Authenticator, error) {
+ AuthVersion := c.AuthVersion
+ if AuthVersion == 0 {
+ if strings.Contains(c.AuthUrl, "v3") {
+ AuthVersion = 3
+ } else if strings.Contains(c.AuthUrl, "v2") {
+ AuthVersion = 2
+ } else if strings.Contains(c.AuthUrl, "v1") {
+ AuthVersion = 1
+ } else {
+ return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
+ }
+ }
+ switch AuthVersion {
+ case 1:
+ return &v1Auth{}, nil
+ case 2:
+ return &v2Auth{
+ // Guess as to whether we are using an API key or a
+ // password - it will try both eventually so
+ // this is just an optimization.
+ useApiKey: len(c.ApiKey) >= 32,
+ }, nil
+ case 3:
+ return &v3Auth{}, nil
+ }
+ return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
+}
+
+// ------------------------------------------------------------
+
+// v1 auth
+type v1Auth struct {
+ Headers http.Header // V1 auth: the authentication headers so extensions can access them
+}
+
+// v1 Authentication - make request
+func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
+ req, err := http.NewRequest("GET", c.AuthUrl, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", c.UserAgent)
+ req.Header.Set("X-Auth-Key", c.ApiKey)
+ req.Header.Set("X-Auth-User", c.UserName)
+ return req, nil
+}
+
+// v1 Authentication - read response
+func (auth *v1Auth) Response(resp *http.Response) error {
+ auth.Headers = resp.Header
+ return nil
+}
+
+// v1 Authentication - read storage url
+func (auth *v1Auth) StorageUrl(Internal bool) string {
+ storageUrl := auth.Headers.Get("X-Storage-Url")
+ if Internal {
+ newUrl, err := url.Parse(storageUrl)
+ if err != nil {
+ return storageUrl
+ }
+ newUrl.Host = "snet-" + newUrl.Host
+ storageUrl = newUrl.String()
+ }
+ return storageUrl
+}
+
+// v1 Authentication - read auth token
+func (auth *v1Auth) Token() string {
+ return auth.Headers.Get("X-Auth-Token")
+}
+
+// v1 Authentication - read cdn url
+func (auth *v1Auth) CdnUrl() string {
+ return auth.Headers.Get("X-CDN-Management-Url")
+}
+
+// ------------------------------------------------------------
+
+// v2 Authentication
+type v2Auth struct {
+ Auth *v2AuthResponse
+ Region string
+ useApiKey bool // if set will use API key not Password
+ useApiKeyOk bool // if set won't change useApiKey any more
+ notFirst bool // set after first run
+}
+
+// v2 Authentication - make request
+func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
+ auth.Region = c.Region
+ // Toggle useApiKey if not first run and not OK yet
+ if auth.notFirst && !auth.useApiKeyOk {
+ auth.useApiKey = !auth.useApiKey
+ }
+ auth.notFirst = true
+ // Create a V2 auth request for the body of the connection
+ var v2i interface{}
+ if !auth.useApiKey {
+ // Normal swift authentication
+ v2 := v2AuthRequest{}
+ v2.Auth.PasswordCredentials.UserName = c.UserName
+ v2.Auth.PasswordCredentials.Password = c.ApiKey
+ v2.Auth.Tenant = c.Tenant
+ v2.Auth.TenantId = c.TenantId
+ v2i = v2
+ } else {
+ // Rackspace special with API Key
+ v2 := v2AuthRequestRackspace{}
+ v2.Auth.ApiKeyCredentials.UserName = c.UserName
+ v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
+ v2.Auth.Tenant = c.Tenant
+ v2.Auth.TenantId = c.TenantId
+ v2i = v2
+ }
+ body, err := json.Marshal(v2i)
+ if err != nil {
+ return nil, err
+ }
+ url := c.AuthUrl
+ if !strings.HasSuffix(url, "/") {
+ url += "/"
+ }
+ url += "tokens"
+ req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("User-Agent", c.UserAgent)
+ return req, nil
+}
+
+// v2 Authentication - read response
+func (auth *v2Auth) Response(resp *http.Response) error {
+ auth.Auth = new(v2AuthResponse)
+ err := readJson(resp, auth.Auth)
+ // If successfully read Auth then no need to toggle useApiKey any more
+ if err == nil {
+ auth.useApiKeyOk = true
+ }
+ return err
+}
+
+// Finds the Endpoint Url of "type" from the v2AuthResponse using the
+// Region if set or defaulting to the first one if not
+//
+// Returns "" if not found
+func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
+ for _, catalog := range auth.Auth.Access.ServiceCatalog {
+ if catalog.Type == Type {
+ for _, endpoint := range catalog.Endpoints {
+ if auth.Region == "" || (auth.Region == endpoint.Region) {
+ switch endpointType {
+ case EndpointTypeInternal:
+ return endpoint.InternalUrl
+ case EndpointTypePublic:
+ return endpoint.PublicUrl
+ case EndpointTypeAdmin:
+ return endpoint.AdminUrl
+ default:
+ return ""
+ }
+ }
+ }
+ }
+ }
+ return ""
+}
+
+// v2 Authentication - read storage url
+//
+// If Internal is true then it reads the private (internal / service
+// net) URL.
+func (auth *v2Auth) StorageUrl(Internal bool) string {
+ endpointType := EndpointTypePublic
+ if Internal {
+ endpointType = EndpointTypeInternal
+ }
+ return auth.StorageUrlForEndpoint(endpointType)
+}
+
+// v2 Authentication - read storage url
+//
+// Use the indicated endpointType to choose a URL.
+func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
+ return auth.endpointUrl("object-store", endpointType)
+}
+
+// v2 Authentication - read auth token
+func (auth *v2Auth) Token() string {
+ return auth.Auth.Access.Token.Id
+}
+
+// v2 Authentication - read expires
+func (auth *v2Auth) Expires() time.Time {
+ t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
+ if err != nil {
+ return time.Time{} // return Zero if not parsed
+ }
+ return t
+}
+
+// v2 Authentication - read cdn url
+func (auth *v2Auth) CdnUrl() string {
+ return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
+}
+
+// ------------------------------------------------------------
+
+// V2 Authentication request
+//
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
+// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
+type v2AuthRequest struct {
+ Auth struct {
+ PasswordCredentials struct {
+ UserName string `json:"username"`
+ Password string `json:"password"`
+ } `json:"passwordCredentials"`
+ Tenant string `json:"tenantName,omitempty"`
+ TenantId string `json:"tenantId,omitempty"`
+ } `json:"auth"`
+}
+
+// V2 Authentication request - Rackspace variant
+//
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
+// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
+type v2AuthRequestRackspace struct {
+ Auth struct {
+ ApiKeyCredentials struct {
+ UserName string `json:"username"`
+ ApiKey string `json:"apiKey"`
+ } `json:"RAX-KSKEY:apiKeyCredentials"`
+ Tenant string `json:"tenantName,omitempty"`
+ TenantId string `json:"tenantId,omitempty"`
+ } `json:"auth"`
+}
+
+// V2 Authentication reply
+//
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
+// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
+type v2AuthResponse struct {
+ Access struct {
+ ServiceCatalog []struct {
+ Endpoints []struct {
+ InternalUrl string
+ PublicUrl string
+ AdminUrl string
+ Region string
+ TenantId string
+ }
+ Name string
+ Type string
+ }
+ Token struct {
+ Expires string
+ Id string
+ Tenant struct {
+ Id string
+ Name string
+ }
+ }
+ User struct {
+ DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
+ Id string
+ Name string
+ Roles []struct {
+ Description string
+ Id string
+ Name string
+ TenantId string
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ncw/swift/auth_v3.go b/vendor/github.com/ncw/swift/auth_v3.go
new file mode 100755
index 0000000..1e34ad8
--- /dev/null
+++ b/vendor/github.com/ncw/swift/auth_v3.go
@@ -0,0 +1,300 @@
+package swift
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+)
+
+const (
+ v3AuthMethodToken = "token"
+ v3AuthMethodPassword = "password"
+ v3AuthMethodApplicationCredential = "application_credential"
+ v3CatalogTypeObjectStore = "object-store"
+)
+
+// V3 Authentication request
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://developer.openstack.org/api-ref-identity-v3.html
+type v3AuthRequest struct {
+ Auth struct {
+ Identity struct {
+ Methods []string `json:"methods"`
+ Password *v3AuthPassword `json:"password,omitempty"`
+ Token *v3AuthToken `json:"token,omitempty"`
+ ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
+ } `json:"identity"`
+ Scope *v3Scope `json:"scope,omitempty"`
+ } `json:"auth"`
+}
+
+type v3Scope struct {
+ Project *v3Project `json:"project,omitempty"`
+ Domain *v3Domain `json:"domain,omitempty"`
+ Trust *v3Trust `json:"OS-TRUST:trust,omitempty"`
+}
+
+type v3Domain struct {
+ Id string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+}
+
+type v3Project struct {
+ Name string `json:"name,omitempty"`
+ Id string `json:"id,omitempty"`
+ Domain *v3Domain `json:"domain,omitempty"`
+}
+
+type v3Trust struct {
+ Id string `json:"id"`
+}
+
+type v3User struct {
+ Domain *v3Domain `json:"domain,omitempty"`
+ Id string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Password string `json:"password,omitempty"`
+}
+
+type v3AuthToken struct {
+ Id string `json:"id"`
+}
+
+type v3AuthPassword struct {
+ User v3User `json:"user"`
+}
+
+type v3AuthApplicationCredential struct {
+ Id string `json:"id,omitempty"`
+ Name string `json:"name,omitempty"`
+ Secret string `json:"secret,omitempty"`
+ User *v3User `json:"user,omitempty"`
+}
+
+// V3 Authentication response
+type v3AuthResponse struct {
+ Token struct {
+ ExpiresAt string `json:"expires_at"`
+ IssuedAt string `json:"issued_at"`
+ Methods []string
+ Roles []struct {
+ Id, Name string
+ Links struct {
+ Self string
+ }
+ }
+
+ Project struct {
+ Domain struct {
+ Id, Name string
+ }
+ Id, Name string
+ }
+
+ Catalog []struct {
+ Id, Name, Type string
+ Endpoints []struct {
+ Id, Region_Id, Url, Region string
+ Interface EndpointType
+ }
+ }
+
+ User struct {
+ Id, Name string
+ Domain struct {
+ Id, Name string
+ Links struct {
+ Self string
+ }
+ }
+ }
+
+ Audit_Ids []string
+ }
+}
+
+type v3Auth struct {
+ Region string
+ Auth *v3AuthResponse
+ Headers http.Header
+}
+
+func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
+ auth.Region = c.Region
+
+ var v3i interface{}
+
+ v3 := v3AuthRequest{}
+
+ if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
+ var user *v3User
+
+ if c.ApplicationCredentialId != "" {
+ c.ApplicationCredentialName = ""
+ user = &v3User{}
+ }
+
+ if user == nil && c.UserId != "" {
+ // UserID could be used without the domain information
+ user = &v3User{
+ Id: c.UserId,
+ }
+ }
+
+ if user == nil && c.UserName == "" {
+ // Make sure that UserName or UserId is provided
+ return nil, fmt.Errorf("UserID or Name should be provided")
+ }
+
+ if user == nil && c.DomainId != "" {
+ user = &v3User{
+ Name: c.UserName,
+ Domain: &v3Domain{
+ Id: c.DomainId,
+ },
+ }
+ }
+
+ if user == nil && c.Domain != "" {
+ user = &v3User{
+ Name: c.UserName,
+ Domain: &v3Domain{
+ Name: c.Domain,
+ },
+ }
+ }
+
+ // Make sure that DomainId or Domain is provided along with UserName
+ if user == nil {
+ return nil, fmt.Errorf("DomainID or Domain should be provided")
+ }
+
+ v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
+ v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
+ Id: c.ApplicationCredentialId,
+ Name: c.ApplicationCredentialName,
+ Secret: c.ApplicationCredentialSecret,
+ User: user,
+ }
+ } else if c.UserName == "" && c.UserId == "" {
+ v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
+ v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
+ } else {
+ v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
+ v3.Auth.Identity.Password = &v3AuthPassword{
+ User: v3User{
+ Name: c.UserName,
+ Id: c.UserId,
+ Password: c.ApiKey,
+ },
+ }
+
+ var domain *v3Domain
+
+ if c.Domain != "" {
+ domain = &v3Domain{Name: c.Domain}
+ } else if c.DomainId != "" {
+ domain = &v3Domain{Id: c.DomainId}
+ }
+ v3.Auth.Identity.Password.User.Domain = domain
+ }
+
+ if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
+ if c.TrustId != "" {
+ v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
+ } else if c.TenantId != "" || c.Tenant != "" {
+
+ v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
+
+ if c.TenantId != "" {
+ v3.Auth.Scope.Project.Id = c.TenantId
+ } else if c.Tenant != "" {
+ v3.Auth.Scope.Project.Name = c.Tenant
+ switch {
+ case c.TenantDomain != "":
+ v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
+ case c.TenantDomainId != "":
+ v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
+ case c.Domain != "":
+ v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
+ case c.DomainId != "":
+ v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
+ default:
+ v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
+ }
+ }
+ }
+ }
+
+ v3i = v3
+
+ body, err := json.Marshal(v3i)
+
+ if err != nil {
+ return nil, err
+ }
+
+ url := c.AuthUrl
+ if !strings.HasSuffix(url, "/") {
+ url += "/"
+ }
+ url += "auth/tokens"
+ req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("User-Agent", c.UserAgent)
+ return req, nil
+}
+
+func (auth *v3Auth) Response(resp *http.Response) error {
+ auth.Auth = &v3AuthResponse{}
+ auth.Headers = resp.Header
+ err := readJson(resp, auth.Auth)
+ return err
+}
+
+func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
+ for _, catalog := range auth.Auth.Token.Catalog {
+ if catalog.Type == Type {
+ for _, endpoint := range catalog.Endpoints {
+ if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
+ return endpoint.Url
+ }
+ }
+ }
+ }
+ return ""
+}
+
+func (auth *v3Auth) StorageUrl(Internal bool) string {
+ endpointType := EndpointTypePublic
+ if Internal {
+ endpointType = EndpointTypeInternal
+ }
+ return auth.StorageUrlForEndpoint(endpointType)
+}
+
+func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
+ return auth.endpointUrl("object-store", endpointType)
+}
+
+func (auth *v3Auth) Token() string {
+ return auth.Headers.Get("X-Subject-Token")
+}
+
+func (auth *v3Auth) Expires() time.Time {
+ t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt)
+ if err != nil {
+ return time.Time{} // return Zero if not parsed
+ }
+ return t
+}
+
+func (auth *v3Auth) CdnUrl() string {
+ return ""
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/ncw/swift/compatibility_1_0.go
new file mode 100755
index 0000000..7b69a75
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_1_0.go
@@ -0,0 +1,28 @@
+// Go 1.0 compatibility functions
+
+// +build !go1.1
+
+package swift
+
+import (
+ "log"
+ "net/http"
+ "time"
+)
+
+// Cancel the request - doesn't work under < go 1.1
+func cancelRequest(transport http.RoundTripper, req *http.Request) {
+ log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
+}
+
+// Reset a timer - Doesn't work properly < go 1.1
+//
+// This is quite hard to do properly under go < 1.1 so we do a crude
+// approximation and hope that everyone upgrades to go 1.1 quickly
+func resetTimer(t *time.Timer, d time.Duration) {
+ t.Stop()
+ // Very likely this doesn't actually work if we are already
+ // selecting on t.C. However we've stopped the original timer
+ // so won't break transfers but may not time them out :-(
+ *t = *time.NewTimer(d)
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/ncw/swift/compatibility_1_1.go
new file mode 100755
index 0000000..a4f9c3a
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_1_1.go
@@ -0,0 +1,24 @@
+// Go 1.1 and later compatibility functions
+//
+// +build go1.1
+
+package swift
+
+import (
+ "net/http"
+ "time"
+)
+
+// Cancel the request
+func cancelRequest(transport http.RoundTripper, req *http.Request) {
+ if tr, ok := transport.(interface {
+ CancelRequest(*http.Request)
+ }); ok {
+ tr.CancelRequest(req)
+ }
+}
+
+// Reset a timer
+func resetTimer(t *time.Timer, d time.Duration) {
+ t.Reset(d)
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_1_6.go b/vendor/github.com/ncw/swift/compatibility_1_6.go
new file mode 100755
index 0000000..b443d01
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_1_6.go
@@ -0,0 +1,23 @@
+// +build go1.6
+
+package swift
+
+import (
+ "net/http"
+ "time"
+)
+
+const IS_AT_LEAST_GO_16 = true
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
+ tr.ExpectContinueTimeout = t
+}
+
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
+ if req.Body != nil {
+ req.Header.Add("Expect", "100-continue")
+ }
+ if !hasContentLength {
+ req.TransferEncoding = []string{"chunked"}
+ }
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_not_1_6.go b/vendor/github.com/ncw/swift/compatibility_not_1_6.go
new file mode 100755
index 0000000..aabb44e
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_not_1_6.go
@@ -0,0 +1,13 @@
+// +build !go1.6
+
+package swift
+
+import (
+ "net/http"
+ "time"
+)
+
+const IS_AT_LEAST_GO_16 = false
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}
diff --git a/vendor/github.com/ncw/swift/dlo.go b/vendor/github.com/ncw/swift/dlo.go
new file mode 100755
index 0000000..05a1927
--- /dev/null
+++ b/vendor/github.com/ncw/swift/dlo.go
@@ -0,0 +1,149 @@
+package swift
+
+import (
+ "os"
+ "strings"
+)
+
+// DynamicLargeObjectCreateFile represents an open dynamic large object
+type DynamicLargeObjectCreateFile struct {
+ largeObjectCreateFile
+}
+
+// DynamicLargeObjectCreateFile creates a dynamic large object
+// returning an object which satisfies io.Writer, io.Seeker, io.Closer
+// and io.ReaderFrom. The flags are passed to the
+// largeObjectCreate method.
+func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
+ lo, err := c.largeObjectCreate(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return withBuffer(opts, &DynamicLargeObjectCreateFile{
+ largeObjectCreateFile: *lo,
+ }), nil
+}
+
+// DynamicLargeObjectCreate creates or truncates an existing dynamic
+// large object returning a writeable object. This sets opts.Flags to
+// an appropriate value before calling DynamicLargeObjectCreateFile
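+//
+// An illustrative sketch (container and object names are hypothetical):
+//
+//	out, err := c.DynamicLargeObjectCreate(&LargeObjectOpts{
+//		Container:  "videos",
+//		ObjectName: "big.mp4",
+//	})
+//	// write the data to out, then call out.Close()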
+func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
+ opts.Flags = os.O_TRUNC | os.O_CREATE
+ return c.DynamicLargeObjectCreateFile(opts)
+}
+
+// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
+func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
+ return c.LargeObjectDelete(container, path)
+}
+
+// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
+func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
+ info, headers, err := c.Object(srcContainer, srcObjectName)
+ if err != nil {
+ return err
+ }
+
+ segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
+ if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType, sanitizeLargeObjectMoveHeaders(headers)); err != nil {
+ return err
+ }
+
+ if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func sanitizeLargeObjectMoveHeaders(headers Headers) Headers {
+ sanitizedHeaders := make(map[string]string, len(headers))
+ for k, v := range headers {
+ if strings.HasPrefix(k, "X-") { // Some of the fields do not affect the request, e.g. X-Timestamp, X-Trans-Id, X-Openstack-Request-Id; OpenStack will generate new ones anyway.
+ sanitizedHeaders[k] = v
+ }
+ }
+ return sanitizedHeaders
+}
+
+// createDLOManifest creates a dynamic large object manifest
+func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string, headers Headers) error {
+ if headers == nil {
+ headers = make(Headers)
+ }
+ headers["X-Object-Manifest"] = prefix
+ manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
+ if err != nil {
+ return err
+ }
+
+ if err := manifest.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// Close satisfies the io.Closer interface
+func (file *DynamicLargeObjectCreateFile) Close() error {
+ return file.Flush()
+}
+
+func (file *DynamicLargeObjectCreateFile) Flush() error {
+ err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType, file.headers)
+ if err != nil {
+ return err
+ }
+ return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
+}
+
+func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
+ //a simple container listing works 99.9% of the time
+ segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
+ if err != nil {
+ return nil, err
+ }
+
+ hasObjectName := make(map[string]struct{})
+ for _, segment := range segments {
+ hasObjectName[segment.Name] = struct{}{}
+ }
+
+ //The container listing might be outdated (i.e. not contain all existing
+ //segment objects yet) because of temporary inconsistency (Swift is only
+ //eventually consistent!). Check its completeness.
+ segmentNumber := 0
+ for {
+ segmentNumber++
+ segmentName := getSegment(segmentPath, segmentNumber)
+ if _, seen := hasObjectName[segmentName]; seen {
+ continue
+ }
+
+ //This segment is missing in the container listing. Use a more reliable
+ //request to check its existence. (HEAD requests on segments are
+ //guaranteed to return the correct metadata, except for the pathological
+ //case of an outage of large parts of the Swift cluster or its network,
+ //since every segment is only written once.)
+ segment, _, err := c.Object(segmentContainer, segmentName)
+ switch err {
+ case nil:
+ //found new segment -> add it in the correct position and keep
+ //going, more might be missing
+ if segmentNumber <= len(segments) {
+ segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
+ segments[segmentNumber-1] = segment
+ } else {
+ segments = append(segments, segment)
+ }
+ continue
+ case ObjectNotFound:
+ //This segment is missing. Since we upload segments sequentially,
+ //there won't be any more segments after it.
+ return segments, nil
+ default:
+ return nil, err //unexpected error
+ }
+ }
+}
diff --git a/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/ncw/swift/doc.go
new file mode 100755
index 0000000..44efde7
--- /dev/null
+++ b/vendor/github.com/ncw/swift/doc.go
@@ -0,0 +1,19 @@
+/*
+Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files
+
+Standard Usage
+
+Most of the work is done through the Container*() and Object*() methods.
+
+All methods are safe to use concurrently in multiple goroutines.
+
+Object Versioning
+
+As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.
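+
+For example (with illustrative container names), versioning is typically enabled by pointing a container at a version container using the X-Versions-Location header:
+
+    err := c.ContainerUpdate("current", swift.Headers{"X-Versions-Location": "versions"})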
+
+Rackspace Sub Module
+
+This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.
+
+*/
+package swift
diff --git a/vendor/github.com/ncw/swift/go.mod b/vendor/github.com/ncw/swift/go.mod
new file mode 100755
index 0000000..29f6ee2
--- /dev/null
+++ b/vendor/github.com/ncw/swift/go.mod
@@ -0,0 +1 @@
+module github.com/ncw/swift
diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go
new file mode 100755
index 0000000..bec640b
--- /dev/null
+++ b/vendor/github.com/ncw/swift/largeobjects.go
@@ -0,0 +1,448 @@
+package swift
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ gopath "path"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// NotLargeObject is returned if an operation is performed on an object which isn't large.
+var NotLargeObject = errors.New("Not a large object")
+
+// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
+var readAfterWriteTimeout = 15 * time.Second
+
+// readAfterWriteWait defines the time to sleep between two retries
+var readAfterWriteWait = 200 * time.Millisecond
+
+// largeObjectCreateFile represents an open static or dynamic large object
+type largeObjectCreateFile struct {
+ conn *Connection
+ container string
+ objectName string
+ currentLength int64
+ filePos int64
+ chunkSize int64
+ segmentContainer string
+ prefix string
+ contentType string
+ checkHash bool
+ segments []Object
+ headers Headers
+ minChunkSize int64
+}
+
+func swiftSegmentPath(path string) (string, error) {
+ checksum := sha1.New()
+ random := make([]byte, 32)
+ if _, err := rand.Read(random); err != nil {
+ return "", err
+ }
+ path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...)))
+ return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
+}
+
+func getSegment(segmentPath string, partNumber int) string {
+ return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
+}
+
+func parseFullPath(manifest string) (container string, prefix string) {
+ components := strings.SplitN(manifest, "/", 2)
+ container = components[0]
+ if len(components) > 1 {
+ prefix = components[1]
+ }
+ return container, prefix
+}
+
+func (headers Headers) IsLargeObjectDLO() bool {
+ _, isDLO := headers["X-Object-Manifest"]
+ return isDLO
+}
+
+func (headers Headers) IsLargeObjectSLO() bool {
+ _, isSLO := headers["X-Static-Large-Object"]
+ return isSLO
+}
+
+func (headers Headers) IsLargeObject() bool {
+ return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
+}
+
+func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
+ if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
+ segmentContainer, segmentPath := parseFullPath(manifest)
+ segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
+ return segmentContainer, segments, err
+ }
+ if headers.IsLargeObjectSLO() {
+ return c.getAllSLOSegments(container, path)
+ }
+ return "", nil, NotLargeObject
+}
+
+// LargeObjectOpts describes how a large object should be created
+type LargeObjectOpts struct {
+ Container string // Name of container to place object
+ ObjectName string // Name of object
+ Flags int // Creation flags
+ CheckHash bool // If set Check the hash
+ Hash string // If set use this hash to check
+ ContentType string // Content-Type of the object
+ Headers Headers // Additional headers to upload the object with
+ ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set
+ MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info
+ SegmentContainer string // Name of the container to place segments
+ SegmentPrefix string // Prefix to use for the segments
+ NoBuffer bool // Prevents using a bufio.Writer to write segments
+}
+
+type LargeObjectFile interface {
+ io.Writer
+ io.Seeker
+ io.Closer
+ Size() int64
+ Flush() error
+}
+
+// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
+//
+// opts.Flags can have the following bits set
+// os.O_TRUNC - remove the contents of the large object if it exists
+// os.O_APPEND - write at the end of the large object
+func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
+ var (
+ segmentPath string
+ segmentContainer string
+ segments []Object
+ currentLength int64
+ err error
+ )
+
+ if opts.SegmentPrefix != "" {
+ segmentPath = opts.SegmentPrefix
+ } else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
+ return nil, err
+ }
+
+ if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
+ if opts.Flags&os.O_TRUNC != 0 {
+ c.LargeObjectDelete(opts.Container, opts.ObjectName)
+ } else {
+ currentLength = info.Bytes
+ if headers.IsLargeObject() {
+ segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
+ if err != nil {
+ return nil, err
+ }
+ if len(segments) > 0 {
+ segmentPath = gopath.Dir(segments[0].Name)
+ }
+ } else {
+ if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
+ return nil, err
+ }
+ segments = append(segments, info)
+ }
+ }
+ } else if err != ObjectNotFound {
+ return nil, err
+ }
+
+ // segmentContainer is not empty when the manifest already existed
+ if segmentContainer == "" {
+ if opts.SegmentContainer != "" {
+ segmentContainer = opts.SegmentContainer
+ } else {
+ segmentContainer = opts.Container + "_segments"
+ }
+ }
+
+ file := &largeObjectCreateFile{
+ conn: c,
+ checkHash: opts.CheckHash,
+ container: opts.Container,
+ objectName: opts.ObjectName,
+ chunkSize: opts.ChunkSize,
+ minChunkSize: opts.MinChunkSize,
+ headers: opts.Headers,
+ segmentContainer: segmentContainer,
+ prefix: segmentPath,
+ segments: segments,
+ currentLength: currentLength,
+ }
+
+ if file.chunkSize == 0 {
+ file.chunkSize = 10 * 1024 * 1024
+ }
+
+ if file.minChunkSize > file.chunkSize {
+ file.chunkSize = file.minChunkSize
+ }
+
+ if opts.Flags&os.O_APPEND != 0 {
+ file.filePos = currentLength
+ }
+
+ return file, nil
+}
+
+// LargeObjectDelete deletes the large object named by container, path
+func (c *Connection) LargeObjectDelete(container string, objectName string) error {
+ _, headers, err := c.Object(container, objectName)
+ if err != nil {
+ return err
+ }
+
+ var objects [][]string
+ if headers.IsLargeObject() {
+ segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
+ if err != nil {
+ return err
+ }
+ for _, obj := range segments {
+ objects = append(objects, []string{segmentContainer, obj.Name})
+ }
+ }
+ objects = append(objects, []string{container, objectName})
+
+ info, err := c.cachedQueryInfo()
+ if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
+ filenames := make([]string, len(objects))
+ for i, obj := range objects {
+ filenames[i] = obj[0] + "/" + obj[1]
+ }
+ _, err = c.doBulkDelete(filenames)
+ // Don't fail on ObjectNotFound because eventual consistency
+ // makes this situation normal.
+ if err != nil && err != Forbidden && err != ObjectNotFound {
+ return err
+ }
+ } else {
+ for _, obj := range objects {
+ if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// LargeObjectGetSegments returns all the segments that compose an object
+// If the object is a Dynamic Large Object (DLO), it just returns the objects
+// that have the prefix as indicated by the manifest.
+// If the object is a Static Large Object (SLO), it retrieves the JSON content
+// of the manifest and returns all the segments of it.
+func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
+ _, headers, err := c.Object(container, path)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return c.getAllSegments(container, path, headers)
+}
+
+// Seek sets the offset for the next write operation
+func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case 0:
+ file.filePos = offset
+ case 1:
+ file.filePos += offset
+ case 2:
+ file.filePos = file.currentLength + offset
+ default:
+ return -1, fmt.Errorf("invalid value for whence")
+ }
+ if file.filePos < 0 {
+ return -1, fmt.Errorf("negative offset")
+ }
+ return file.filePos, nil
+}
+
+func (file *largeObjectCreateFile) Size() int64 {
+ return file.currentLength
+}
+
+func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
+ endTimer := time.NewTimer(readAfterWriteTimeout)
+ defer endTimer.Stop()
+ waitingTime := readAfterWriteWait
+ for {
+ var headers Headers
+ var sz int64
+ if headers, sz, err = fn(); err == nil {
+ if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
+ return
+ }
+ } else {
+ return
+ }
+ waitTimer := time.NewTimer(waitingTime)
+ select {
+ case <-endTimer.C:
+ waitTimer.Stop()
+ err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
+ return
+ case <-waitTimer.C:
+ waitingTime *= 2
+ }
+ }
+}
+
+func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
+ err = withLORetry(expectedSize, func() (Headers, int64, error) {
+ var info Object
+ var headers Headers
+ info, headers, err = c.objectBase(container, objectName)
+ if err != nil {
+ return headers, 0, err
+ }
+ return headers, info.Bytes, nil
+ })
+ return
+}
+
+// Write satisfies the io.Writer interface
+func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
+ var sz int64
+ var relativeFilePos int
+ writeSegmentIdx := 0
+ for i, obj := range file.segments {
+ if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
+ relativeFilePos = int(file.filePos - sz)
+ break
+ }
+ writeSegmentIdx++
+ sz += obj.Bytes
+ }
+ sizeToWrite := len(buf)
+ for offset := 0; offset < sizeToWrite; {
+ newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
+ if err != nil {
+ return 0, err
+ }
+ if writeSegmentIdx < len(file.segments) {
+ file.segments[writeSegmentIdx] = *newSegment
+ } else {
+ file.segments = append(file.segments, *newSegment)
+ }
+ offset += n
+ writeSegmentIdx++
+ relativeFilePos = 0
+ }
+ file.filePos += int64(sizeToWrite)
+ file.currentLength = 0
+ for _, obj := range file.segments {
+ file.currentLength += obj.Bytes
+ }
+ return sizeToWrite, nil
+}
+
+func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
+ var (
+ readers []io.Reader
+ existingSegment *Object
+ segmentSize int
+ )
+ segmentName := getSegment(file.prefix, writeSegmentIdx+1)
+ sizeToRead := int(file.chunkSize)
+ if writeSegmentIdx < len(file.segments) {
+ existingSegment = &file.segments[writeSegmentIdx]
+ if writeSegmentIdx != len(file.segments)-1 {
+ sizeToRead = int(existingSegment.Bytes)
+ }
+ if relativeFilePos > 0 {
+ headers := make(Headers)
+ headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
+ existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer existingSegmentReader.Close()
+ sizeToRead -= relativeFilePos
+ segmentSize += relativeFilePos
+ readers = []io.Reader{existingSegmentReader}
+ }
+ }
+ if sizeToRead > len(buf) {
+ sizeToRead = len(buf)
+ }
+ segmentSize += sizeToRead
+ readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
+ if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
+ headers := make(Headers)
+ headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
+ tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer tailSegmentReader.Close()
+ segmentSize = int(existingSegment.Bytes)
+ readers = append(readers, tailSegmentReader)
+ }
+ segmentReader := io.MultiReader(readers...)
+ headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
+}
+
+func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
+ if !opts.NoBuffer {
+ return &bufferedLargeObjectFile{
+ LargeObjectFile: lo,
+ bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
+ }
+ }
+ return lo
+}
+
+type bufferedLargeObjectFile struct {
+ LargeObjectFile
+ bw *bufio.Writer
+}
+
+func (blo *bufferedLargeObjectFile) Close() error {
+ err := blo.bw.Flush()
+ if err != nil {
+ return err
+ }
+ return blo.LargeObjectFile.Close()
+}
+
+func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
+ return blo.bw.Write(p)
+}
+
+func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
+ err := blo.bw.Flush()
+ if err != nil {
+ return 0, err
+ }
+ return blo.LargeObjectFile.Seek(offset, whence)
+}
+
+func (blo *bufferedLargeObjectFile) Size() int64 {
+ return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
+}
+
+func (blo *bufferedLargeObjectFile) Flush() error {
+ err := blo.bw.Flush()
+ if err != nil {
+ return err
+ }
+ return blo.LargeObjectFile.Flush()
+}
diff --git a/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/ncw/swift/meta.go
new file mode 100755
index 0000000..7e149e1
--- /dev/null
+++ b/vendor/github.com/ncw/swift/meta.go
@@ -0,0 +1,174 @@
+// Metadata manipulation in and out of Headers
+
+package swift
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Metadata stores account, container or object metadata.
+type Metadata map[string]string
+
+// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
+//
+// The keys in the Metadata will be converted to lower case
+func (h Headers) Metadata(metaPrefix string) Metadata {
+ m := Metadata{}
+ metaPrefix = http.CanonicalHeaderKey(metaPrefix)
+ for key, value := range h {
+ if strings.HasPrefix(key, metaPrefix) {
+ metaKey := strings.ToLower(key[len(metaPrefix):])
+ m[metaKey] = value
+ }
+ }
+ return m
+}
+
+// AccountMetadata converts Headers from account to a Metadata.
+//
+// The keys in the Metadata will be converted to lower case.
+func (h Headers) AccountMetadata() Metadata {
+ return h.Metadata("X-Account-Meta-")
+}
+
+// ContainerMetadata converts Headers from container to a Metadata.
+//
+// The keys in the Metadata will be converted to lower case.
+func (h Headers) ContainerMetadata() Metadata {
+ return h.Metadata("X-Container-Meta-")
+}
+
+// ObjectMetadata converts Headers from object to a Metadata.
+//
+// The keys in the Metadata will be converted to lower case.
+func (h Headers) ObjectMetadata() Metadata {
+ return h.Metadata("X-Object-Meta-")
+}
+
+// Headers convert the Metadata starting with the metaPrefix into a
+// Headers.
+//
+// The keys in the Metadata will be converted from lower case to http
+// Canonical (see http.CanonicalHeaderKey).
+func (m Metadata) Headers(metaPrefix string) Headers {
+ h := Headers{}
+ for key, value := range m {
+ key = http.CanonicalHeaderKey(metaPrefix + key)
+ h[key] = value
+ }
+ return h
+}
+
+// AccountHeaders converts the Metadata for the account.
+func (m Metadata) AccountHeaders() Headers {
+ return m.Headers("X-Account-Meta-")
+}
+
+// ContainerHeaders converts the Metadata for the container.
+func (m Metadata) ContainerHeaders() Headers {
+ return m.Headers("X-Container-Meta-")
+}
+
+// ObjectHeaders converts the Metadata for the object.
+func (m Metadata) ObjectHeaders() Headers {
+ return m.Headers("X-Object-Meta-")
+}
+
+// Turns a number of ns into a floating point string in seconds
+//
+// Trims trailing zeros and is guaranteed to be perfectly accurate
+func nsToFloatString(ns int64) string {
+ if ns < 0 {
+ return "-" + nsToFloatString(-ns)
+ }
+ result := fmt.Sprintf("%010d", ns)
+ split := len(result) - 9
+ result, decimals := result[:split], result[split:]
+ decimals = strings.TrimRight(decimals, "0")
+ if decimals != "" {
+ result += "."
+ result += decimals
+ }
+ return result
+}
+
+// Turns a floating point string in seconds into a ns integer
+//
+// Guaranteed to be perfectly accurate
+func floatStringToNs(s string) (int64, error) {
+ const zeros = "000000000"
+ if point := strings.IndexRune(s, '.'); point >= 0 {
+ tail := s[point+1:]
+ if fill := 9 - len(tail); fill < 0 {
+ tail = tail[:9]
+ } else {
+ tail += zeros[:fill]
+ }
+ s = s[:point] + tail
+ } else if len(s) > 0 { // Make sure empty string produces an error
+ s += zeros
+ }
+ return strconv.ParseInt(s, 10, 64)
+}
+
+// FloatStringToTime converts a floating point number string to a time.Time
+//
+// The string is floating point number of seconds since the epoch
+// (Unix time). The number should be in fixed point format (not
+// exponential), eg "1354040105.123456789" which represents the time
+// "2012-11-27T18:15:05.123456789Z"
+//
+// Some care is taken to preserve all the accuracy in the time.Time
+// (which wouldn't happen with a naive conversion through float64) so
+// a round trip conversion won't change the data.
+//
+// If an error is returned then time will be returned as the zero time.
+func FloatStringToTime(s string) (t time.Time, err error) {
+ ns, err := floatStringToNs(s)
+ if err != nil {
+ return
+ }
+ t = time.Unix(0, ns)
+ return
+}
+
+// TimeToFloatString converts a time.Time object to a floating point string
+//
+// The string is floating point number of seconds since the epoch
+// (Unix time). The number is in fixed point format (not
+// exponential), eg "1354040105.123456789" which represents the time
+// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
+// from the output.
+//
+// Some care is taken to preserve all the accuracy in the time.Time
+// (which wouldn't happen with a naive conversion through float64) so
+// a round trip conversion won't change the data.
+func TimeToFloatString(t time.Time) string {
+ return nsToFloatString(t.UnixNano())
+}
+
+// GetModTime reads a modification time (mtime) from a Metadata object
+//
+// This is a defacto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
+//
+// If an error is returned then time will be returned as the zero time.
+func (m Metadata) GetModTime() (t time.Time, err error) {
+ return FloatStringToTime(m["mtime"])
+}
+
+// SetModTime writes a modification time (mtime) to a Metadata object
+//
+// This is a defacto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
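+//
+// A minimal sketch pairing it with GetModTime (hypothetical use):
+//
+// m := Metadata{}
+// m.SetModTime(time.Now())
+// t, err := m.GetModTime() // round trips without losing precision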
+func (m Metadata) SetModTime(t time.Time) {
+ m["mtime"] = TimeToFloatString(t)
+}
diff --git a/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/ncw/swift/notes.txt
new file mode 100755
index 0000000..f738552
--- /dev/null
+++ b/vendor/github.com/ncw/swift/notes.txt
@@ -0,0 +1,55 @@
+Notes on Go Swift
+=================
+
+Make a builder style interface like the Google Go APIs? Advantages
+are that it is easy to add named methods to the service object to do
+specific things. Slightly less efficient. Not sure about how to
+return extra stuff though - in an object?
+
+Make a container struct so these could be methods on it?
+
+Make noResponse check for 204?
+
+Make storage public so it can be extended easily?
+
+Rename to go-swift to match user agent string?
+
+Reconnect on auth error - 401 when token expires isn't tested
+
+Make more api compatible with python cloudfiles?
+
+Retry operations on timeout / network errors?
+- also 408 error
+- GET requests only?
+
+Make Connection thread safe - whenever it is changed take a write lock, whenever it is read from take a read lock
+
+Add extra headers field to Connection (for via etc)
+
+Make errors use an error hierarchy so they can be caught with a type assertion
+
+ Error(...)
+ ObjectCorrupted{ Error }
+
+Make a Debug flag in connection for logging stuff
+
+Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
+
+Object range
+
+Object create, update with X-Delete-At or X-Delete-After
+
+Large object support
+- check uploads are less than 5GB in normal mode?
+
+Access control CORS?
+
+Swift client retries and backs off for all types of errors
+
+Implement net error interface?
+
+type Error interface {
+ error
+ Timeout() bool // Is the error a timeout?
+ Temporary() bool // Is the error temporary?
+}
diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go
new file mode 100755
index 0000000..6a10ddf
--- /dev/null
+++ b/vendor/github.com/ncw/swift/slo.go
@@ -0,0 +1,171 @@
+package swift
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+)
+
+// StaticLargeObjectCreateFile represents an open static large object
+type StaticLargeObjectCreateFile struct {
+ largeObjectCreateFile
+}
+
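+// SLONotSupported is returned when the server does not support static
+// large objects (no "slo" section in its /info response).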
+var SLONotSupported = errors.New("SLO not supported")
+
+type swiftSegment struct {
+ Path string `json:"path,omitempty"`
+ Etag string `json:"etag,omitempty"`
+ Size int64 `json:"size_bytes,omitempty"`
+ // When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
+ // but when querying the JSON content of a manifest with the `multipart-manifest=get`
+ // parameter, Swift names those attributes `name`, `hash` and `bytes`.
+ // We use all the different attribute names in this structure to be able to use
+ // the same structure for both uploading and retrieving.
+ Name string `json:"name,omitempty"`
+ Hash string `json:"hash,omitempty"`
+ Bytes int64 `json:"bytes,omitempty"`
+ ContentType string `json:"content_type,omitempty"`
+ LastModified string `json:"last_modified,omitempty"`
+}
+
+// StaticLargeObjectCreateFile creates a static large object returning
+// an object which satisfies io.Writer, io.Seeker, io.Closer and
+// io.ReaderFrom. The flags are as passed to the largeObjectCreate
+// method.
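+//
+// A minimal usage sketch (hypothetical names, error handling elided;
+// src is any io.Reader):
+//
+// out, err := c.StaticLargeObjectCreateFile(&LargeObjectOpts{
+// Container: "mycontainer",
+// ObjectName: "big.bin",
+// })
+// _, err = io.Copy(out, src)
+// err = out.Close()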
+func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
+ info, err := c.cachedQueryInfo()
+ if err != nil || !info.SupportsSLO() {
+ return nil, SLONotSupported
+ }
+ realMinChunkSize := info.SLOMinSegmentSize()
+ if realMinChunkSize > opts.MinChunkSize {
+ opts.MinChunkSize = realMinChunkSize
+ }
+ lo, err := c.largeObjectCreate(opts)
+ if err != nil {
+ return nil, err
+ }
+ return withBuffer(opts, &StaticLargeObjectCreateFile{
+ largeObjectCreateFile: *lo,
+ }), nil
+}
+
+// StaticLargeObjectCreate creates or truncates an existing static
+// large object returning a writeable object. This sets opts.Flags to
+// an appropriate value before calling StaticLargeObjectCreateFile
+func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
+ opts.Flags = os.O_TRUNC | os.O_CREATE
+ return c.StaticLargeObjectCreateFile(opts)
+}
+
+// StaticLargeObjectDelete deletes a static large object and all of its segments.
+func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
+ info, err := c.cachedQueryInfo()
+ if err != nil || !info.SupportsSLO() {
+ return SLONotSupported
+ }
+ return c.LargeObjectDelete(container, path)
+}
+
+// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
+func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
+ swiftInfo, err := c.cachedQueryInfo()
+ if err != nil || !swiftInfo.SupportsSLO() {
+ return SLONotSupported
+ }
+ info, headers, err := c.Object(srcContainer, srcObjectName)
+ if err != nil {
+ return err
+ }
+
+ container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
+ if err != nil {
+ return err
+ }
+
+ // Copy only metadata during the move (other headers might not be safe to copy)
+ headers = headers.ObjectMetadata().ObjectHeaders()
+
+ if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil {
+ return err
+ }
+
+ if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// createSLOManifest creates a static large object manifest
+func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
+ sloSegments := make([]swiftSegment, len(segments))
+ for i, segment := range segments {
+ sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
+ sloSegments[i].Etag = segment.Hash
+ sloSegments[i].Size = segment.Bytes
+ }
+
+ content, err := json.Marshal(sloSegments)
+ if err != nil {
+ return err
+ }
+
+ values := url.Values{}
+ values.Set("multipart-manifest", "put")
+ if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (file *StaticLargeObjectCreateFile) Close() error {
+ return file.Flush()
+}
+
+func (file *StaticLargeObjectCreateFile) Flush() error {
+ if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil {
+ return err
+ }
+ return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
+}
+
+func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
+ var (
+ segmentList []swiftSegment
+ segments []Object
+ segPath string
+ segmentContainer string
+ )
+
+ values := url.Values{}
+ values.Set("multipart-manifest", "get")
+
+ file, _, err := c.objectOpen(container, path, true, nil, values)
+ if err != nil {
+ return "", nil, err
+ }
+
+ content, err := ioutil.ReadAll(file)
+ file.Close()
+ if err != nil {
+ return "", nil, err
+ }
+
+ if err := json.Unmarshal(content, &segmentList); err != nil {
+ return "", nil, err
+ }
+ for _, segment := range segmentList {
+ segmentContainer, segPath = parseFullPath(segment.Name[1:])
+ segments = append(segments, Object{
+ Name: segPath,
+ Bytes: segment.Bytes,
+ Hash: segment.Hash,
+ })
+ }
+
+ return segmentContainer, segments, nil
+}
diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go
new file mode 100755
index 0000000..26f9955
--- /dev/null
+++ b/vendor/github.com/ncw/swift/swift.go
@@ -0,0 +1,2264 @@
+package swift
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ DefaultUserAgent = "goswift/1.0" // Default user agent
+ DefaultRetries = 3 // Default number of retries on token expiry
+ TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC
+ UploadTar = "tar" // Data format specifier for Connection.BulkUpload().
+ UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload().
+ UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload().
+ allContainersLimit = 10000 // Number of containers to fetch at once
+ allObjectsLimit = 10000 // Number of objects to fetch at once
+ allObjectsChanLimit = 1000 // ...when fetching to a channel
+)
+
+// ObjectType is the type of the swift object, regular, static large,
+// or dynamic large.
+type ObjectType int
+
+// Values that ObjectType can take
+const (
+ RegularObjectType ObjectType = iota
+ StaticLargeObjectType
+ DynamicLargeObjectType
+)
+
+// Connection holds the details of the connection to the swift server.
+//
+// You need to provide UserName, ApiKey and AuthUrl when you create a
+// connection then call Authenticate on it.
+//
+// The auth version in use will be detected from the AuthURL - you can
+// override this with the AuthVersion parameter.
+//
+// If using v2 auth you can also set Region in the Connection
+// structure. If you don't set Region you will get the default region
+// which may not be what you want.
+//
+// For reference some common AuthUrls look like this:
+//
+// Rackspace US https://auth.api.rackspacecloud.com/v1.0
+// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0
+// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0
+// Memset Memstore UK https://auth.storage.memset.com/v1.0
+// Memstore v2 https://auth.storage.memset.com/v2.0
+//
+// When using Google Appengine you must provide the Connection with an
+// appengine-specific Transport:
+//
+// import (
+// "fmt"
+// "net/http"
+//
+// "appengine"
+// "appengine/urlfetch"
+// "github.com/ncw/swift"
+// )
+//
+// func handler(w http.ResponseWriter, r *http.Request) {
+// ctx := appengine.NewContext(r)
+// tr := urlfetch.Transport{Context: ctx}
+// c := swift.Connection{
+// UserName: "user",
+// ApiKey: "key",
+// AuthUrl: "auth_url",
+// Transport: tr,
+// }
+// _ = c.Authenticate()
+// containers, _ := c.ContainerNames(nil)
+// fmt.Fprintf(w, "containers: %q", containers)
+// }
+//
+// If you don't supply a Transport, one is made which relies on
+// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment).
+// This means that the connection will respect the HTTP proxy specified by the
+// environment variables $HTTP_PROXY and $NO_PROXY.
+type Connection struct {
+ // Parameters - fill these in before calling Authenticate
+ // They are all optional except UserName, ApiKey and AuthUrl
+ Domain string // User's domain name
+ DomainId string // User's domain Id
+ UserName string // UserName for api
+ UserId string // User Id
+ ApiKey string // Key for api access
+ ApplicationCredentialId string // Application Credential ID
+ ApplicationCredentialName string // Application Credential Name
+ ApplicationCredentialSecret string // Application Credential Secret
+ AuthUrl string // Auth URL
+ Retries int // Retries on error (default is 3)
+ UserAgent string // Http User agent (default goswift/1.0)
+ ConnectTimeout time.Duration // Connect channel timeout (default 10s)
+ Timeout time.Duration // Data channel timeout (default 60s)
+ Region string // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only)
+ AuthVersion int // Set to 1, 2 or 3 or leave at 0 for autodetect
+ Internal bool // Set this to true to use the internal / service network
+ Tenant string // Name of the tenant (v2,v3 auth only)
+ TenantId string // Id of the tenant (v2,v3 auth only)
+ EndpointType EndpointType // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)
+ TenantDomain string // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain
+ TenantDomainId string // Id of the tenant's domain (v3 auth only), only needed if it differs from the user domain
+ TrustId string // Id of the trust (v3 auth only)
+ Transport http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine)
+ // These are filled in after Authenticate is called, as are the defaults for the fields above
+ StorageUrl string
+ AuthToken string
+ Expires time.Time // time the token expires, may be Zero if unknown
+ client *http.Client
+ Auth Authenticator `json:"-" xml:"-"` // the current authenticator
+ authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth
+ // swiftInfo is filled after QueryInfo is called
+ swiftInfo SwiftInfo
+}
+
+// setFromEnv reads the environment variable name passed in and, if it
+// is set and the value that param points to (it must be a pointer) is
+// still the zero value, parses it according to the type and writes it
+// to the pointer.
+func setFromEnv(param interface{}, name string) (err error) {
+ val := os.Getenv(name)
+ if val == "" {
+ return
+ }
+ switch result := param.(type) {
+ case *string:
+ if *result == "" {
+ *result = val
+ }
+ case *int:
+ if *result == 0 {
+ *result, err = strconv.Atoi(val)
+ }
+ case *bool:
+ if *result == false {
+ *result, err = strconv.ParseBool(val)
+ }
+ case *time.Duration:
+ if *result == 0 {
+ *result, err = time.ParseDuration(val)
+ }
+ case *EndpointType:
+ if *result == EndpointType("") {
+ *result = EndpointType(val)
+ }
+ default:
+ return newErrorf(0, "can't set var of type %T", param)
+ }
+ return err
+}
+
+// ApplyEnvironment reads environment variables and applies them to
+// the Connection structure. It won't overwrite any parameters which
+// are already set in the Connection struct.
+//
+// To make a new Connection object entirely from the environment you
+// would do:
+//
+// c := new(Connection)
+// err := c.ApplyEnvironment()
+// if err != nil { log.Fatal(err) }
+//
+// The naming of these variables follows the official Openstack naming
+// scheme so it should be compatible with OpenStack rc files.
+//
+// For v1 authentication (obsolete)
+// ST_AUTH - Auth URL
+// ST_USER - UserName for api
+// ST_KEY - Key for api access
+//
+// For v2 authentication
+// OS_AUTH_URL - Auth URL
+// OS_USERNAME - UserName for api
+// OS_PASSWORD - Key for api access
+// OS_TENANT_NAME - Name of the tenant
+// OS_TENANT_ID - Id of the tenant
+// OS_REGION_NAME - Region to use - default is use first region
+//
+// For v3 authentication
+// OS_AUTH_URL - Auth URL
+// OS_USERNAME - UserName for api
+// OS_USER_ID - User Id
+// OS_PASSWORD - Key for api access
+// OS_APPLICATION_CREDENTIAL_ID - Application Credential ID
+// OS_APPLICATION_CREDENTIAL_NAME - Application Credential Name
+// OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
+// OS_USER_DOMAIN_NAME - User's domain name
+// OS_USER_DOMAIN_ID - User's domain Id
+// OS_PROJECT_NAME - Name of the project
+// OS_PROJECT_DOMAIN_NAME - Name of the tenant's domain, only needed if it differs from the user domain
+// OS_PROJECT_DOMAIN_ID - Id of the tenant's domain, only needed if it differs from the user domain
+// OS_TRUST_ID - Id of the trust
+// OS_REGION_NAME - Region to use - default is use first region
+//
+// Other
+// OS_ENDPOINT_TYPE - Endpoint type public, internal or admin
+// ST_AUTH_VERSION - Choose auth version - 1, 2 or 3 or leave at 0 for autodetect
+//
+// For manual authentication
+// OS_STORAGE_URL - storage URL from alternate authentication
+// OS_AUTH_TOKEN - Auth Token from alternate authentication
+//
+// Library specific
+// GOSWIFT_RETRIES - Retries on error (default is 3)
+// GOSWIFT_USER_AGENT - HTTP User agent (default goswift/1.0)
+// GOSWIFT_CONNECT_TIMEOUT - Connect channel timeout with unit, eg "10s", "100ms" (default "10s")
+// GOSWIFT_TIMEOUT - Data channel timeout with unit, eg "10s", "100ms" (default "60s")
+// GOSWIFT_INTERNAL - Set this to "true" to use the internal network (obsolete - use OS_ENDPOINT_TYPE)
+func (c *Connection) ApplyEnvironment() (err error) {
+ for _, item := range []struct {
+ result interface{}
+ name string
+ }{
+ // Environment variables - keep in same order as Connection
+ {&c.Domain, "OS_USER_DOMAIN_NAME"},
+ {&c.DomainId, "OS_USER_DOMAIN_ID"},
+ {&c.UserName, "OS_USERNAME"},
+ {&c.UserId, "OS_USER_ID"},
+ {&c.ApiKey, "OS_PASSWORD"},
+ {&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
+ {&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
+ {&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
+ {&c.AuthUrl, "OS_AUTH_URL"},
+ {&c.Retries, "GOSWIFT_RETRIES"},
+ {&c.UserAgent, "GOSWIFT_USER_AGENT"},
+ {&c.ConnectTimeout, "GOSWIFT_CONNECT_TIMEOUT"},
+ {&c.Timeout, "GOSWIFT_TIMEOUT"},
+ {&c.Region, "OS_REGION_NAME"},
+ {&c.AuthVersion, "ST_AUTH_VERSION"},
+ {&c.Internal, "GOSWIFT_INTERNAL"},
+ {&c.Tenant, "OS_TENANT_NAME"}, //v2
+ {&c.Tenant, "OS_PROJECT_NAME"}, // v3
+ {&c.TenantId, "OS_TENANT_ID"},
+ {&c.EndpointType, "OS_ENDPOINT_TYPE"},
+ {&c.TenantDomain, "OS_PROJECT_DOMAIN_NAME"},
+ {&c.TenantDomainId, "OS_PROJECT_DOMAIN_ID"},
+ {&c.TrustId, "OS_TRUST_ID"},
+ {&c.StorageUrl, "OS_STORAGE_URL"},
+ {&c.AuthToken, "OS_AUTH_TOKEN"},
+ // v1 auth alternatives
+ {&c.ApiKey, "ST_KEY"},
+ {&c.UserName, "ST_USER"},
+ {&c.AuthUrl, "ST_AUTH"},
+ } {
+ err = setFromEnv(item.result, item.name)
+ if err != nil {
+ return newErrorf(0, "failed to read env var %q: %v", item.name, err)
+ }
+ }
+ return nil
+}
+
+// Error - all errors generated by this package are of this type. Other errors
+// may be passed on from library functions though.
+type Error struct {
+ StatusCode int // HTTP status code if relevant or 0 if not
+ Text string
+}
+
+// Error satisfies the error interface.
+func (e *Error) Error() string {
+ return e.Text
+}
+
+// newError makes a new error from a string.
+func newError(StatusCode int, Text string) *Error {
+ return &Error{
+ StatusCode: StatusCode,
+ Text: Text,
+ }
+}
+
+// newErrorf makes a new error from sprintf parameters.
+func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error {
+ return newError(StatusCode, fmt.Sprintf(Text, Parameters...))
+}
+
+// errorMap defines http error codes to error mappings.
+type errorMap map[int]error
+
+var (
+ // Specific Errors you might want to check for equality
+ NotModified = newError(304, "Not Modified")
+ BadRequest = newError(400, "Bad Request")
+ AuthorizationFailed = newError(401, "Authorization Failed")
+ ContainerNotFound = newError(404, "Container Not Found")
+ ContainerNotEmpty = newError(409, "Container Not Empty")
+ ObjectNotFound = newError(404, "Object Not Found")
+ ObjectCorrupted = newError(422, "Object Corrupted")
+ TimeoutError = newError(408, "Timeout when reading or writing data")
+ Forbidden = newError(403, "Operation forbidden")
+ TooLargeObject = newError(413, "Too Large Object")
+ RateLimit = newError(498, "Rate Limit")
+ TooManyRequests = newError(429, "TooManyRequests")
+
+ // Mappings for authentication errors
+ authErrorMap = errorMap{
+ 400: BadRequest,
+ 401: AuthorizationFailed,
+ 403: Forbidden,
+ }
+
+ // Mappings for container errors
+ ContainerErrorMap = errorMap{
+ 400: BadRequest,
+ 403: Forbidden,
+ 404: ContainerNotFound,
+ 409: ContainerNotEmpty,
+ 498: RateLimit,
+ }
+
+ // Mappings for object errors
+ objectErrorMap = errorMap{
+ 304: NotModified,
+ 400: BadRequest,
+ 403: Forbidden,
+ 404: ObjectNotFound,
+ 413: TooLargeObject,
+ 422: ObjectCorrupted,
+ 429: TooManyRequests,
+ 498: RateLimit,
+ }
+)
+
+// checkClose is used to check the return from Close in a defer
+// statement.
+func checkClose(c io.Closer, err *error) {
+ cerr := c.Close()
+ if *err == nil {
+ *err = cerr
+ }
+}
+
+// drainAndClose discards all data from rd and closes it.
+// If an error occurs during Read, it is discarded.
+func drainAndClose(rd io.ReadCloser, err *error) {
+ if rd == nil {
+ return
+ }
+
+ _, _ = io.Copy(ioutil.Discard, rd)
+ cerr := rd.Close()
+ if err != nil && *err == nil {
+ *err = cerr
+ }
+}
+
+// parseHeaders checks a response for errors and translates into
+// standard errors if necessary. If an error is returned, resp.Body
+// has been drained and closed.
+func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error {
+ if errorMap != nil {
+ if err, ok := errorMap[resp.StatusCode]; ok {
+ drainAndClose(resp.Body, nil)
+ return err
+ }
+ }
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ drainAndClose(resp.Body, nil)
+ return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status)
+ }
+ return nil
+}
+
+// readHeaders returns a Headers object from the http.Response.
+//
+// If it receives multiple values for a key (which should never
+// happen) it will use the first one
+func readHeaders(resp *http.Response) Headers {
+ headers := Headers{}
+ for key, values := range resp.Header {
+ headers[key] = values[0]
+ }
+ return headers
+}
+
+// Headers stores HTTP headers (can only have one of each header like Swift).
+type Headers map[string]string
+
+// Does an http request using the running timer passed in
+func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) {
+ // Do the request in the background so we can check the timeout
+ type result struct {
+ resp *http.Response
+ err error
+ }
+ done := make(chan result, 1)
+ go func() {
+ resp, err := c.client.Do(req)
+ done <- result{resp, err}
+ }()
+ // Wait for the read or the timeout
+ select {
+ case r := <-done:
+ return r.resp, r.err
+ case <-timer.C:
+ // Kill the connection on timeout so we don't leak sockets or goroutines
+ cancelRequest(c.Transport, req)
+ return nil, TimeoutError
+ }
+ panic("unreachable") // For Go 1.0
+}
+
+// Set defaults for any unset values
+//
+// Call with authLock held
+func (c *Connection) setDefaults() {
+ if c.UserAgent == "" {
+ c.UserAgent = DefaultUserAgent
+ }
+ if c.Retries == 0 {
+ c.Retries = DefaultRetries
+ }
+ if c.ConnectTimeout == 0 {
+ c.ConnectTimeout = 10 * time.Second
+ }
+ if c.Timeout == 0 {
+ c.Timeout = 60 * time.Second
+ }
+ if c.Transport == nil {
+ t := &http.Transport{
+ // TLSClientConfig: &tls.Config{RootCAs: pool},
+ // DisableCompression: true,
+ Proxy: http.ProxyFromEnvironment,
+ // Half of linux's default open files limit (1024).
+ MaxIdleConnsPerHost: 512,
+ }
+ SetExpectContinueTimeout(t, 5*time.Second)
+ c.Transport = t
+ }
+ if c.client == nil {
+ c.client = &http.Client{
+ // CheckRedirect: redirectPolicyFunc,
+ Transport: c.Transport,
+ }
+ }
+}
+
+// Authenticate connects to the Swift server.
+//
+// If you don't call it before calling one of the connection methods
+// then it will be called for you on the first access.
+func (c *Connection) Authenticate() (err error) {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+ return c.authenticate()
+}
+
+// Internal implementation of Authenticate
+//
+// Call with authLock held
+func (c *Connection) authenticate() (err error) {
+ c.setDefaults()
+
+ // Flush the keepalive connections - if we are
+ // re-authenticating then stuff has gone wrong
+ flushKeepaliveConnections(c.Transport)
+
+ if c.Auth == nil {
+ c.Auth, err = newAuth(c)
+ if err != nil {
+ return
+ }
+ }
+
+ retries := 1
+again:
+ var req *http.Request
+ req, err = c.Auth.Request(c)
+ if err != nil {
+ return
+ }
+ if req != nil {
+ timer := time.NewTimer(c.ConnectTimeout)
+ defer timer.Stop()
+ var resp *http.Response
+ resp, err = c.doTimeoutRequest(timer, req)
+ if err != nil {
+ return
+ }
+ defer func() {
+ drainAndClose(resp.Body, &err)
+ // Flush the auth connection - we don't want to keep
+ // it open if keepalives were enabled
+ flushKeepaliveConnections(c.Transport)
+ }()
+ if err = c.parseHeaders(resp, authErrorMap); err != nil {
+ // Try again for a limited number of times on
+ // AuthorizationFailed or BadRequest. This allows us
+ // to try some alternate forms of the request
+ if (err == AuthorizationFailed || err == BadRequest) && retries > 0 {
+ retries--
+ goto again
+ }
+ return
+ }
+ err = c.Auth.Response(resp)
+ if err != nil {
+ return
+ }
+ }
+ if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" {
+ c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType)
+ } else {
+ c.StorageUrl = c.Auth.StorageUrl(c.Internal)
+ }
+ c.AuthToken = c.Auth.Token()
+ if do, ok := c.Auth.(Expireser); ok {
+ c.Expires = do.Expires()
+ } else {
+ c.Expires = time.Time{}
+ }
+
+ if !c.authenticated() {
+ err = newError(0, "Response didn't have storage url and auth token")
+ return
+ }
+ return
+}
+
+// Get an authToken and url
+//
+// The Url may be updated if re-authentication was needed, via the OnReAuth function
+func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+ targetUrlOut = targetUrlIn
+ if !c.authenticated() {
+ err = c.authenticate()
+ if err != nil {
+ return
+ }
+ if OnReAuth != nil {
+ targetUrlOut, err = OnReAuth()
+ if err != nil {
+ return
+ }
+ }
+ }
+ authToken = c.AuthToken
+ return
+}
+
+// flushKeepaliveConnections is called to flush pending requests after an error.
+func flushKeepaliveConnections(transport http.RoundTripper) {
+ if tr, ok := transport.(interface {
+ CloseIdleConnections()
+ }); ok {
+ tr.CloseIdleConnections()
+ }
+}
+
+// UnAuthenticate removes the authentication from the Connection.
+func (c *Connection) UnAuthenticate() {
+ c.authLock.Lock()
+ c.StorageUrl = ""
+ c.AuthToken = ""
+ c.authLock.Unlock()
+}
+
+// Authenticated returns a boolean to show if the current connection
+// is authenticated.
+//
+// Doesn't actually check the credentials against the server.
+func (c *Connection) Authenticated() bool {
+ c.authLock.Lock()
+ defer c.authLock.Unlock()
+ return c.authenticated()
+}
+
+// Internal version of Authenticated()
+//
+// Call with authLock held
+func (c *Connection) authenticated() bool {
+ if c.StorageUrl == "" || c.AuthToken == "" {
+ return false
+ }
+ if c.Expires.IsZero() {
+ return true
+ }
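+ // Allow a 60s margin so the token doesn't expire under an in-flight request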
+ timeUntilExpiry := c.Expires.Sub(time.Now())
+ return timeUntilExpiry >= 60*time.Second
+}
+
+// SwiftInfo contains the JSON object returned by Swift when the /info
+// route is queried. The object contains, among others, the Swift version,
+// the enabled middlewares and their configuration
+type SwiftInfo map[string]interface{}
+
+func (i SwiftInfo) SupportsBulkDelete() bool {
+ _, val := i["bulk_delete"]
+ return val
+}
+
+func (i SwiftInfo) SupportsSLO() bool {
+ _, val := i["slo"]
+ return val
+}
+
+func (i SwiftInfo) SLOMinSegmentSize() int64 {
+ if slo, ok := i["slo"].(map[string]interface{}); ok {
+ val, _ := slo["min_segment_size"].(float64)
+ return int64(val)
+ }
+ return 1
+}
+
+// Discover Swift configuration by doing a request against /info
+func (c *Connection) QueryInfo() (infos SwiftInfo, err error) {
+ infoUrl, err := url.Parse(c.StorageUrl)
+ if err != nil {
+ return nil, err
+ }
+ infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info")
+ resp, err := c.client.Get(infoUrl.String())
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ drainAndClose(resp.Body, nil)
+ return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode)
+ }
+ err = readJson(resp, &infos)
+ if err == nil {
+ c.authLock.Lock()
+ c.swiftInfo = infos
+ c.authLock.Unlock()
+ }
+ return infos, err
+ }
+ return nil, err
+}
+
+func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) {
+ c.authLock.Lock()
+ infos = c.swiftInfo
+ c.authLock.Unlock()
+ if infos == nil {
+ infos, err = c.QueryInfo()
+ if err != nil {
+ return
+ }
+ }
+ return infos, nil
+}
+
+// RequestOpts contains parameters for Connection.storage and Connection.Call.
+type RequestOpts struct {
+ Container string
+ ObjectName string
+ Operation string
+ Parameters url.Values
+ Headers Headers
+ ErrorMap errorMap
+ NoResponse bool
+ Body io.Reader
+ Retries int
+ // if set this is called on re-authentication to refresh the targetUrl
+ OnReAuth func() (string, error)
+}
+
+// Call runs a remote command on the targetUrl, returns a
+// response, headers and possible error.
+//
+// operation is GET, HEAD etc
+// container is the name of a container
+// Any other parameters (if not nil) are added to the targetUrl
+//
+// Returns a response or an error. If response is returned then
+// the resp.Body must be read completely and
+// resp.Body.Close() must be called on it, unless NoResponse is set in
+// which case the body will be closed in this function
+//
+// If "Content-Length" is set in p.Headers it will be used - this can
+// be used to override the default chunked transfer encoding for
+// uploads.
+//
+// This will Authenticate if necessary, and re-authenticate if it
+// receives a 401 error which means the token has expired
+//
+// This method is exported so extensions can call it.
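+//
+// A minimal sketch of calling it directly (hypothetical container name):
+//
+// resp, headers, err := c.Call(c.StorageUrl, RequestOpts{
+// Container: "mycontainer",
+// Operation: "HEAD",
+// NoResponse: true,
+// })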
+func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) {
+ c.authLock.Lock()
+ c.setDefaults()
+ c.authLock.Unlock()
+ retries := p.Retries
+ if retries == 0 {
+ retries = c.Retries
+ }
+ var req *http.Request
+ for {
+ var authToken string
+ if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil {
+ return //authentication failure
+ }
+ var URL *url.URL
+ URL, err = url.Parse(targetUrl)
+ if err != nil {
+ return
+ }
+ if p.Container != "" {
+ URL.Path += "/" + p.Container
+ if p.ObjectName != "" {
+ URL.Path += "/" + p.ObjectName
+ }
+ }
+ if p.Parameters != nil {
+ URL.RawQuery = p.Parameters.Encode()
+ }
+ timer := time.NewTimer(c.ConnectTimeout)
+ defer timer.Stop()
+ reader := p.Body
+ if reader != nil {
+ reader = newWatchdogReader(reader, c.Timeout, timer)
+ }
+ req, err = http.NewRequest(p.Operation, URL.String(), reader)
+ if err != nil {
+ return
+ }
+ if p.Headers != nil {
+ for k, v := range p.Headers {
+ // Set ContentLength in req if the user passed it in in the headers
+ if k == "Content-Length" {
+ req.ContentLength, err = strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ err = fmt.Errorf("Invalid %q header %q: %v", k, v, err)
+ return
+ }
+ } else {
+ req.Header.Add(k, v)
+ }
+ }
+ }
+ req.Header.Add("User-Agent", c.UserAgent)
+ req.Header.Add("X-Auth-Token", authToken)
+
+ _, hasCL := p.Headers["Content-Length"]
+ AddExpectAndTransferEncoding(req, hasCL)
+
+ resp, err = c.doTimeoutRequest(timer, req)
+ if err != nil {
+ if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 {
+ retries--
+ continue
+ }
+ return
+ }
+ // Check to see if token has expired
+ if resp.StatusCode == 401 && retries > 0 {
+ drainAndClose(resp.Body, nil)
+ c.UnAuthenticate()
+ retries--
+ } else {
+ break
+ }
+ }
+
+ headers = readHeaders(resp)
+ if err = c.parseHeaders(resp, p.ErrorMap); err != nil {
+ return
+ }
+ if p.NoResponse {
+ drainAndClose(resp.Body, &err)
+ if err != nil {
+ return
+ }
+ } else {
+ // Cancel the request on timeout
+ cancel := func() {
+ cancelRequest(c.Transport, req)
+ }
+ // Wrap resp.Body to make it obey an idle timeout
+ resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel)
+ }
+ return
+}
+
+// storage runs a remote command on the storage url, returns a
+// response, headers and possible error.
+//
+// operation is GET, HEAD etc
+// container is the name of a container
+// Any other parameters (if not nil) are added to the storage url
+//
+// Returns a response or an error. If response is returned then
+// resp.Body.Close() must be called on it, unless NoResponse is set in
+// which case the body will be closed in this function
+//
+// This will Authenticate if necessary, and re-authenticate if it
+// receives a 401 error which means the token has expired
+func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) {
+ p.OnReAuth = func() (string, error) {
+ return c.StorageUrl, nil
+ }
+ c.authLock.Lock()
+ url := c.StorageUrl
+ c.authLock.Unlock()
+ return c.Call(url, p)
+}
+
+// readLines reads the response into an array of strings.
+//
+// Closes the response when done
+func readLines(resp *http.Response) (lines []string, err error) {
+ defer drainAndClose(resp.Body, &err)
+ reader := bufio.NewReader(resp.Body)
+ buffer := bytes.NewBuffer(make([]byte, 0, 128))
+ var part []byte
+ var prefix bool
+ for {
+ if part, prefix, err = reader.ReadLine(); err != nil {
+ break
+ }
+ buffer.Write(part)
+ if !prefix {
+ lines = append(lines, buffer.String())
+ buffer.Reset()
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return
+}
+
+// readJson reads the response into the json type passed in
+//
+// Closes the response when done
+func readJson(resp *http.Response, result interface{}) (err error) {
+ defer drainAndClose(resp.Body, &err)
+ decoder := json.NewDecoder(resp.Body)
+ return decoder.Decode(result)
+}
+
+/* ------------------------------------------------------------ */
+
+// ContainersOpts is options for Containers() and ContainerNames()
+type ContainersOpts struct {
+ Limit int // For an integer value n, limits the number of results to at most n values.
+ Prefix string // Given a string value x, return container names matching the specified prefix.
+ Marker string // Given a string value x, return container names greater in value than the specified marker.
+ EndMarker string // Given a string value x, return container names less in value than the specified marker.
+ Headers Headers // Any additional HTTP headers - can be nil
+}
+
+// parse the ContainersOpts
+func (opts *ContainersOpts) parse() (url.Values, Headers) {
+ v := url.Values{}
+ var h Headers
+ if opts != nil {
+ if opts.Limit > 0 {
+ v.Set("limit", strconv.Itoa(opts.Limit))
+ }
+ if opts.Prefix != "" {
+ v.Set("prefix", opts.Prefix)
+ }
+ if opts.Marker != "" {
+ v.Set("marker", opts.Marker)
+ }
+ if opts.EndMarker != "" {
+ v.Set("end_marker", opts.EndMarker)
+ }
+ h = opts.Headers
+ }
+ return v, h
+}
+
+// ContainerNames returns a slice of names of containers in this account.
+func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) {
+ v, h := opts.parse()
+ resp, _, err := c.storage(RequestOpts{
+ Operation: "GET",
+ Parameters: v,
+ ErrorMap: ContainerErrorMap,
+ Headers: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ lines, err := readLines(resp)
+ return lines, err
+}
+
+// Container contains information about a container
+type Container struct {
+ Name string // Name of the container
+ Count int64 // Number of objects in the container
+ Bytes int64 // Total number of bytes used in the container
+}
+
+// Containers returns a slice of structures with full information as
+// described in Container.
+func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) {
+ v, h := opts.parse()
+ v.Set("format", "json")
+ resp, _, err := c.storage(RequestOpts{
+ Operation: "GET",
+ Parameters: v,
+ ErrorMap: ContainerErrorMap,
+ Headers: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ var containers []Container
+ err = readJson(resp, &containers)
+ return containers, err
+}
+
+// containersAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker
+func containersAllOpts(opts *ContainersOpts) *ContainersOpts {
+ var newOpts ContainersOpts
+ if opts != nil {
+ newOpts = *opts
+ }
+ if newOpts.Limit == 0 {
+ newOpts.Limit = allContainersLimit
+ }
+ newOpts.Marker = ""
+ return &newOpts
+}
+
+// ContainersAll is like Containers but it returns all the Containers
+//
+// It calls Containers multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) {
+ opts = containersAllOpts(opts)
+ containers := make([]Container, 0)
+ for {
+ newContainers, err := c.Containers(opts)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, newContainers...)
+ if len(newContainers) < opts.Limit {
+ break
+ }
+ opts.Marker = newContainers[len(newContainers)-1].Name
+ }
+ return containers, nil
+}
+
+// ContainerNamesAll is like ContainerNames but it returns all the container names
+//
+// It calls ContainerNames multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) {
+ opts = containersAllOpts(opts)
+ containers := make([]string, 0)
+ for {
+ newContainers, err := c.ContainerNames(opts)
+ if err != nil {
+ return nil, err
+ }
+ containers = append(containers, newContainers...)
+ if len(newContainers) < opts.Limit {
+ break
+ }
+ opts.Marker = newContainers[len(newContainers)-1]
+ }
+ return containers, nil
+}
+
+/* ------------------------------------------------------------ */
+
+// ObjectsOpts is options for Objects() and ObjectNames()
+type ObjectsOpts struct {
+ Limit int // For an integer value n, limits the number of results to at most n values.
+ Marker string // Given a string value x, return object names greater in value than the specified marker.
+ EndMarker string // Given a string value x, return object names less in value than the specified marker
+ Prefix string // For a string value x, causes the results to be limited to object names beginning with the substring x.
+ Path string // For a string value x, return the object names nested in the pseudo path
+ Delimiter rune // For a character c, return all the object names nested in the container
+ Headers Headers // Any additional HTTP headers - can be nil
+ KeepMarker bool // Do not reset Marker when using ObjectsAll or ObjectNamesAll
+}
+
+// parse reads values out of ObjectsOpts
+func (opts *ObjectsOpts) parse() (url.Values, Headers) {
+ v := url.Values{}
+ var h Headers
+ if opts != nil {
+ if opts.Limit > 0 {
+ v.Set("limit", strconv.Itoa(opts.Limit))
+ }
+ if opts.Marker != "" {
+ v.Set("marker", opts.Marker)
+ }
+ if opts.EndMarker != "" {
+ v.Set("end_marker", opts.EndMarker)
+ }
+ if opts.Prefix != "" {
+ v.Set("prefix", opts.Prefix)
+ }
+ if opts.Path != "" {
+ v.Set("path", opts.Path)
+ }
+ if opts.Delimiter != 0 {
+ v.Set("delimiter", string(opts.Delimiter))
+ }
+ h = opts.Headers
+ }
+ return v, h
+}
+
+// ObjectNames returns a slice of names of objects in a given container.
+func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) {
+ v, h := opts.parse()
+ resp, _, err := c.storage(RequestOpts{
+ Container: container,
+ Operation: "GET",
+ Parameters: v,
+ ErrorMap: ContainerErrorMap,
+ Headers: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return readLines(resp)
+}
+
+// Object contains information about an object
+type Object struct {
+ Name string `json:"name"` // object name
+ ContentType string `json:"content_type"` // eg application/directory
+ Bytes int64 `json:"bytes"` // size in bytes
+ ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server
+ LastModified time.Time // Last modified time converted to a time.Time
+ Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
+ SLOHash string `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
+ PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist
+ SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories"
+ ObjectType ObjectType // type of this object
+}
+
+// Objects returns a slice of Object with information about each
+// object in the container.
+//
+// If Delimiter is set in the opts then PseudoDirectory may be set,
+// with ContentType 'application/directory'. These are not real
+// objects but represent directories of objects which haven't had an
+// object created for them.
+func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) {
+ v, h := opts.parse()
+ v.Set("format", "json")
+ resp, _, err := c.storage(RequestOpts{
+ Container: container,
+ Operation: "GET",
+ Parameters: v,
+ ErrorMap: ContainerErrorMap,
+ Headers: h,
+ })
+ if err != nil {
+ return nil, err
+ }
+ var objects []Object
+ err = readJson(resp, &objects)
+ // Convert Pseudo directories and dates
+ for i := range objects {
+ object := &objects[i]
+ if object.SubDir != "" {
+ object.Name = object.SubDir
+ object.PseudoDirectory = true
+ object.ContentType = "application/directory"
+ }
+ if object.ServerLastModified != "" {
+ // 2012-11-11T14:49:47.887250
+ //
+ // Remove fractional seconds if present. This
+ // then keeps it consistent with Object
+ // which can only return timestamps accurate
+ // to 1 second
+ //
+ // The TimeFormat will parse fractional
+ // seconds if desired though
+ datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0]
+ object.LastModified, err = time.Parse(TimeFormat, datetime)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if object.SLOHash != "" {
+ object.ObjectType = StaticLargeObjectType
+ }
+ }
+ return objects, err
+}
+
+// objectsAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker
+// Marker is not overridden if KeepMarker is set
+func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts {
+ var newOpts ObjectsOpts
+ if opts != nil {
+ newOpts = *opts
+ }
+ if newOpts.Limit == 0 {
+ newOpts.Limit = Limit
+ }
+ if !newOpts.KeepMarker {
+ newOpts.Marker = ""
+ }
+ return &newOpts
+}
+
+// A closure defined by the caller to iterate through all objects
+//
+// Call Objects or ObjectNames from here with the *ObjectsOpts passed in
+//
+// Do whatever is required with the results then return them
+type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error)
+
+// ObjectsWalk is used to iterate through all the objects in chunks as
+// returned by Objects or ObjectNames using the Marker and Limit
+// parameters in the ObjectsOpts.
+//
+// Pass in a closure `walkFn` which calls Objects or ObjectNames with
+// the *ObjectsOpts passed to it and does something with the results.
+//
+// Errors will be returned from this function
+//
+// It has a default Limit parameter but you may pass in your own
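+//
+// For example, a sketch that collects every object name (hypothetical
+// container name):
+//
+// var names []string
+// err := c.ObjectsWalk("mycontainer", nil, func(opts *ObjectsOpts) (interface{}, error) {
+// newNames, err := c.ObjectNames("mycontainer", opts)
+// names = append(names, newNames...)
+// return newNames, err
+// })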
+func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error {
+ opts = objectsAllOpts(opts, allObjectsChanLimit)
+ for {
+ objects, err := walkFn(opts)
+ if err != nil {
+ return err
+ }
+ var n int
+ var last string
+ switch objects := objects.(type) {
+ case []string:
+ n = len(objects)
+ if n > 0 {
+ last = objects[len(objects)-1]
+ }
+ case []Object:
+ n = len(objects)
+ if n > 0 {
+ last = objects[len(objects)-1].Name
+ }
+ default:
+ panic("Unknown type returned to ObjectsWalk")
+ }
+ if n < opts.Limit {
+ break
+ }
+ opts.Marker = last
+ }
+ return nil
+}
+
+// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice
+//
+// It calls Objects multiple times using the Marker parameter
+func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) {
+ objects := make([]Object, 0)
+ err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+ newObjects, err := c.Objects(container, opts)
+ if err == nil {
+ objects = append(objects, newObjects...)
+ }
+ return newObjects, err
+ })
+ return objects, err
+}
+
+// ObjectNamesAll is like ObjectNames but it returns all the Objects
+//
+// It calls ObjectNames multiple times using the Marker parameter. Marker is
+// reset unless KeepMarker is set
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) {
+ objects := make([]string, 0)
+ err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+ newObjects, err := c.ObjectNames(container, opts)
+ if err == nil {
+ objects = append(objects, newObjects...)
+ }
+ return newObjects, err
+ })
+ return objects, err
+}
+
+// Account contains information about this account.
+type Account struct {
+ BytesUsed int64 // total number of bytes used
+ Containers int64 // total number of containers
+ Objects int64 // total number of objects
+}
+
+// getInt64FromHeader is a helper function to decode int64 from header.
+func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) {
+ value := resp.Header.Get(header)
+ result, err = strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err)
+ }
+ return
+}
+
+// Account returns info about the account in an Account struct.
+func (c *Connection) Account() (info Account, headers Headers, err error) {
+ var resp *http.Response
+ resp, headers, err = c.storage(RequestOpts{
+ Operation: "HEAD",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ })
+ if err != nil {
+ return
+ }
+ // Parse the headers into a dict
+ //
+ // {'Accept-Ranges': 'bytes',
+ // 'Content-Length': '0',
+ // 'Date': 'Tue, 05 Jul 2011 16:37:06 GMT',
+ // 'X-Account-Bytes-Used': '316598182',
+ // 'X-Account-Container-Count': '4',
+ // 'X-Account-Object-Count': '1433'}
+ if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil {
+ return
+ }
+ if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil {
+ return
+ }
+ if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil {
+ return
+ }
+ return
+}
+
+// AccountUpdate adds, replaces or removes account metadata.
+//
+// Add or update keys by mentioning them in the Headers.
+//
+// Remove keys by setting them to an empty string.
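+//
+// For example, a sketch that sets one metadata key (hypothetical key
+// and value):
+//
+// m := Metadata{"temp-url-key": "secret"}
+// err := c.AccountUpdate(m.AccountHeaders())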
+func (c *Connection) AccountUpdate(h Headers) error {
+ _, _, err := c.storage(RequestOpts{
+ Operation: "POST",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ Headers: h,
+ })
+ return err
+}
+
+// ContainerCreate creates a container.
+//
+// If you don't want to add Headers just pass in nil
+//
+// No error is returned if it already exists, but the metadata, if any, will be updated.
+func (c *Connection) ContainerCreate(container string, h Headers) error {
+ _, _, err := c.storage(RequestOpts{
+ Container: container,
+ Operation: "PUT",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ Headers: h,
+ })
+ return err
+}
+
+// ContainerDelete deletes a container.
+//
+// May return ContainerNotFound or ContainerNotEmpty
+func (c *Connection) ContainerDelete(container string) error {
+ _, _, err := c.storage(RequestOpts{
+ Container: container,
+ Operation: "DELETE",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ })
+ return err
+}
+
+// Container returns info about a single container including any
+// metadata in the headers.
+func (c *Connection) Container(container string) (info Container, headers Headers, err error) {
+ var resp *http.Response
+ resp, headers, err = c.storage(RequestOpts{
+ Container: container,
+ Operation: "HEAD",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ })
+ if err != nil {
+ return
+ }
+ // Parse the headers into the struct
+ info.Name = container
+ if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil {
+ return
+ }
+ if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil {
+ return
+ }
+ return
+}
+
+// ContainerUpdate adds, replaces or removes container metadata.
+//
+// Add or update keys by mentioning them in the Metadata.
+//
+// Remove keys by setting them to an empty string.
+//
+// Container metadata can only be read with Container() not with Containers().
+func (c *Connection) ContainerUpdate(container string, h Headers) error {
+ _, _, err := c.storage(RequestOpts{
+ Container: container,
+ Operation: "POST",
+ ErrorMap: ContainerErrorMap,
+ NoResponse: true,
+ Headers: h,
+ })
+ return err
+}
+
+// ------------------------------------------------------------
+
+// ObjectCreateFile represents a swift object open for writing
+type ObjectCreateFile struct {
+ checkHash bool // whether we are checking the hash
+ pipeReader *io.PipeReader // pipe for the caller to use
+ pipeWriter *io.PipeWriter
+ hash hash.Hash // hash being built up as we go along
+ done chan struct{} // signals when the upload has finished
+ resp *http.Response // valid when done has signalled
+ err error // ditto
+ headers Headers // ditto
+}
+
+// Write bytes to the object - see io.Writer
+func (file *ObjectCreateFile) Write(p []byte) (n int, err error) {
+ n, err = file.pipeWriter.Write(p)
+ if err == io.ErrClosedPipe {
+ if file.err != nil {
+ return 0, file.err
+ }
+ return 0, newError(500, "Write on closed file")
+ }
+ if err == nil && file.checkHash {
+ _, _ = file.hash.Write(p)
+ }
+ return
+}
+
+// Close closes the object and checks the md5sum if it was required.
+//
+// Also returns any other errors from the server (eg container not
+// found) so it is very important to check the errors on this method.
+func (file *ObjectCreateFile) Close() error {
+ // Close the body
+ err := file.pipeWriter.Close()
+ if err != nil {
+ return err
+ }
+
+ // Wait for the HTTP operation to complete
+ <-file.done
+
+ // Check errors
+ if file.err != nil {
+ return file.err
+ }
+ if file.checkHash {
+ receivedMd5 := strings.ToLower(file.headers["Etag"])
+ calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil))
+ if receivedMd5 != calculatedMd5 {
+ return ObjectCorrupted
+ }
+ }
+ return nil
+}
+
+// Headers returns the response headers from the created object if the upload
+// has been completed. The Close() method must be called on an ObjectCreateFile
+// before this method.
+func (file *ObjectCreateFile) Headers() (Headers, error) {
+ // error out if upload is not complete.
+ select {
+ case <-file.done:
+ default:
+ return nil, fmt.Errorf("Cannot get metadata, object upload failed or has not yet completed.")
+ }
+ return file.headers, nil
+}
+
+// Check it satisfies the interface
+var _ io.WriteCloser = &ObjectCreateFile{}
+
+// objectPutHeaders creates a set of headers for a PUT
+//
+// It guesses the contentType from the objectName if it isn't set
+//
+// checkHash may be changed
+func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers {
+ if contentType == "" {
+ contentType = mime.TypeByExtension(path.Ext(objectName))
+ if contentType == "" {
+ contentType = "application/octet-stream"
+ }
+ }
+ // Meta stuff
+ extraHeaders := map[string]string{
+ "Content-Type": contentType,
+ }
+ for key, value := range h {
+ extraHeaders[key] = value
+ }
+ if Hash != "" {
+ extraHeaders["Etag"] = Hash
+ *checkHash = false // the server will do it
+ }
+ return extraHeaders
+}
+
+// ObjectCreate creates or updates the object in the container. It
+// returns an io.WriteCloser you should write the contents to. You
+// MUST call Close() on it and you MUST check the error return from
+// Close().
+//
+// If checkHash is true then it will calculate the MD5 Hash of the
+// file as it is being uploaded and check it against that returned
+// from the server. If it is wrong then it will return
+// ObjectCorrupted on Close()
+//
+// If you know the MD5 hash of the object ahead of time then set the
+// Hash parameter and it will be sent to the server (as an Etag
+// header) and the server will check the MD5 itself after the upload,
+// and this will return ObjectCorrupted on Close() if it is incorrect.
+//
+// If you don't want any error protection (not recommended) then set
+// checkHash to false and Hash to "".
+//
+// If contentType is set it will be used, otherwise one will be
+// guessed from objectName using mime.TypeByExtension
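+//
+// A minimal usage sketch (hypothetical names, error handling elided):
+//
+// out, err := c.ObjectCreate("mycontainer", "hello.txt", true, "", "text/plain", nil)
+// _, err = out.Write([]byte("hello"))
+// err = out.Close() // MUST be checked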
+func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) {
+ extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
+ pipeReader, pipeWriter := io.Pipe()
+ file = &ObjectCreateFile{
+ hash: md5.New(),
+ checkHash: checkHash,
+ pipeReader: pipeReader,
+ pipeWriter: pipeWriter,
+ done: make(chan struct{}),
+ }
+ // Run the PUT in the background piping it data
+ go func() {
+ opts := RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "PUT",
+ Headers: extraHeaders,
+ Body: pipeReader,
+ NoResponse: true,
+ ErrorMap: objectErrorMap,
+ }
+ file.resp, file.headers, file.err = c.storage(opts)
+ // Signal finished
+ pipeReader.Close()
+ close(file.done)
+ }()
+ return
+}
+
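+// ObjectSymlinkCreate creates a symlink object in container pointing
+// at targetContainer/targetObject. If targetAccount is set the link
+// crosses accounts and if targetEtag is set the target must match it.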
+func (c *Connection) ObjectSymlinkCreate(container string, symlink string, targetAccount string, targetContainer string, targetObject string, targetEtag string) (headers Headers, err error) {
+ emptyMD5 := "d41d8cd98f00b204e9800998ecf8427e" // MD5 of the empty string
+ symHeaders := Headers{}
+ contents := bytes.NewBufferString("")
+ if targetAccount != "" {
+ symHeaders["X-Symlink-Target-Account"] = targetAccount
+ }
+ if targetEtag != "" {
+ symHeaders["X-Symlink-Target-Etag"] = targetEtag
+ }
+ symHeaders["X-Symlink-Target"] = fmt.Sprintf("%s/%s", targetContainer, targetObject)
+ _, err = c.ObjectPut(container, symlink, contents, true, emptyMD5, "application/symlink", symHeaders)
+ return
+}
+
+func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) {
+ extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h)
+ hash := md5.New()
+ var body io.Reader = contents
+ if checkHash {
+ body = io.TeeReader(contents, hash)
+ }
+ _, headers, err = c.storage(RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "PUT",
+ Headers: extraHeaders,
+ Body: body,
+ NoResponse: true,
+ ErrorMap: objectErrorMap,
+ Parameters: parameters,
+ })
+ if err != nil {
+ return
+ }
+ if checkHash {
+ receivedMd5 := strings.ToLower(headers["Etag"])
+ calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil))
+ if receivedMd5 != calculatedMd5 {
+ err = ObjectCorrupted
+ return
+ }
+ }
+ return
+}
+
+// ObjectPut creates or updates the path in the container from
+// contents. contents should be an open io.Reader which will have all
+// its contents read.
+//
+// This is a low level interface.
+//
+// If checkHash is true then it will calculate the MD5 Hash of the
+// file as it is being uploaded and check it against that returned
+// from the server. If it is wrong then it will return
+// ObjectCorrupted.
+//
+// If you know the MD5 hash of the object ahead of time then set the
+// Hash parameter and it will be sent to the server (as an Etag
+// header) and the server will check the MD5 itself after the upload,
+// and this will return ObjectCorrupted if it is incorrect.
+//
+// If you don't want any error protection (not recommended) then set
+// checkHash to false and Hash to "".
+//
+// If contentType is set it will be used, otherwise one will be
+// guessed from objectName using mime.TypeByExtension
+func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) {
+ return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil)
+}
+
+// ObjectPutBytes creates an object from a []byte in a container.
+//
+// This is a simplified interface which checks the MD5.
+func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) {
+ buf := bytes.NewBuffer(contents)
+ h := Headers{"Content-Length": strconv.Itoa(len(contents))}
+ _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h)
+ return
+}
+
+// ObjectPutString creates an object from a string in a container.
+//
+// This is a simplified interface which checks the MD5
+func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) {
+ buf := strings.NewReader(contents)
+ h := Headers{"Content-Length": strconv.Itoa(len(contents))}
+ _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h)
+ return
+}
+
+// ObjectOpenFile represents a swift object open for reading
+type ObjectOpenFile struct {
+ connection *Connection // stored copy of Connection used in Open
+ container string // stored copy of container used in Open
+ objectName string // stored copy of objectName used in Open
+ headers Headers // stored copy of headers used in Open
+ resp *http.Response // http connection
+ body io.Reader // read data from this
+ checkHash bool // true if checking MD5
+ hash hash.Hash // currently accumulating MD5
+ bytes int64 // number of bytes read on this connection
+ eof bool // whether we have read end of file
+ pos int64 // current position when reading
+ lengthOk bool // whether length is valid
+ length int64 // length of the object if read
+ seeked bool // whether we have seeked this file or not
+ overSeeked bool // set if we have seeked to the end or beyond
+}
+
+// Read bytes from the object - see io.Reader
+func (file *ObjectOpenFile) Read(p []byte) (n int, err error) {
+ if file.overSeeked {
+ return 0, io.EOF
+ }
+ n, err = file.body.Read(p)
+ file.bytes += int64(n)
+ file.pos += int64(n)
+ if err == io.EOF {
+ file.eof = true
+ }
+ return
+}
+
+// Seek sets the offset for the next Read to offset, interpreted
+// according to whence: 0 means relative to the origin of the file, 1
+// means relative to the current offset, and 2 means relative to the
+// end. Seek returns the new offset and an Error, if any.
+//
+// Seek uses HTTP Range headers which, if the file pointer is moved,
+// will involve reopening the HTTP connection.
+//
+// Note that you can't seek to the end of a file or beyond; HTTP Range
+// requests don't support the file pointer being outside the data,
+// unlike os.File
+//
+// Seek(0, 1) will return the current file pointer.
+func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) {
+ file.overSeeked = false
+ switch whence {
+ case 0: // relative to start
+ newPos = offset
+ case 1: // relative to current
+ newPos = file.pos + offset
+ case 2: // relative to end
+ if !file.lengthOk {
+ return file.pos, newError(0, "Length of file unknown so can't seek from end")
+ }
+ newPos = file.length + offset
+ if offset >= 0 {
+ file.overSeeked = true
+ return
+ }
+ default:
+ panic("Unknown whence in ObjectOpenFile.Seek")
+ }
+ // If at correct position (quite likely), do nothing
+ if newPos == file.pos {
+ return
+ }
+ // Close the file...
+ file.seeked = true
+ err = file.Close()
+ if err != nil {
+ return
+ }
+ // ...and re-open with a Range header
+ if file.headers == nil {
+ file.headers = Headers{}
+ }
+ if newPos > 0 {
+ file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos)
+ } else {
+ delete(file.headers, "Range")
+ }
+ newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers)
+ if err != nil {
+ return
+ }
+ // Update the file
+ file.resp = newFile.resp
+ file.body = newFile.body
+ file.checkHash = false
+ file.pos = newPos
+ return
+}
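+
+// exampleSeekAndRead is an illustrative sketch, not part of the
+// upstream library. Seeking to byte 100 from the start (whence 0)
+// reopens the HTTP connection with a Range header, after which the
+// rest of the object is read from that offset.
+func exampleSeekAndRead(file *ObjectOpenFile) ([]byte, error) {
+	if _, err := file.Seek(100, 0); err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, file); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}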
+
+// Length gets the object's content length, either from a cached copy
+// or from the server.
+func (file *ObjectOpenFile) Length() (int64, error) {
+ if !file.lengthOk {
+ info, _, err := file.connection.Object(file.container, file.objectName)
+ file.length = info.Bytes
+ file.lengthOk = (err == nil)
+ return file.length, err
+ }
+ return file.length, nil
+}
+
+// Close closes the object, and checks the length and md5sum if they
+// were requested and all of the object was read.
+func (file *ObjectOpenFile) Close() (err error) {
+ // Close the body at the end
+ defer checkClose(file.resp.Body, &err)
+
+ // If not end of file or seeked then can't check anything
+ if !file.eof || file.seeked {
+ return
+ }
+
+ // Check the MD5 sum if requested
+ if file.checkHash {
+ receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag"))
+ calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil))
+ if receivedMd5 != calculatedMd5 {
+ err = ObjectCorrupted
+ return
+ }
+ }
+
+ // Check to see we read the correct number of bytes
+ if file.lengthOk && file.length != file.bytes {
+ err = ObjectCorrupted
+ return
+ }
+ return
+}
+
+// Check it satisfies the interfaces
+var _ io.ReadCloser = &ObjectOpenFile{}
+var _ io.Seeker = &ObjectOpenFile{}
+
+func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) {
+ var resp *http.Response
+ opts := RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "GET",
+ ErrorMap: objectErrorMap,
+ Headers: h,
+ Parameters: parameters,
+ }
+ resp, headers, err = c.storage(opts)
+ if err != nil {
+ return
+ }
+ // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set
+ if checkHash && headers.IsLargeObject() {
+ // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName)
+ checkHash = false
+ }
+ file = &ObjectOpenFile{
+ connection: c,
+ container: container,
+ objectName: objectName,
+ headers: h,
+ resp: resp,
+ checkHash: checkHash,
+ body: resp.Body,
+ }
+ if checkHash {
+ file.hash = md5.New()
+ file.body = io.TeeReader(resp.Body, file.hash)
+ }
+ // Read Content-Length
+ if resp.Header.Get("Content-Length") != "" {
+ file.length, err = getInt64FromHeader(resp, "Content-Length")
+ file.lengthOk = (err == nil)
+ }
+ return
+}
+
+func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) {
+ err = withLORetry(0, func() (Headers, int64, error) {
+ file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters)
+ if err != nil {
+ return headers, 0, err
+ }
+ return headers, file.length, nil
+ })
+ return
+}
+
+// ObjectOpen returns an ObjectOpenFile for reading the contents of
+// the object. This satisfies the io.ReadCloser and the io.Seeker
+// interfaces.
+//
+// You must call Close() on the returned file when finished.
+//
+// Returns the headers of the response.
+//
+// If checkHash is true then it will calculate the md5sum of the file
+// as it is being received and check it against that returned from the
+// server. If it is wrong then it will return ObjectCorrupted. It
+// will also check the length returned. No checking will be done if
+// you don't read all the contents.
+//
+// Note that objects with X-Object-Manifest or X-Static-Large-Object
+// set never have their md5sums checked, as the md5sum reported on
+// the object is actually the md5sum of the md5sums of the
+// parts. This isn't much help for detecting a corrupted download, as
+// the sizes of the parts aren't known without doing more operations.
+// If you want to ensure the integrity of an object with a manifest
+// then you will need to download everything in the manifest separately.
+//
+// headers["Content-Type"] will give the content type if desired.
+func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) {
+ return c.objectOpen(container, objectName, checkHash, h, nil)
+}
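+
+// exampleObjectOpen is an illustrative sketch, not part of the
+// upstream library; names are hypothetical. It streams an object to an
+// io.Writer with MD5 checking, which is essentially what ObjectGet
+// does. Close performs the md5sum and length verification, so its
+// error must not be discarded.
+func exampleObjectOpen(c *Connection, w io.Writer) (err error) {
+	file, _, err := c.ObjectOpen("mycontainer", "path/to/object", true, nil)
+	if err != nil {
+		return err
+	}
+	defer checkClose(file, &err)
+	_, err = io.Copy(w, file)
+	return err
+}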
+
+// ObjectGet gets the object into the io.Writer contents.
+//
+// Returns the headers of the response.
+//
+// If checkHash is true then it will calculate the md5sum of the file
+// as it is being received and check it against that returned from the
+// server. If it is wrong then it will return ObjectCorrupted.
+//
+// headers["Content-Type"] will give the content type if desired.
+func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) {
+ file, headers, err := c.ObjectOpen(container, objectName, checkHash, h)
+ if err != nil {
+ return
+ }
+ defer checkClose(file, &err)
+ _, err = io.Copy(contents, file)
+ return
+}
+
+// ObjectGetBytes returns an object as a []byte.
+//
+// This is a simplified interface which checks the MD5.
+func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) {
+ var buf bytes.Buffer
+ _, err = c.ObjectGet(container, objectName, &buf, true, nil)
+ contents = buf.Bytes()
+ return
+}
+
+// ObjectGetString returns an object as a string.
+//
+// This is a simplified interface which checks the MD5.
+func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) {
+ var buf bytes.Buffer
+ _, err = c.ObjectGet(container, objectName, &buf, true, nil)
+ contents = buf.String()
+ return
+}
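+
+// exampleRoundTrip is an illustrative sketch, not part of the upstream
+// library; names are hypothetical. It writes a string with
+// ObjectPutString and reads it back with ObjectGetString; both
+// simplified helpers verify the MD5 automatically.
+func exampleRoundTrip(c *Connection) (string, error) {
+	if err := c.ObjectPutString("mycontainer", "notes.txt", "hello swift", "text/plain"); err != nil {
+		return "", err
+	}
+	return c.ObjectGetString("mycontainer", "notes.txt")
+}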
+
+// ObjectDelete deletes the object.
+//
+// May return ObjectNotFound if the object isn't found
+func (c *Connection) ObjectDelete(container string, objectName string) error {
+ _, _, err := c.storage(RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "DELETE",
+ ErrorMap: objectErrorMap,
+ })
+ return err
+}
+
+// ObjectTempUrl returns a temporary URL for an object
+func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string {
+ mac := hmac.New(sha1.New, []byte(secretKey))
+ prefix, _ := url.Parse(c.StorageUrl)
+ body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName)
+ mac.Write([]byte(body))
+ sig := hex.EncodeToString(mac.Sum(nil))
+ return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix())
+}
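+
+// exampleTempUrl is an illustrative sketch, not part of the upstream
+// library; names are hypothetical. It generates a signed GET URL valid
+// for one hour. secretKey must match the temp URL key configured on
+// the account (conventionally via X-Account-Meta-Temp-URL-Key).
+func exampleTempUrl(c *Connection, secretKey string) string {
+	return c.ObjectTempUrl("mycontainer", "path/to/object", secretKey, "GET", time.Now().Add(time.Hour))
+}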
+
+// parseResponseStatus parses a string like "200 OK" and returns an Error.
+//
+// For status codes between 200 and 299, this returns nil.
+func parseResponseStatus(resp string, errorMap errorMap) error {
+ code := 0
+ reason := resp
+ t := strings.SplitN(resp, " ", 2)
+ if len(t) == 2 {
+ ncode, err := strconv.Atoi(t[0])
+ if err == nil {
+ code = ncode
+ reason = t[1]
+ }
+ }
+ if errorMap != nil {
+ if err, ok := errorMap[code]; ok {
+ return err
+ }
+ }
+ if 200 <= code && code <= 299 {
+ return nil
+ }
+ return newError(code, reason)
+}
+
+// BulkDeleteResult stores results of BulkDelete().
+//
+// Individual errors may (or may not) be returned by Errors.
+// Errors is a map whose keys are the full paths of where the objects
+// were to be deleted, and whose values are Error objects. A full path
+// looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH".
+type BulkDeleteResult struct {
+ NumberNotFound int64 // # of objects not found.
+ NumberDeleted int64 // # of deleted objects.
+ Errors map[string]error // Mapping between object name and an error.
+ Headers Headers // Response HTTP headers.
+}
+
+func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) {
+ var buffer bytes.Buffer
+ for _, s := range objects {
+ u := url.URL{Path: s}
+ buffer.WriteString(u.String() + "\n")
+ }
+ resp, headers, err := c.storage(RequestOpts{
+ Operation: "DELETE",
+ Parameters: url.Values{"bulk-delete": []string{"1"}},
+ Headers: Headers{
+ "Accept": "application/json",
+ "Content-Type": "text/plain",
+ "Content-Length": strconv.Itoa(buffer.Len()),
+ },
+ ErrorMap: ContainerErrorMap,
+ Body: &buffer,
+ })
+ if err != nil {
+ return
+ }
+ var jsonResult struct {
+ NotFound int64 `json:"Number Not Found"`
+ Status string `json:"Response Status"`
+ Errors [][]string
+ Deleted int64 `json:"Number Deleted"`
+ }
+ err = readJson(resp, &jsonResult)
+ if err != nil {
+ return
+ }
+
+ err = parseResponseStatus(jsonResult.Status, objectErrorMap)
+ result.NumberNotFound = jsonResult.NotFound
+ result.NumberDeleted = jsonResult.Deleted
+ result.Headers = headers
+ el := make(map[string]error, len(jsonResult.Errors))
+ for _, t := range jsonResult.Errors {
+ if len(t) != 2 {
+ continue
+ }
+ el[t[0]] = parseResponseStatus(t[1], objectErrorMap)
+ }
+ result.Errors = el
+ return
+}
+
+// BulkDelete deletes multiple objectNames from container in one operation.
+//
+// Some servers may not accept bulk-delete requests since bulk-delete is
+// an optional feature of swift - these will return the Forbidden error.
+//
+// See also:
+// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html
+// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html
+func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) {
+ if len(objectNames) == 0 {
+ result.Errors = make(map[string]error)
+ return
+ }
+ fullPaths := make([]string, len(objectNames))
+ for i, name := range objectNames {
+ fullPaths[i] = fmt.Sprintf("/%s/%s", container, name)
+ }
+ return c.doBulkDelete(fullPaths)
+}
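+
+// exampleBulkDelete is an illustrative sketch, not part of the
+// upstream library; names are hypothetical. It deletes several objects
+// in one request and inspects the per-object errors reported back.
+func exampleBulkDelete(c *Connection) error {
+	result, err := c.BulkDelete("mycontainer", []string{"a.txt", "b.txt", "c.txt"})
+	if err != nil {
+		return err
+	}
+	for path, objErr := range result.Errors {
+		fmt.Printf("failed to delete %s: %v\n", path, objErr)
+	}
+	fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
+	return nil
+}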
+
+// BulkUploadResult stores results of BulkUpload().
+//
+// Individual errors may (or may not) be returned by Errors.
+// Errors is a map whose keys are the full paths of where the objects
+// were to be created, and whose values are Error objects. A full path
+// looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH".
+type BulkUploadResult struct {
+ NumberCreated int64 // # of created objects.
+ Errors map[string]error // Mapping between object name and an error.
+ Headers Headers // Response HTTP headers.
+}
+
+// BulkUpload uploads multiple files in one operation.
+//
+// uploadPath can be empty, a container name, or a pseudo-directory
+// within a container. If uploadPath is empty, new containers may be
+// automatically created.
+//
+// Files are read from dataStream. The format of the stream is specified
+// by the format parameter. Available formats are:
+// * UploadTar - Plain tar stream.
+// * UploadTarGzip - Gzip compressed tar stream.
+// * UploadTarBzip2 - Bzip2 compressed tar stream.
+//
+// Some servers may not accept bulk-upload requests since bulk-upload is
+// an optional feature of swift - these will return the Forbidden error.
+//
+// See also:
+// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html
+// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html
+func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) {
+ extraHeaders := Headers{"Accept": "application/json"}
+ for key, value := range h {
+ extraHeaders[key] = value
+ }
+	// The following code intentionally abuses the Container parameter.
+	// The best fix might be to rename Container to UploadPath.
+ resp, headers, err := c.storage(RequestOpts{
+ Container: uploadPath,
+ Operation: "PUT",
+ Parameters: url.Values{"extract-archive": []string{format}},
+ Headers: extraHeaders,
+ ErrorMap: ContainerErrorMap,
+ Body: dataStream,
+ })
+ if err != nil {
+ return
+ }
+ // Detect old servers which don't support this feature
+ if headers["Content-Type"] != "application/json" {
+ err = Forbidden
+ return
+ }
+ var jsonResult struct {
+ Created int64 `json:"Number Files Created"`
+ Status string `json:"Response Status"`
+ Errors [][]string
+ }
+ err = readJson(resp, &jsonResult)
+ if err != nil {
+ return
+ }
+
+ err = parseResponseStatus(jsonResult.Status, objectErrorMap)
+ result.NumberCreated = jsonResult.Created
+ result.Headers = headers
+ el := make(map[string]error, len(jsonResult.Errors))
+ for _, t := range jsonResult.Errors {
+ if len(t) != 2 {
+ continue
+ }
+ el[t[0]] = parseResponseStatus(t[1], objectErrorMap)
+ }
+ result.Errors = el
+ return
+}
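+
+// exampleBulkUpload is an illustrative sketch, not part of the
+// upstream library; the container name is hypothetical. It extracts an
+// uncompressed tar stream into "mycontainer" using the UploadTar
+// format constant listed above.
+func exampleBulkUpload(c *Connection, tarStream io.Reader) error {
+	result, err := c.BulkUpload("mycontainer", tarStream, UploadTar, nil)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("created %d objects\n", result.NumberCreated)
+	return nil
+}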
+
+// Object returns info about a single object including any metadata in the header.
+//
+// May return ObjectNotFound.
+//
+// Use headers.ObjectMetadata() to read the metadata in the Headers.
+func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) {
+ err = withLORetry(0, func() (Headers, int64, error) {
+ info, headers, err = c.objectBase(container, objectName)
+ if err != nil {
+ return headers, 0, err
+ }
+ return headers, info.Bytes, nil
+ })
+ return
+}
+
+func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) {
+ var resp *http.Response
+ resp, headers, err = c.storage(RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "HEAD",
+ ErrorMap: objectErrorMap,
+ NoResponse: true,
+ })
+ if err != nil {
+ return
+ }
+ // Parse the headers into the struct
+ // HTTP/1.1 200 OK
+ // Date: Thu, 07 Jun 2010 20:59:39 GMT
+ // Server: Apache
+ // Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT
+ // ETag: 8a964ee2a5e88be344f36c22562a6486
+ // Content-Length: 512000
+ // Content-Type: text/plain; charset=UTF-8
+ // X-Object-Meta-Meat: Bacon
+ // X-Object-Meta-Fruit: Bacon
+ // X-Object-Meta-Veggie: Bacon
+ // X-Object-Meta-Dairy: Bacon
+ info.Name = objectName
+ info.ContentType = resp.Header.Get("Content-Type")
+ if resp.Header.Get("Content-Length") != "" {
+ if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil {
+ return
+ }
+ }
+ // Currently ceph doesn't return a Last-Modified header for DLO manifests without any segments
+	// See the ceph tracker: http://tracker.ceph.com/issues/15812
+ if resp.Header.Get("Last-Modified") != "" {
+ info.ServerLastModified = resp.Header.Get("Last-Modified")
+ if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
+ return
+ }
+ }
+
+ info.Hash = resp.Header.Get("Etag")
+ if resp.Header.Get("X-Object-Manifest") != "" {
+ info.ObjectType = DynamicLargeObjectType
+ } else if resp.Header.Get("X-Static-Large-Object") != "" {
+ info.ObjectType = StaticLargeObjectType
+ }
+
+ return
+}
+
+// ObjectUpdate adds, replaces or removes object metadata.
+//
+// Add or Update keys by mentioning them in the Metadata. Use
+// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your
+// Metadata to and from normal HTTP headers.
+//
+// This removes all metadata previously added to the object and
+// replaces it with what is passed in, so to delete keys, just don't
+// mention them in the headers you pass in.
+//
+// Object metadata can only be read with Object(), not with Objects().
+//
+// This can also be used to set headers not already assigned such as
+// X-Delete-At or X-Delete-After for expiring objects.
+//
+// You cannot use this to change any of the object's other headers
+// such as Content-Type, ETag, etc.
+//
+// Refer to copying an object when you need to update metadata or
+// other headers such as Content-Type or CORS headers.
+//
+// May return ObjectNotFound.
+func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error {
+ _, _, err := c.storage(RequestOpts{
+ Container: container,
+ ObjectName: objectName,
+ Operation: "POST",
+ ErrorMap: objectErrorMap,
+ NoResponse: true,
+ Headers: h,
+ })
+ return err
+}
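+
+// exampleObjectUpdate is an illustrative sketch, not part of the
+// upstream library; names are hypothetical. Since ObjectUpdate
+// replaces all existing object metadata, the usual pattern is
+// read-modify-write: fetch the current metadata with
+// Headers.ObjectMetadata, change it, and POST the whole set back via
+// Metadata.ObjectHeaders (Metadata is assumed to be the package's
+// map-of-strings type).
+func exampleObjectUpdate(c *Connection) error {
+	_, headers, err := c.Object("mycontainer", "path/to/object")
+	if err != nil {
+		return err
+	}
+	meta := headers.ObjectMetadata()
+	meta["color"] = "blue" // add or overwrite one key, keep the rest
+	return c.ObjectUpdate("mycontainer", "path/to/object", meta.ObjectHeaders())
+}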
+
+// urlPathEscape escapes the URL path in the string using URL escaping rules
+//
+// This mimics url.PathEscape, which is only available from Go 1.8
+func urlPathEscape(in string) string {
+ var u url.URL
+ u.Path = in
+ return u.String()
+}
+
+// ObjectCopy does a server side copy of an object to a new position
+//
+// All metadata is preserved. If metadata is set in the headers then
+// it overrides the old metadata on the copied object.
+//
+// The destination container must exist before the copy.
+//
+// You can use this to copy an object to itself - this is the only way
+// to update the content type of an object.
+func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) {
+ // Meta stuff
+ extraHeaders := map[string]string{
+ "Destination": urlPathEscape(dstContainer + "/" + dstObjectName),
+ }
+ for key, value := range h {
+ extraHeaders[key] = value
+ }
+ _, headers, err = c.storage(RequestOpts{
+ Container: srcContainer,
+ ObjectName: srcObjectName,
+ Operation: "COPY",
+ ErrorMap: objectErrorMap,
+ NoResponse: true,
+ Headers: extraHeaders,
+ })
+ return
+}
+
+// ObjectMove does a server side move of an object to a new position
+//
+// This is a convenience method which calls ObjectCopy then ObjectDelete
+//
+// All metadata is preserved.
+//
+// The destination container must exist before the copy.
+func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) {
+ _, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil)
+ if err != nil {
+ return
+ }
+ return c.ObjectDelete(srcContainer, srcObjectName)
+}
+
+// ObjectUpdateContentType updates the content type of an object
+//
+// This is a convenience method which calls ObjectCopy
+//
+// All other metadata is preserved.
+func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) {
+ h := Headers{"Content-Type": contentType}
+ _, err = c.ObjectCopy(container, objectName, container, objectName, h)
+ return
+}
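+
+// exampleObjectCopy is an illustrative sketch, not part of the
+// upstream library; names are hypothetical. It performs a server side
+// copy into a pre-existing destination container, overriding one piece
+// of metadata on the copy via an X-Object-Meta- header.
+func exampleObjectCopy(c *Connection) error {
+	h := Headers{"X-Object-Meta-Origin": "copied"}
+	_, err := c.ObjectCopy("src-container", "a.txt", "dst-container", "a.txt", h)
+	return err
+}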
+
+// ------------------------------------------------------------
+
+// VersionContainerCreate is a helper method for creating and enabling version controlled containers.
+//
+// It creates the current object container and the non-current object
+// version container, then enables versioning.
+//
+// If the server doesn't support versioning then it will return
+// Forbidden; however, it will have created both containers by that point.
+func (c *Connection) VersionContainerCreate(current, version string) error {
+ if err := c.ContainerCreate(version, nil); err != nil {
+ return err
+ }
+ if err := c.ContainerCreate(current, nil); err != nil {
+ return err
+ }
+ if err := c.VersionEnable(current, version); err != nil {
+ return err
+ }
+ return nil
+}
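+
+// exampleVersioning is an illustrative sketch, not part of the
+// upstream library; container names are hypothetical. It sets up a
+// pair of containers so that overwritten objects in "docs" are
+// preserved in "docs-versions".
+func exampleVersioning(c *Connection) error {
+	// Forbidden here means both containers were created but the
+	// server doesn't support the X-Versions-Location feature.
+	return c.VersionContainerCreate("docs", "docs-versions")
+}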
+
+// VersionEnable enables versioning on the current container with version as the tracking container.
+//
+// May return Forbidden if this isn't supported by the server
+func (c *Connection) VersionEnable(current, version string) error {
+ h := Headers{"X-Versions-Location": version}
+ if err := c.ContainerUpdate(current, h); err != nil {
+ return err
+ }
+ // Check to see if the header was set properly
+ _, headers, err := c.Container(current)
+ if err != nil {
+ return err
+ }
+ // If failed to set versions header, return Forbidden as the server doesn't support this
+ if headers["X-Versions-Location"] != version {
+ return Forbidden
+ }
+ return nil
+}
+
+// VersionDisable disables versioning on the current container.
+func (c *Connection) VersionDisable(current string) error {
+ h := Headers{"X-Versions-Location": ""}
+ if err := c.ContainerUpdate(current, h); err != nil {
+ return err
+ }
+ return nil
+}
+
+// VersionObjectList returns a list of older versions of the object.
+//
+// Objects are returned in the format <length><object_name>/<timestamp>
+func (c *Connection) VersionObjectList(version, object string) ([]string, error) {
+ opts := &ObjectsOpts{
+ // <3-character zero-padded hexadecimal character length>