overdue
This commit is contained in:
28
.rclone_repo/vendor/github.com/ncw/go-acd/.gitignore
generated
vendored
Executable file
28
.rclone_repo/vendor/github.com/ncw/go-acd/.gitignore
generated
vendored
Executable file
@@ -0,0 +1,28 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
# IDE artifacts
|
||||
*.iml
|
||||
.idea/
|
||||
22
.rclone_repo/vendor/github.com/ncw/go-acd/.travis.yml
generated
vendored
Executable file
22
.rclone_repo/vendor/github.com/ncw/go-acd/.travis.yml
generated
vendored
Executable file
@@ -0,0 +1,22 @@
|
||||
# enable container-based infrastructure by setting sudo to false
|
||||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
install:
|
||||
- wget -qO- https://raw.githubusercontent.com/pote/gpm/v1.3.2/bin/gpm | bash
|
||||
|
||||
script:
|
||||
- make test testrace
|
||||
6
.rclone_repo/vendor/github.com/ncw/go-acd/Godeps
generated
vendored
Executable file
6
.rclone_repo/vendor/github.com/ncw/go-acd/Godeps
generated
vendored
Executable file
@@ -0,0 +1,6 @@
|
||||
# Dependencies can by installed via [gpm](https://github.com/pote/gpm)
|
||||
|
||||
github.com/google/go-querystring/query
|
||||
|
||||
# for tests
|
||||
github.com/stretchr/testify
|
||||
14
.rclone_repo/vendor/github.com/ncw/go-acd/LICENSE
generated
vendored
Executable file
14
.rclone_repo/vendor/github.com/ncw/go-acd/LICENSE
generated
vendored
Executable file
@@ -0,0 +1,14 @@
|
||||
Copyright (c) 2015, Serge Gebhardt <>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
55
.rclone_repo/vendor/github.com/ncw/go-acd/Makefile
generated
vendored
Executable file
55
.rclone_repo/vendor/github.com/ncw/go-acd/Makefile
generated
vendored
Executable file
@@ -0,0 +1,55 @@
|
||||
TEST?=./...
|
||||
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
|
||||
|
||||
default: test
|
||||
|
||||
# test runs the unit tests and vets the code
|
||||
test:
|
||||
ACD_ACC= go test $(TEST) $(TESTARGS) -timeout=30s -parallel=4
|
||||
@$(MAKE) fmt
|
||||
@$(MAKE) vet
|
||||
|
||||
# testacc runs acceptance tests
|
||||
testacc:
|
||||
@if [ "$(TEST)" = "./..." ]; then \
|
||||
echo "ERROR: Set TEST to a specific package"; \
|
||||
exit 1; \
|
||||
fi
|
||||
ACD_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 45m
|
||||
|
||||
# testrace runs the race checker
|
||||
testrace:
|
||||
ACD_ACC= go test -race $(TEST) $(TESTARGS)
|
||||
|
||||
# updatedeps installs all the dependencies needed to run
|
||||
# and build
|
||||
updatedeps:
|
||||
@gpm
|
||||
|
||||
cover:
|
||||
@go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \
|
||||
go get -u golang.org/x/tools/cmd/cover; \
|
||||
fi
|
||||
go test $(TEST) -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out
|
||||
rm coverage.out
|
||||
|
||||
# fmt formats the Go source code
|
||||
fmt:
|
||||
@go list ./... \
|
||||
| xargs go fmt
|
||||
|
||||
# vet runs the Go source code static analysis tool `vet` to find
|
||||
# any common errors.
|
||||
vet:
|
||||
@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
|
||||
go get golang.org/x/tools/cmd/vet; \
|
||||
fi
|
||||
@go list -f '{{.Dir}}' ./... \
|
||||
| xargs go tool vet ; if [ $$? -eq 1 ]; then \
|
||||
echo ""; \
|
||||
echo "Vet found suspicious constructs. Please check the reported constructs"; \
|
||||
echo "and fix them if necessary before submitting the code for reviewal."; \
|
||||
fi
|
||||
|
||||
.PHONY: default test vet
|
||||
9
.rclone_repo/vendor/github.com/ncw/go-acd/README.md
generated
vendored
Executable file
9
.rclone_repo/vendor/github.com/ncw/go-acd/README.md
generated
vendored
Executable file
@@ -0,0 +1,9 @@
|
||||
# go-acd [](https://travis-ci.org/sgeb/go-acd)
|
||||
|
||||
Go library for accessing the Amazon Cloud Drive.
|
||||
|
||||
This library is the basis for [`acdcli`](https://github.com/sgeb/acdcli).
|
||||
|
||||
Still work in progress. Focusing on read-only operations at first. Refer to the
|
||||
[milestones](https://github.com/sgeb/go-acd/milestones) and
|
||||
[issues](https://github.com/sgeb/go-acd/issues) for planned features.
|
||||
142
.rclone_repo/vendor/github.com/ncw/go-acd/account.go
generated
vendored
Executable file
142
.rclone_repo/vendor/github.com/ncw/go-acd/account.go
generated
vendored
Executable file
@@ -0,0 +1,142 @@
|
||||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AccountService provides access to the account related functions
|
||||
// in the Amazon Cloud Drive API.
|
||||
//
|
||||
// See: https://developer.amazon.com/public/apis/experience/cloud-drive/content/account
|
||||
type AccountService struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// AccountEndpoints represents information about the current customer's endpoints
|
||||
type AccountEndpoints struct {
|
||||
CustomerExists bool `json:"customerExists"`
|
||||
ContentURL string `json:"contentUrl"`
|
||||
MetadataURL string `json:"metadataUrl"`
|
||||
}
|
||||
|
||||
// GetEndpoints retrives the current endpoints for this customer
|
||||
//
|
||||
// It also updates the endpoints in the client
|
||||
func (s *AccountService) GetEndpoints() (*AccountEndpoints, *http.Response, error) {
|
||||
req, err := s.client.NewMetadataRequest("GET", "account/endpoint", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
endpoints := &AccountEndpoints{}
|
||||
resp, err := s.client.Do(req, endpoints)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
// Update the client endpoints
|
||||
if endpoints.MetadataURL != "" {
|
||||
u, err := url.Parse(endpoints.MetadataURL)
|
||||
if err == nil {
|
||||
s.client.MetadataURL = u
|
||||
}
|
||||
}
|
||||
if endpoints.ContentURL != "" {
|
||||
u, err := url.Parse(endpoints.ContentURL)
|
||||
if err == nil {
|
||||
s.client.ContentURL = u
|
||||
}
|
||||
}
|
||||
|
||||
return endpoints, resp, err
|
||||
}
|
||||
|
||||
// AccountInfo represents information about an Amazon Cloud Drive account.
|
||||
type AccountInfo struct {
|
||||
TermsOfUse *string `json:"termsOfUse"`
|
||||
Status *string `json:"status"`
|
||||
}
|
||||
|
||||
// GetInfo provides information about the current user account like
|
||||
// the status and the accepted “Terms Of Use”.
|
||||
func (s *AccountService) GetInfo() (*AccountInfo, *http.Response, error) {
|
||||
req, err := s.client.NewMetadataRequest("GET", "account/info", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
accountInfo := &AccountInfo{}
|
||||
resp, err := s.client.Do(req, accountInfo)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return accountInfo, resp, err
|
||||
}
|
||||
|
||||
// AccountQuota represents information about the account quotas.
|
||||
type AccountQuota struct {
|
||||
Quota *uint64 `json:"quota"`
|
||||
LastCalculated *time.Time `json:"lastCalculated"`
|
||||
Available *uint64 `json:"available"`
|
||||
}
|
||||
|
||||
// GetQuota gets account quota and storage availability information.
|
||||
func (s *AccountService) GetQuota() (*AccountQuota, *http.Response, error) {
|
||||
req, err := s.client.NewMetadataRequest("GET", "account/quota", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
accountQuota := &AccountQuota{}
|
||||
resp, err := s.client.Do(req, accountQuota)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return accountQuota, resp, err
|
||||
}
|
||||
|
||||
// AccountUsage represents information about the account usage.
|
||||
type AccountUsage struct {
|
||||
LastCalculated *time.Time `json:"lastCalculated"`
|
||||
Other *CategoryUsage `json:"other"`
|
||||
Doc *CategoryUsage `json:"doc"`
|
||||
Photo *CategoryUsage `json:"photo"`
|
||||
Video *CategoryUsage `json:"video"`
|
||||
}
|
||||
|
||||
// CategoryUsage defines Total and Billable UsageNumbers
|
||||
type CategoryUsage struct {
|
||||
Total *UsageNumbers `json:"total"`
|
||||
Billable *UsageNumbers `json:"billable"`
|
||||
}
|
||||
|
||||
// UsageNumbers defines Bytes and Count for a metered count
|
||||
type UsageNumbers struct {
|
||||
Bytes *uint64 `json:"bytes"`
|
||||
Count *uint64 `json:"count"`
|
||||
}
|
||||
|
||||
// GetUsage gets Account Usage information broken down by content category.
|
||||
func (s *AccountService) GetUsage() (*AccountUsage, *http.Response, error) {
|
||||
req, err := s.client.NewMetadataRequest("GET", "account/usage", nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
accountUsage := &AccountUsage{}
|
||||
resp, err := s.client.Do(req, accountUsage)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return accountUsage, resp, err
|
||||
}
|
||||
85
.rclone_repo/vendor/github.com/ncw/go-acd/changes.go
generated
vendored
Executable file
85
.rclone_repo/vendor/github.com/ncw/go-acd/changes.go
generated
vendored
Executable file
@@ -0,0 +1,85 @@
|
||||
package acd
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ChangesService provides access to incemental changes in the Amazon Cloud Drive API.
|
||||
//
|
||||
// See: https://developer.amazon.com/public/apis/experience/cloud-drive/content/changes
|
||||
type ChangesService struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// A ChangeSet is collection of node changes as received from the Changes API
|
||||
type ChangeSet struct {
|
||||
Checkpoint string `json:"checkpoint"`
|
||||
Nodes []*Node `json:"nodes"`
|
||||
Reset bool `json:"reset"`
|
||||
StatusCode int `json:"statusCode"`
|
||||
End bool `json:"end"`
|
||||
}
|
||||
|
||||
// ChangesOptions contains all possible arguments for the Changes API
|
||||
type ChangesOptions struct {
|
||||
Checkpoint string `json:"checkpoint,omitempty"`
|
||||
ChunkSize int `json:"chunkSize,omitempty"`
|
||||
MaxNodes int `json:"maxNodes,omitempty"`
|
||||
IncludePurged bool `json:"includePurged,omitempty,string"`
|
||||
}
|
||||
|
||||
// GetChanges returns all the changes since opts.Checkpoint
|
||||
func (s *ChangesService) GetChanges(opts *ChangesOptions) ([]*ChangeSet, *http.Response, error) {
|
||||
var changeSets []*ChangeSet
|
||||
resp, err := s.GetChangesFunc(opts, func(cs *ChangeSet, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
changeSets = append(changeSets, cs)
|
||||
return nil
|
||||
})
|
||||
return changeSets, resp, err
|
||||
}
|
||||
|
||||
// GetChangesChan gets all the changes since opts.Checkpoint sending each ChangeSet to the channel.
|
||||
// The provided channel is closed before returning
|
||||
func (s *ChangesService) GetChangesChan(opts *ChangesOptions, ch chan<- *ChangeSet) (*http.Response, error) {
|
||||
defer close(ch)
|
||||
|
||||
return s.GetChangesFunc(opts, func(cs *ChangeSet, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ch <- cs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// GetChangesFunc gets all the changes since opts.Checkpoint and calls f with the ChangeSet or the error received.
|
||||
// If f returns a non nil value, GetChangesFunc exits and returns the given error.
|
||||
func (s *ChangesService) GetChangesFunc(opts *ChangesOptions, f func(*ChangeSet, error) error) (*http.Response, error) {
|
||||
req, err := s.client.NewMetadataRequest("POST", "changes", opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := s.client.Do(req, nil)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
for {
|
||||
changeSet := &ChangeSet{}
|
||||
err := decoder.Decode(&changeSet)
|
||||
if err == io.EOF {
|
||||
return resp, nil
|
||||
}
|
||||
err = f(changeSet, err)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
}
|
||||
195
.rclone_repo/vendor/github.com/ncw/go-acd/client.go
generated
vendored
Executable file
195
.rclone_repo/vendor/github.com/ncw/go-acd/client.go
generated
vendored
Executable file
@@ -0,0 +1,195 @@
|
||||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// LibraryVersion is the current version of this library
|
||||
LibraryVersion = "0.1.0"
|
||||
defaultMetadataURL = "https://drive.amazonaws.com/drive/v1/"
|
||||
defaultContentURL = "https://content-na.drive.amazonaws.com/cdproxy/"
|
||||
userAgent = "go-acd/" + LibraryVersion
|
||||
)
|
||||
|
||||
// A Client manages communication with the Amazon Cloud Drive API.
|
||||
type Client struct {
|
||||
// HTTP client used to communicate with the API.
|
||||
httpClient *http.Client
|
||||
|
||||
// Metadata URL for API requests. Defaults to the public Amazon Cloud Drive API.
|
||||
// MetadataURL should always be specified with a trailing slash.
|
||||
MetadataURL *url.URL
|
||||
|
||||
// Content URL for API requests. Defaults to the public Amazon Cloud Drive API.
|
||||
// ContentURL should always be specified with a trailing slash.
|
||||
ContentURL *url.URL
|
||||
|
||||
// User agent used when communicating with the API.
|
||||
UserAgent string
|
||||
|
||||
// Services used for talking to different parts of the API.
|
||||
Account *AccountService
|
||||
Nodes *NodesService
|
||||
Changes *ChangesService
|
||||
}
|
||||
|
||||
// NewClient returns a new Amazon Cloud Drive API client. If a nil httpClient is
|
||||
// provided, http.DefaultClient will be used. To use API methods which require
|
||||
// authentication, provide an http.Client that will perform the authentication
|
||||
// for you (such as that provided by the golang.org/x/oauth2 library).
|
||||
func NewClient(httpClient *http.Client) *Client {
|
||||
if httpClient == nil {
|
||||
httpClient = http.DefaultClient
|
||||
}
|
||||
metadataURL, _ := url.Parse(defaultMetadataURL)
|
||||
contentURL, _ := url.Parse(defaultContentURL)
|
||||
|
||||
c := &Client{
|
||||
httpClient: httpClient,
|
||||
MetadataURL: metadataURL,
|
||||
ContentURL: contentURL,
|
||||
UserAgent: userAgent,
|
||||
}
|
||||
|
||||
c.Account = &AccountService{client: c}
|
||||
c.Nodes = &NodesService{client: c}
|
||||
c.Changes = &ChangesService{client: c}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// NewMetadataRequest creates an API request for metadata. A relative URL can be
|
||||
// provided in urlStr, in which case it is resolved relative to the MetadataURL
|
||||
// of the Client. Relative URLs should always be specified without a preceding
|
||||
// slash. If specified, the value pointed to by body is JSON encoded and included
|
||||
// as the request body.
|
||||
func (c *Client) NewMetadataRequest(method, urlStr string, body interface{}) (*http.Request, error) {
|
||||
return c.newRequest(c.MetadataURL, method, urlStr, body)
|
||||
}
|
||||
|
||||
// NewContentRequest creates an API request for content. A relative URL can be
|
||||
// provided in urlStr, in which case it is resolved relative to the ContentURL
|
||||
// of the Client. Relative URLs should always be specified without a preceding
|
||||
// slash. If specified, the value pointed to by body is JSON encoded and included
|
||||
// as the request body.
|
||||
func (c *Client) NewContentRequest(method, urlStr string, body interface{}) (*http.Request, error) {
|
||||
return c.newRequest(c.ContentURL, method, urlStr, body)
|
||||
}
|
||||
|
||||
// newRequest creates an API request. A relative URL can be provided in urlStr,
|
||||
// in which case it is resolved relative to base URL.
|
||||
// Relative URLs should always be specified without a preceding slash. If
|
||||
// specified, the value pointed to by body is JSON encoded and included as the
|
||||
// request body.
|
||||
func (c *Client) newRequest(base *url.URL, method, urlStr string, body interface{}) (*http.Request, error) {
|
||||
rel, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u := base.ResolveReference(rel)
|
||||
|
||||
bodyReader, ok := body.(io.Reader)
|
||||
if !ok && body != nil {
|
||||
buf := &bytes.Buffer{}
|
||||
err := json.NewEncoder(buf).Encode(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bodyReader = buf
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, u.String(), bodyReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// req.Header.Add("Accept", mediaTypeV3)
|
||||
if c.UserAgent != "" {
|
||||
req.Header.Add("User-Agent", c.UserAgent)
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// Do sends an API request and returns the API response. The API response is
|
||||
// JSON decoded and stored in the value pointed to by v, or returned as an
|
||||
// error if an API error has occurred. If v implements the io.Writer
|
||||
// interface, the raw response body will be written to v, without attempting to
|
||||
// first decode it. If v is nil then the resp.Body won't be closed - this is
|
||||
// your responsibility.
|
||||
//
|
||||
func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
|
||||
//buf, _ := httputil.DumpRequest(req, true)
|
||||
//buf, _ := httputil.DumpRequest(req, false)
|
||||
//log.Printf("req = %s", string(buf))
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
//buf, _ = httputil.DumpResponse(resp, true)
|
||||
//buf, _ = httputil.DumpResponse(resp, false)
|
||||
//log.Printf("resp = %s", string(buf))
|
||||
|
||||
err = CheckResponse(resp)
|
||||
if err != nil {
|
||||
// even though there was an error, we still return the response
|
||||
// in case the caller wants to inspect it further. We do close the
|
||||
// Body though
|
||||
if v == nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if v != nil {
|
||||
if w, ok := v.(io.Writer); ok {
|
||||
io.Copy(w, resp.Body)
|
||||
} else {
|
||||
err = json.NewDecoder(resp.Body).Decode(v)
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// CheckResponse checks the API response for errors, and returns them if
|
||||
// present. A response is considered an error if it has a status code outside
|
||||
// the 200 range.
|
||||
func CheckResponse(r *http.Response) error {
|
||||
c := r.StatusCode
|
||||
if 200 <= c && c <= 299 {
|
||||
return nil
|
||||
}
|
||||
|
||||
errBody := ""
|
||||
if data, err := ioutil.ReadAll(r.Body); err == nil {
|
||||
errBody = strings.TrimSpace(string(data))
|
||||
}
|
||||
|
||||
errMsg := fmt.Sprintf("HTTP code %v: %q: ", c, r.Status)
|
||||
if errBody == "" {
|
||||
errMsg += "no response body"
|
||||
} else {
|
||||
errMsg += fmt.Sprintf("response body: %q", errBody)
|
||||
}
|
||||
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
766
.rclone_repo/vendor/github.com/ncw/go-acd/nodes.go
generated
vendored
Executable file
766
.rclone_repo/vendor/github.com/ncw/go-acd/nodes.go
generated
vendored
Executable file
@@ -0,0 +1,766 @@
|
||||
// Copyright (c) 2015 Serge Gebhardt. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by the ISC
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
|
||||
"github.com/google/go-querystring/query"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorNodeNotFound is returned from GetFile, GetFolder, GetNode
|
||||
ErrorNodeNotFound = errors.New("Node not found")
|
||||
)
|
||||
|
||||
// NodesService provides access to the nodes in the Amazon Cloud Drive API.
|
||||
//
|
||||
// See: https://developer.amazon.com/public/apis/experience/cloud-drive/content/nodes
|
||||
type NodesService struct {
|
||||
client *Client
|
||||
}
|
||||
|
||||
// GetRoot gets the root folder of the Amazon Cloud Drive.
|
||||
func (s *NodesService) GetRoot() (*Folder, *http.Response, error) {
|
||||
opts := &NodeListOptions{Filters: "kind:FOLDER AND isRoot:true"}
|
||||
|
||||
roots, resp, err := s.GetNodes(opts)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
if len(roots) < 1 {
|
||||
return nil, resp, errors.New("No root found")
|
||||
}
|
||||
|
||||
return &Folder{roots[0]}, resp, nil
|
||||
}
|
||||
|
||||
// GetAllNodes gets the list of all nodes.
|
||||
func (s *NodesService) GetAllNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
return s.listAllNodes("nodes", opts)
|
||||
}
|
||||
|
||||
// GetNodes gets a list of nodes, up until the limit (either default or the one set in opts).
|
||||
func (s *NodesService) GetNodes(opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
nodes, res, err := s.listNodes("nodes", opts)
|
||||
return nodes, res, err
|
||||
}
|
||||
|
||||
func (s *NodesService) listAllNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
// Need opts to maintain state (NodeListOptions.reachedEnd)
|
||||
if opts == nil {
|
||||
opts = &NodeListOptions{}
|
||||
}
|
||||
|
||||
result := make([]*Node, 0, 200)
|
||||
|
||||
for {
|
||||
nodes, resp, err := s.listNodes(url, opts)
|
||||
if err != nil {
|
||||
return result, resp, err
|
||||
}
|
||||
if nodes == nil {
|
||||
break
|
||||
}
|
||||
|
||||
result = append(result, nodes...)
|
||||
}
|
||||
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
func (s *NodesService) listNodes(url string, opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
if opts != nil && opts.reachedEnd {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
url, err := addOptions(url, opts)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
req, err := s.client.NewMetadataRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
nodeList := &nodeListInternal{}
|
||||
resp, err := s.client.Do(req, nodeList)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
if nodeList.NextToken != nil {
|
||||
opts.StartToken = *nodeList.NextToken
|
||||
} else {
|
||||
opts.reachedEnd = true
|
||||
}
|
||||
}
|
||||
|
||||
nodes := nodeList.Data
|
||||
for _, node := range nodes {
|
||||
node.service = s
|
||||
}
|
||||
|
||||
return nodes, resp, nil
|
||||
}
|
||||
|
||||
type nodeListInternal struct {
|
||||
Count *uint64 `json:"count"`
|
||||
NextToken *string `json:"nextToken"`
|
||||
Data []*Node `json:"data"`
|
||||
}
|
||||
|
||||
// Node represents a digital asset on the Amazon Cloud Drive, including files
|
||||
// and folders, in a parent-child relationship. A node contains only metadata
|
||||
// (e.g. folder) or it contains metadata and content (e.g. file).
|
||||
type Node struct {
|
||||
Id *string `json:"id"`
|
||||
Name *string `json:"name"`
|
||||
Kind *string `json:"kind"`
|
||||
ModifiedDate *string `json:"modifiedDate"`
|
||||
Parents []string `json:"parents"`
|
||||
Status *string `json:"status"`
|
||||
ContentProperties *struct {
|
||||
Size *uint64 `json:"size"`
|
||||
Md5 *string `json:"md5"`
|
||||
ContentType *string `json:"contentType"`
|
||||
} `json:"contentProperties"`
|
||||
TempURL string `json:"tempLink"`
|
||||
|
||||
service *NodesService
|
||||
}
|
||||
|
||||
// NodeFromId constructs a skeleton Node from an Id and a NodeService
|
||||
func NodeFromId(ID string, service *NodesService) *Node {
|
||||
return &Node{
|
||||
Id: &ID,
|
||||
service: service,
|
||||
}
|
||||
}
|
||||
|
||||
// IsFile returns whether the node represents a file.
|
||||
func (n *Node) IsFile() bool {
|
||||
return n.Kind != nil && *n.Kind == "FILE"
|
||||
}
|
||||
|
||||
// IsFolder returns whether the node represents a folder.
|
||||
func (n *Node) IsFolder() bool {
|
||||
return n.Kind != nil && *n.Kind == "FOLDER"
|
||||
}
|
||||
|
||||
// Typed returns the Node typed as either File or Folder.
|
||||
func (n *Node) Typed() interface{} {
|
||||
if n.IsFile() {
|
||||
return &File{n}
|
||||
}
|
||||
|
||||
if n.IsFolder() {
|
||||
return &Folder{n}
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// GetTempURL sets the TempURL for the node passed in if it isn't already set
|
||||
func (n *Node) GetTempURL() (*http.Response, error) {
|
||||
if n.TempURL != "" {
|
||||
return nil, nil
|
||||
}
|
||||
url := fmt.Sprintf("nodes/%s?tempLink=true", *n.Id)
|
||||
req, err := n.service.client.NewMetadataRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
node := &Node{}
|
||||
resp, err := n.service.client.Do(req, node)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if node.TempURL == "" {
|
||||
return resp, fmt.Errorf("Couldn't read TempURL")
|
||||
}
|
||||
|
||||
// Set the TempURL in the node
|
||||
n.TempURL = node.TempURL
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetMetadata return a pretty-printed JSON string of the node's metadata
|
||||
func (n *Node) GetMetadata() (string, error) {
|
||||
url := fmt.Sprintf("nodes/%s?tempLink=true", *n.Id)
|
||||
req, err := n.service.client.NewMetadataRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
_, err = n.service.client.Do(req, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
md := &bytes.Buffer{}
|
||||
err = json.Indent(md, buf.Bytes(), "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return md.String(), nil
|
||||
}
|
||||
|
||||
type replaceParent struct {
|
||||
FromParent string `json:"fromParent"`
|
||||
ChildID string `json:"childId"`
|
||||
}
|
||||
|
||||
// ReplaceParent puts Node n below a new parent while removing the old one at the same time.
|
||||
// This is equivalent to calling AddParent and RemoveParent sequentially, but
|
||||
// only needs one REST call. Can return a 409 Conflict if there's already a
|
||||
// file or folder in the new location with the same name as Node n.
|
||||
func (n *Node) ReplaceParent(oldParentID string, newParentID string) (*http.Response, error) {
|
||||
body := &replaceParent{
|
||||
FromParent: oldParentID,
|
||||
ChildID: *n.Id,
|
||||
}
|
||||
url := fmt.Sprintf("nodes/%s/children", newParentID)
|
||||
req, err := n.service.client.NewMetadataRequest("POST", url, &body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := n.service.client.Do(req, nil)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
n.Parents = []string{newParentID}
|
||||
|
||||
err = resp.Body.Close()
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// AddParent adds an additional parent to Node n. Can return a 409 Conflict if there's
|
||||
// already a file or folder below the new parent with the same name as Node n.
|
||||
func (n *Node) AddParent(newParentID string) (*http.Response, error) {
|
||||
return n.changeParents(newParentID, true)
|
||||
}
|
||||
|
||||
// RemoveParent removes a parent from Node n. If all parents are removed, the file is instead
|
||||
// attached to the absolute root folder of AmazonDrive.
|
||||
func (n *Node) RemoveParent(parentID string) (*http.Response, error) {
|
||||
return n.changeParents(parentID, false)
|
||||
}
|
||||
|
||||
func (n *Node) changeParents(parentID string, add bool) (*http.Response, error) {
|
||||
method := "DELETE"
|
||||
if add {
|
||||
method = "PUT"
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("nodes/%s/children/%s", parentID, *n.Id)
|
||||
req, err := n.service.client.NewMetadataRequest(method, url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := n.service.client.Do(req, nil)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
if add {
|
||||
n.Parents = append(n.Parents, parentID)
|
||||
} else {
|
||||
var removeIndex int
|
||||
for i := 0; i < len(n.Parents); i++ {
|
||||
if n.Parents[i] == parentID {
|
||||
removeIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
n.Parents = append(n.Parents[:removeIndex], n.Parents[removeIndex+1:]...)
|
||||
}
|
||||
|
||||
err = resp.Body.Close()
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// renameNode is a cut down set of parameters for renaming nodes
|
||||
type renameNode struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// Rename node
|
||||
func (n *Node) Rename(newName string) (*Node, *http.Response, error) {
|
||||
url := fmt.Sprintf("nodes/%s", *n.Id)
|
||||
metadata := renameNode{
|
||||
Name: newName,
|
||||
}
|
||||
|
||||
node := &Node{service: n.service}
|
||||
req, err := n.service.client.NewMetadataRequest("PATCH", url, &metadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resp, err := n.service.client.Do(req, node)
|
||||
return node, resp, err
|
||||
}
|
||||
|
||||
// Trash places Node n into the trash. If the node is a directory it
|
||||
// places it and all of its contents into the trash.
|
||||
func (n *Node) Trash() (*http.Response, error) {
|
||||
url := fmt.Sprintf("trash/%s", *n.Id)
|
||||
req, err := n.service.client.NewMetadataRequest("PUT", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := n.service.client.Do(req, nil)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
return resp, nil
|
||||
|
||||
}
|
||||
|
||||
// Restore moves a previously trashed Node n back into all its connected parents
|
||||
func (n *Node) Restore() (*Node, *http.Response, error) {
|
||||
url := fmt.Sprintf("trash/%s/restore", *n.Id)
|
||||
req, err := n.service.client.NewMetadataRequest("POST", url, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
node := &Node{service: n.service}
|
||||
resp, err := n.service.client.Do(req, node)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
err = resp.Body.Close()
|
||||
return node, resp, err
|
||||
}
|
||||
|
||||
// File represents a file on the Amazon Cloud Drive.
|
||||
type File struct {
|
||||
*Node
|
||||
}
|
||||
|
||||
// OpenHeaders opens the content of the file f for read
|
||||
//
|
||||
// Extra headers for the GET can be passed in in headers
|
||||
//
|
||||
// You must call in.Close() when finished
|
||||
func (f *File) OpenHeaders(headers map[string]string) (in io.ReadCloser, resp *http.Response, err error) {
|
||||
url := fmt.Sprintf("nodes/%s/content", *f.Id)
|
||||
req, err := f.service.client.NewContentRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
resp, err = f.service.client.Do(req, nil)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return resp.Body, resp, nil
|
||||
}
|
||||
|
||||
// Open the content of the file f for read.
// It is a convenience wrapper around OpenHeaders with no extra headers.
//
// You must call in.Close() when finished
func (f *File) Open() (in io.ReadCloser, resp *http.Response, err error) {
	return f.OpenHeaders(nil)
}
|
||||
|
||||
// OpenTempURLHeaders opens the content of the file f for read from the TempURL
|
||||
//
|
||||
// Pass in an http Client (without authorization) for the download.
|
||||
//
|
||||
// You must call in.Close() when finished
|
||||
func (f *File) OpenTempURLHeaders(client *http.Client, headers map[string]string) (in io.ReadCloser, resp *http.Response, err error) {
|
||||
resp, err = f.GetTempURL()
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
req, err := http.NewRequest("GET", f.TempURL, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if f.service.client.UserAgent != "" {
|
||||
req.Header.Add("User-Agent", f.service.client.UserAgent)
|
||||
}
|
||||
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return resp.Body, resp, nil
|
||||
}
|
||||
|
||||
// OpenTempURL opens the content of the file f for read from the TempURL.
// It is a convenience wrapper around OpenTempURLHeaders with no extra headers.
//
// Pass in an http Client (without authorization) for the download.
//
// You must call in.Close() when finished
func (f *File) OpenTempURL(client *http.Client) (in io.ReadCloser, resp *http.Response, err error) {
	return f.OpenTempURLHeaders(client, nil)
}
|
||||
|
||||
// Download fetches the content of file f and stores it into the file pointed
|
||||
// to by path. Errors if the file at path already exists. Does not create the
|
||||
// intermediate directories in path.
|
||||
func (f *File) Download(path string) (*http.Response, error) {
|
||||
url := fmt.Sprintf("nodes/%s/content", *f.Id)
|
||||
req, err := f.service.client.NewContentRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
resp, err := f.service.client.Do(req, out)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Folder represents a folder on the Amazon Cloud Drive.
// It embeds *Node, so all Node methods are available on a Folder.
type Folder struct {
	*Node
}
|
||||
|
||||
// FolderFromId constructs a skeleton Folder from an Id and a NodeService
|
||||
func FolderFromId(ID string, service *NodesService) *Folder {
|
||||
return &Folder{
|
||||
Node: NodeFromId(ID, service),
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllChildren gets the list of all children.
|
||||
func (f *Folder) GetAllChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
url := fmt.Sprintf("nodes/%s/children", *f.Id)
|
||||
return f.service.listAllNodes(url, opts)
|
||||
}
|
||||
|
||||
// GetChildren gets a list of children, up until the limit (either
|
||||
// default or the one set in opts).
|
||||
func (f *Folder) GetChildren(opts *NodeListOptions) ([]*Node, *http.Response, error) {
|
||||
url := fmt.Sprintf("nodes/%s/children", *f.Id)
|
||||
return f.service.listNodes(url, opts)
|
||||
}
|
||||
|
||||
// GetFolder gets the subfolder by name. It is an error if not exactly
|
||||
// one subfolder is found.
|
||||
//
|
||||
// If it isn't found then it returns the error ErrorNodeNotFound
|
||||
func (f *Folder) GetFolder(name string) (*Folder, *http.Response, error) {
|
||||
n, resp, err := f.GetNode(name)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
res, ok := n.Typed().(*Folder)
|
||||
if !ok {
|
||||
err := fmt.Errorf("Node '%s' is not a folder", name)
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return res, resp, nil
|
||||
}
|
||||
|
||||
// createNode is a cut down set of parameters for creating nodes.
// It is serialized as the metadata part of a node creation request.
type createNode struct {
	Name    string   `json:"name"`
	Kind    string   `json:"kind"` // "FILE" or "FOLDER" in this file's usage
	Parents []string `json:"parents"`
}
|
||||
|
||||
// CreateFolder makes a new folder with the given name.
|
||||
//
|
||||
// The new Folder is returned
|
||||
func (f *Folder) CreateFolder(name string) (*Folder, *http.Response, error) {
|
||||
createFolder := createNode{
|
||||
Name: name,
|
||||
Kind: "FOLDER",
|
||||
Parents: []string{*f.Id},
|
||||
}
|
||||
req, err := f.service.client.NewMetadataRequest("POST", "nodes", &createFolder)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
folder := &Folder{&Node{service: f.service}}
|
||||
resp, err := f.service.client.Do(req, folder)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
return folder, resp, nil
|
||||
|
||||
}
|
||||
|
||||
// GetFile gets the file by name. It is an error if not exactly one file is found.
|
||||
//
|
||||
// If it isn't found then it returns the error ErrorNodeNotFound
|
||||
func (f *Folder) GetFile(name string) (*File, *http.Response, error) {
|
||||
n, resp, err := f.GetNode(name)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
res, ok := n.Typed().(*File)
|
||||
if !ok {
|
||||
err := fmt.Errorf("Node '%s' is not a file", name)
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return res, resp, nil
|
||||
}
|
||||
|
||||
// escapeForFilterRe matches every character that must be backslash
// escaped in a filter query.
var escapeForFilterRe = regexp.MustCompile(`([+\-&|!(){}\[\]^'"~*?:\\ ])`)

// EscapeForFilter escapes an abitrary string for use as a filter
// query parameter.
//
// Special characters that are part of the query syntax will be
// escaped. The list of special characters are:
//
// + - & | ! ( ) { } [ ] ^ ' " ~ * ? : \
//
// Additionally, space will be escaped. Characters are escaped by
// using \ before the character.
func EscapeForFilter(s string) string {
	escaped := escapeForFilterRe.ReplaceAllString(s, `\$1`)
	return escaped
}
|
||||
|
||||
// GetNode gets the node by name. It is an error if not exactly one node is found.
|
||||
//
|
||||
// If it isn't found then it returns the error ErrorNodeNotFound
|
||||
func (f *Folder) GetNode(name string) (*Node, *http.Response, error) {
|
||||
filter := fmt.Sprintf(`parents:"%v" AND name:"%s"`, *f.Id, EscapeForFilter(name))
|
||||
opts := &NodeListOptions{Filters: filter}
|
||||
|
||||
nodes, resp, err := f.service.GetNodes(opts)
|
||||
if err != nil {
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
if len(nodes) < 1 {
|
||||
return nil, resp, ErrorNodeNotFound
|
||||
}
|
||||
if len(nodes) > 1 {
|
||||
err := fmt.Errorf("Too many nodes '%s' found (%v)", name, len(nodes))
|
||||
return nil, resp, err
|
||||
}
|
||||
|
||||
return nodes[0], resp, nil
|
||||
}
|
||||
|
||||
// WalkNodes walks the given node hierarchy, getting each node along the way, and returns
|
||||
// the deepest node. If an error occurs, returns the furthest successful node and the list
|
||||
// of HTTP responses.
|
||||
func (f *Folder) WalkNodes(names ...string) (*Node, []*http.Response, error) {
|
||||
resps := make([]*http.Response, 0, len(names))
|
||||
|
||||
if len(names) == 0 {
|
||||
return f.Node, resps, nil
|
||||
}
|
||||
|
||||
// process each node except the last one
|
||||
fp := f
|
||||
for _, name := range names[:len(names)-1] {
|
||||
fn, resp, err := fp.GetFolder(name)
|
||||
resps = append(resps, resp)
|
||||
if err != nil {
|
||||
return fp.Node, resps, err
|
||||
}
|
||||
|
||||
fp = fn
|
||||
}
|
||||
|
||||
// process the last node
|
||||
nl, resp, err := fp.GetNode(names[len(names)-1])
|
||||
resps = append(resps, resp)
|
||||
if err != nil {
|
||||
return fp.Node, resps, err
|
||||
}
|
||||
|
||||
return nl, resps, nil
|
||||
}
|
||||
|
||||
// putOrOverwrite uploads the content read from in to url as a
// multipart/form-data request using httpVerb ("POST" to create,
// "PUT" to overwrite). name is the form file name; metadata, if
// non-empty, is sent as the "metadata" form field.
//
// The multipart body is streamed through an io.Pipe from a goroutine
// so the file content is never buffered in memory. Zero length inputs
// are special-cased: the whole (tiny) body is buffered so an exact
// Content-Length can be set instead of a chunked transfer.
func (s *NodesService) putOrOverwrite(in io.Reader, httpVerb, url, name, metadata string) (*File, *http.Response, error) {
	var bodyReader io.Reader

	bodyReader, bodyWriter := io.Pipe()
	writer := multipart.NewWriter(bodyWriter)
	contentType := writer.FormDataContentType()
	contentLength := int64(-1) // -1 means unknown / chunked

	// Probe one byte to detect a zero length input; the byte is
	// stitched back onto in with io.MultiReader below.
	buf := make([]byte, 1)
	n, err := io.ReadFull(in, buf)
	isZeroLength := err == io.EOF
	if !isZeroLength && err != nil {
		return nil, nil, err
	}
	in = io.MultiReader(bytes.NewReader(buf[:n]), in)

	// Write the multipart body in the background. Exactly one value is
	// sent on errChan: the first write error, or the writer.Close result.
	errChan := make(chan error, 1)
	go func() {
		defer bodyWriter.Close()
		var err error

		if metadata != "" {
			err = writer.WriteField("metadata", string(metadata))
			if err != nil {
				errChan <- err
				return
			}
		}

		part, err := writer.CreateFormFile("content", name)
		if err != nil {
			errChan <- err
			return
		}
		if _, err := io.Copy(part, in); err != nil {
			errChan <- err
			return
		}
		errChan <- writer.Close()
	}()

	if isZeroLength {
		// Drain the pipe into memory so Content-Length can be set.
		buf, err := ioutil.ReadAll(bodyReader)
		if err != nil {
			return nil, nil, err
		}
		bodyReader = bytes.NewReader(buf)
		contentLength = int64(len(buf))
	}

	req, err := s.client.NewContentRequest(httpVerb, url, bodyReader)
	if err != nil {
		return nil, nil, err
	}

	req.ContentLength = contentLength
	req.Header.Add("Content-Type", contentType)

	file := &File{&Node{service: s}}
	resp, err := s.client.Do(req, file)
	if err != nil {
		return nil, resp, err
	}

	// Wait for the writer goroutine and surface any error it hit.
	err = <-errChan
	if err != nil {
		return nil, resp, err
	}

	return file, resp, err
}
|
||||
|
||||
// Put stores the data read from in at path as name on the Amazon Cloud Drive.
|
||||
// Errors if the file already exists on the drive.
|
||||
func (f *Folder) Put(in io.Reader, name string) (*File, *http.Response, error) {
|
||||
metadata := createNode{
|
||||
Name: name,
|
||||
Kind: "FILE",
|
||||
Parents: []string{*f.Id},
|
||||
}
|
||||
metadataJSON, err := json.Marshal(&metadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return f.service.putOrOverwrite(in, "POST", "nodes?suppress=deduplication", name, string(metadataJSON))
|
||||
}
|
||||
|
||||
// Overwrite updates the file contents from in
|
||||
func (f *File) Overwrite(in io.Reader) (*File, *http.Response, error) {
|
||||
url := fmt.Sprintf("nodes/%s/content", *f.Id)
|
||||
return f.service.putOrOverwrite(in, "PUT", url, *f.Name, "")
|
||||
}
|
||||
|
||||
// PutSized stores the data read from in at path as name on the Amazon
// Cloud Drive. Errors if the file already exists on the drive.
// The size argument is ignored.
//
// Deprecated: no longer needed - just use Put
func (f *Folder) PutSized(in io.Reader, _ int64, name string) (*File, *http.Response, error) {
	return f.Put(in, name)
}
|
||||
|
||||
// OverwriteSized updates the file contents from in.
// The size argument is ignored.
//
// Deprecated: no longer needed - just use Overwrite
func (f *File) OverwriteSized(in io.Reader, _ int64) (*File, *http.Response, error) {
	return f.Overwrite(in)
}
|
||||
|
||||
// Upload stores the content of file at path as name on the Amazon Cloud Drive.
|
||||
// Errors if the file already exists on the drive.
|
||||
func (f *Folder) Upload(path, name string) (*File, *http.Response, error) {
|
||||
in, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer in.Close()
|
||||
return f.Put(in, name)
|
||||
}
|
||||
|
||||
// NodeListOptions holds the options when getting a list of nodes, such as the filter,
// sorting and pagination.
type NodeListOptions struct {
	Limit   uint   `url:"limit,omitempty"`
	Filters string `url:"filters,omitempty"`
	Sort    string `url:"sort,omitempty"`

	// Token where to start for next page (internal)
	StartToken string `url:"startToken,omitempty"`
	// reachedEnd is internal paging state maintained by the list
	// helpers elsewhere in this file.
	reachedEnd bool
}
|
||||
|
||||
// addOptions adds the parameters in opts as URL query parameters to s. opts
|
||||
// must be a struct whose fields may contain "url" tags.
|
||||
func addOptions(s string, opts interface{}) (string, error) {
|
||||
v := reflect.ValueOf(opts)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
qs, err := query.Values(opts)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
u.RawQuery = qs.Encode()
|
||||
return u.String(), nil
|
||||
}
|
||||
4
.rclone_repo/vendor/github.com/ncw/swift/.gitignore
generated
vendored
Executable file
4
.rclone_repo/vendor/github.com/ncw/swift/.gitignore
generated
vendored
Executable file
@@ -0,0 +1,4 @@
|
||||
*~
|
||||
*.pyc
|
||||
test-env*
|
||||
junk/
|
||||
33
.rclone_repo/vendor/github.com/ncw/swift/.travis.yml
generated
vendored
Executable file
33
.rclone_repo/vendor/github.com/ncw/swift/.travis.yml
generated
vendored
Executable file
@@ -0,0 +1,33 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.1.x
|
||||
- 1.2.x
|
||||
- 1.3.x
|
||||
- 1.4.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- master
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.11.x
|
||||
env: TEST_REAL_SERVER=rackspace
|
||||
- go: 1.11.x
|
||||
env: TEST_REAL_SERVER=memset
|
||||
allow_failures:
|
||||
- go: 1.11.x
|
||||
env: TEST_REAL_SERVER=rackspace
|
||||
- go: 1.11.x
|
||||
env: TEST_REAL_SERVER=memset
|
||||
install: go test -i ./...
|
||||
script:
|
||||
- test -z "$(go fmt ./...)"
|
||||
- go test
|
||||
- ./travis_realserver.sh
|
||||
20
.rclone_repo/vendor/github.com/ncw/swift/COPYING
generated
vendored
Executable file
20
.rclone_repo/vendor/github.com/ncw/swift/COPYING
generated
vendored
Executable file
@@ -0,0 +1,20 @@
|
||||
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
143
.rclone_repo/vendor/github.com/ncw/swift/README.md
generated
vendored
Executable file
143
.rclone_repo/vendor/github.com/ncw/swift/README.md
generated
vendored
Executable file
@@ -0,0 +1,143 @@
|
||||
Swift
|
||||
=====
|
||||
|
||||
This package provides an easy to use library for interfacing with
|
||||
Swift / Openstack Object Storage / Rackspace cloud files from the Go
|
||||
Language
|
||||
|
||||
See here for package docs
|
||||
|
||||
http://godoc.org/github.com/ncw/swift
|
||||
|
||||
[](https://travis-ci.org/ncw/swift) [](https://godoc.org/github.com/ncw/swift)
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
Use go to install the library
|
||||
|
||||
go get github.com/ncw/swift
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
See here for full package docs
|
||||
|
||||
- http://godoc.org/github.com/ncw/swift
|
||||
|
||||
Here is a short example from the docs
|
||||
```go
|
||||
import "github.com/ncw/swift"
|
||||
|
||||
// Create a connection
|
||||
c := swift.Connection{
|
||||
UserName: "user",
|
||||
ApiKey: "key",
|
||||
AuthUrl: "auth_url",
|
||||
Domain: "domain", // Name of the domain (v3 auth only)
|
||||
Tenant: "tenant", // Name of the tenant (v2 auth only)
|
||||
}
|
||||
// Authenticate
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// List all the containers
|
||||
containers, err := c.ContainerNames(nil)
|
||||
fmt.Println(containers)
|
||||
// etc...
|
||||
```
|
||||
|
||||
Additions
|
||||
---------
|
||||
|
||||
The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.
|
||||
|
||||
Testing
|
||||
-------
|
||||
|
||||
To run the tests you can either use an embedded fake Swift server
|
||||
or use a real Openstack Swift server or a Rackspace Cloud files account.
|
||||
|
||||
When using a real Swift server, you need to set these environment variables
|
||||
before running the tests
|
||||
|
||||
export SWIFT_API_USER='user'
|
||||
export SWIFT_API_KEY='key'
|
||||
export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'
|
||||
|
||||
And optionally these if using v2 authentication
|
||||
|
||||
export SWIFT_TENANT='TenantName'
|
||||
export SWIFT_TENANT_ID='TenantId'
|
||||
|
||||
And optionally these if using v3 authentication
|
||||
|
||||
export SWIFT_TENANT='TenantName'
|
||||
export SWIFT_TENANT_ID='TenantId'
|
||||
export SWIFT_API_DOMAIN_ID='domain id'
|
||||
export SWIFT_API_DOMAIN='domain name'
|
||||
|
||||
And optionally these if using v3 trust
|
||||
|
||||
export SWIFT_TRUST_ID='TrustId'
|
||||
|
||||
And optionally this if you want to skip server certificate validation
|
||||
|
||||
export SWIFT_AUTH_INSECURE=1
|
||||
|
||||
And optionally this to configure the connect channel timeout, in seconds
|
||||
|
||||
export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60
|
||||
|
||||
And optionally this to configure the data channel timeout, in seconds
|
||||
|
||||
export SWIFT_DATA_CHANNEL_TIMEOUT=60
|
||||
|
||||
Then run the tests with `go test`
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
This is free software under the terms of the MIT license (check the COPYING file
|
||||
included in this package).
|
||||
|
||||
Contact and support
|
||||
-------------------
|
||||
|
||||
The project website is at:
|
||||
|
||||
- https://github.com/ncw/swift
|
||||
|
||||
There you can file bug reports, ask for help or contribute patches.
|
||||
|
||||
Authors
|
||||
-------
|
||||
|
||||
- Nick Craig-Wood <nick@craig-wood.com>
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
- Brian "bojo" Jones <mojobojo@gmail.com>
|
||||
- Janika Liiv <janika@toggl.com>
|
||||
- Yamamoto, Hirotaka <ymmt2005@gmail.com>
|
||||
- Stephen <yo@groks.org>
|
||||
- platformpurple <stephen@platformpurple.com>
|
||||
- Paul Querna <pquerna@apache.org>
|
||||
- Livio Soares <liviobs@gmail.com>
|
||||
- thesyncim <thesyncim@gmail.com>
|
||||
- lsowen <lsowen@s1network.com>
|
||||
- Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
- Chris Kastorff <encryptio@gmail.com>
|
||||
- Dai HaoJun <haojun.dai@hp.com>
|
||||
- Hua Wang <wanghua.humble@gmail.com>
|
||||
- Fabian Ruff <fabian@progra.de>
|
||||
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
|
||||
- Petr Kotek <petr.kotek@bigcommerce.com>
|
||||
- Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
- Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
- Sam Gunaratne <samgzeit@gmail.com>
|
||||
- Richard Scothern <richard.scothern@gmail.com>
|
||||
- Michel Couillard <couillard.michel@voxlog.ca>
|
||||
- Christopher Waldon <ckwaldon@us.ibm.com>
|
||||
320
.rclone_repo/vendor/github.com/ncw/swift/auth.go
generated
vendored
Executable file
320
.rclone_repo/vendor/github.com/ncw/swift/auth.go
generated
vendored
Executable file
@@ -0,0 +1,320 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Authenticator defines the operations needed to authenticate with swift
//
// This encapsulates the different authentication schemes in use
type Authenticator interface {
	// Request creates an http.Request for the auth - return nil if not needed
	Request(*Connection) (*http.Request, error)
	// Response parses the http.Response
	Response(resp *http.Response) error
	// StorageUrl returns the public storage URL - set Internal to true
	// to read the internal/service net URL instead.
	StorageUrl(Internal bool) string
	// Token returns the access token.
	Token() string
	// CdnUrl returns the CDN url if available, "" otherwise.
	CdnUrl() string
}
|
||||
|
||||
// CustomEndpointAuthenticator is an optional interface for
// Authenticators that can return a storage URL for a specific
// endpoint type (public, internal or admin).
type CustomEndpointAuthenticator interface {
	StorageUrlForEndpoint(endpointType EndpointType) string
}
|
||||
|
||||
// EndpointType selects which of a service catalog entry's URLs is
// used as the storage URL.
type EndpointType string

const (
	// Use public URL as storage URL
	EndpointTypePublic = EndpointType("public")

	// Use internal URL as storage URL
	EndpointTypeInternal = EndpointType("internal")

	// Use admin URL as storage URL
	EndpointTypeAdmin = EndpointType("admin")
)
|
||||
|
||||
// newAuth - create a new Authenticator from the AuthUrl
|
||||
//
|
||||
// A hint for AuthVersion can be provided
|
||||
func newAuth(c *Connection) (Authenticator, error) {
|
||||
AuthVersion := c.AuthVersion
|
||||
if AuthVersion == 0 {
|
||||
if strings.Contains(c.AuthUrl, "v3") {
|
||||
AuthVersion = 3
|
||||
} else if strings.Contains(c.AuthUrl, "v2") {
|
||||
AuthVersion = 2
|
||||
} else if strings.Contains(c.AuthUrl, "v1") {
|
||||
AuthVersion = 1
|
||||
} else {
|
||||
return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
|
||||
}
|
||||
}
|
||||
switch AuthVersion {
|
||||
case 1:
|
||||
return &v1Auth{}, nil
|
||||
case 2:
|
||||
return &v2Auth{
|
||||
// Guess as to whether using API key or
|
||||
// password it will try both eventually so
|
||||
// this is just an optimization.
|
||||
useApiKey: len(c.ApiKey) >= 32,
|
||||
}, nil
|
||||
case 3:
|
||||
return &v3Auth{}, nil
|
||||
}
|
||||
return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// v1Auth implements Authenticator for the v1 (TempAuth style) scheme,
// where everything comes back in response headers.
type v1Auth struct {
	Headers http.Header // V1 auth: the authentication headers so extensions can access them
}
|
||||
|
||||
// v1 Authentication - make request
|
||||
func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
|
||||
req, err := http.NewRequest("GET", c.AuthUrl, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
req.Header.Set("X-Auth-Key", c.ApiKey)
|
||||
req.Header.Set("X-Auth-User", c.UserName)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// v1 Authentication - read response.
// Stores the response headers; StorageUrl/Token/CdnUrl read them later.
func (auth *v1Auth) Response(resp *http.Response) error {
	auth.Headers = resp.Header
	return nil
}
|
||||
|
||||
// v1 Authentication - read storage url
|
||||
func (auth *v1Auth) StorageUrl(Internal bool) string {
|
||||
storageUrl := auth.Headers.Get("X-Storage-Url")
|
||||
if Internal {
|
||||
newUrl, err := url.Parse(storageUrl)
|
||||
if err != nil {
|
||||
return storageUrl
|
||||
}
|
||||
newUrl.Host = "snet-" + newUrl.Host
|
||||
storageUrl = newUrl.String()
|
||||
}
|
||||
return storageUrl
|
||||
}
|
||||
|
||||
// v1 Authentication - read auth token from the stored response headers.
func (auth *v1Auth) Token() string {
	return auth.Headers.Get("X-Auth-Token")
}
|
||||
|
||||
// v1 Authentication - read cdn url from the stored response headers.
func (auth *v1Auth) CdnUrl() string {
	return auth.Headers.Get("X-CDN-Management-Url")
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// v2Auth implements Authenticator for keystone v2.
// It can authenticate with either a password or an API key
// (Rackspace variant) and toggles between the two until one works.
type v2Auth struct {
	Auth        *v2AuthResponse
	Region      string
	useApiKey   bool // if set will use API key not Password
	useApiKeyOk bool // if set won't change useApiKey any more
	notFirst    bool // set after first run
}
|
||||
|
||||
// v2 Authentication - make request
|
||||
func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
|
||||
auth.Region = c.Region
|
||||
// Toggle useApiKey if not first run and not OK yet
|
||||
if auth.notFirst && !auth.useApiKeyOk {
|
||||
auth.useApiKey = !auth.useApiKey
|
||||
}
|
||||
auth.notFirst = true
|
||||
// Create a V2 auth request for the body of the connection
|
||||
var v2i interface{}
|
||||
if !auth.useApiKey {
|
||||
// Normal swift authentication
|
||||
v2 := v2AuthRequest{}
|
||||
v2.Auth.PasswordCredentials.UserName = c.UserName
|
||||
v2.Auth.PasswordCredentials.Password = c.ApiKey
|
||||
v2.Auth.Tenant = c.Tenant
|
||||
v2.Auth.TenantId = c.TenantId
|
||||
v2i = v2
|
||||
} else {
|
||||
// Rackspace special with API Key
|
||||
v2 := v2AuthRequestRackspace{}
|
||||
v2.Auth.ApiKeyCredentials.UserName = c.UserName
|
||||
v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
|
||||
v2.Auth.Tenant = c.Tenant
|
||||
v2.Auth.TenantId = c.TenantId
|
||||
v2i = v2
|
||||
}
|
||||
body, err := json.Marshal(v2i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
url := c.AuthUrl
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
url += "tokens"
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// v2 Authentication - read response
|
||||
func (auth *v2Auth) Response(resp *http.Response) error {
|
||||
auth.Auth = new(v2AuthResponse)
|
||||
err := readJson(resp, auth.Auth)
|
||||
// If successfully read Auth then no need to toggle useApiKey any more
|
||||
if err == nil {
|
||||
auth.useApiKeyOk = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Finds the Endpoint Url of "type" from the v2AuthResponse using the
|
||||
// Region if set or defaulting to the first one if not
|
||||
//
|
||||
// Returns "" if not found
|
||||
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
||||
for _, catalog := range auth.Auth.Access.ServiceCatalog {
|
||||
if catalog.Type == Type {
|
||||
for _, endpoint := range catalog.Endpoints {
|
||||
if auth.Region == "" || (auth.Region == endpoint.Region) {
|
||||
switch endpointType {
|
||||
case EndpointTypeInternal:
|
||||
return endpoint.InternalUrl
|
||||
case EndpointTypePublic:
|
||||
return endpoint.PublicUrl
|
||||
case EndpointTypeAdmin:
|
||||
return endpoint.AdminUrl
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// v2 Authentication - read storage url
|
||||
//
|
||||
// If Internal is true then it reads the private (internal / service
|
||||
// net) URL.
|
||||
func (auth *v2Auth) StorageUrl(Internal bool) string {
|
||||
endpointType := EndpointTypePublic
|
||||
if Internal {
|
||||
endpointType = EndpointTypeInternal
|
||||
}
|
||||
return auth.StorageUrlForEndpoint(endpointType)
|
||||
}
|
||||
|
||||
// StorageUrlForEndpoint reads the storage url for v2 authentication.
//
// Use the indicated endpointType to choose a URL.
func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
	return auth.endpointUrl("object-store", endpointType)
}
|
||||
|
||||
// v2 Authentication - read auth token from the parsed response.
func (auth *v2Auth) Token() string {
	return auth.Auth.Access.Token.Id
}
|
||||
|
||||
// v2 Authentication - read cdn url (Rackspace object CDN service).
func (auth *v2Auth) CdnUrl() string {
	return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// V2 Authentication request
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequest struct {
	Auth struct {
		PasswordCredentials struct {
			UserName string `json:"username"`
			Password string `json:"password"`
		} `json:"passwordCredentials"`
		Tenant   string `json:"tenantName,omitempty"`
		TenantId string `json:"tenantId,omitempty"`
	} `json:"auth"`
}

// V2 Authentication request - Rackspace variant
// (uses an API key instead of a password)
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequestRackspace struct {
	Auth struct {
		ApiKeyCredentials struct {
			UserName string `json:"username"`
			ApiKey   string `json:"apiKey"`
		} `json:"RAX-KSKEY:apiKeyCredentials"`
		Tenant   string `json:"tenantName,omitempty"`
		TenantId string `json:"tenantId,omitempty"`
	} `json:"auth"`
}
|
||||
|
||||
// V2 Authentication reply
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthResponse struct {
	Access struct {
		// ServiceCatalog is searched by endpointUrl for the
		// storage / CDN endpoints.
		ServiceCatalog []struct {
			Endpoints []struct {
				InternalUrl string
				PublicUrl   string
				AdminUrl    string
				Region      string
				TenantId    string
			}
			Name string
			Type string
		}
		// Token carries the issued auth token id and expiry.
		Token struct {
			Expires string
			Id      string
			Tenant  struct {
				Id   string
				Name string
			}
		}
		User struct {
			DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
			Id            string
			Name          string
			Roles         []struct {
				Description string
				Id          string
				Name        string
				TenantId    string
			}
		}
	}
}
|
||||
228
.rclone_repo/vendor/github.com/ncw/swift/auth_v3.go
generated
vendored
Executable file
228
.rclone_repo/vendor/github.com/ncw/swift/auth_v3.go
generated
vendored
Executable file
@@ -0,0 +1,228 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// Names of the v3 identity authentication methods.
	v3AuthMethodToken    = "token"
	v3AuthMethodPassword = "password"
	// Service catalog type of the object storage service.
	v3CatalogTypeObjectStore = "object-store"
)
|
||||
|
||||
// V3 Authentication request
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://developer.openstack.org/api-ref-identity-v3.html
type v3AuthRequest struct {
	Auth struct {
		Identity struct {
			// Methods lists which credential kinds are present
			// (see v3AuthMethodToken / v3AuthMethodPassword).
			Methods  []string        `json:"methods"`
			Password *v3AuthPassword `json:"password,omitempty"`
			Token    *v3AuthToken    `json:"token,omitempty"`
		} `json:"identity"`
		Scope *v3Scope `json:"scope,omitempty"`
	} `json:"auth"`
}
||||
|
||||
// v3Scope restricts a requested token to a project, domain or trust.
type v3Scope struct {
	Project *v3Project `json:"project,omitempty"`
	Domain  *v3Domain  `json:"domain,omitempty"`
	Trust   *v3Trust   `json:"OS-TRUST:trust,omitempty"`
}

// v3Domain identifies a keystone domain by id or name.
type v3Domain struct {
	Id   string `json:"id,omitempty"`
	Name string `json:"name,omitempty"`
}

// v3Project identifies a project (tenant), optionally qualified by
// the domain it lives in.
type v3Project struct {
	Name   string    `json:"name,omitempty"`
	Id     string    `json:"id,omitempty"`
	Domain *v3Domain `json:"domain,omitempty"`
}

// v3Trust identifies an OS-TRUST trust by id.
type v3Trust struct {
	Id string `json:"id"`
}

// v3User carries the credentials for password authentication.
type v3User struct {
	Domain   *v3Domain `json:"domain,omitempty"`
	Id       string    `json:"id,omitempty"`
	Name     string    `json:"name,omitempty"`
	Password string    `json:"password,omitempty"`
}

// v3AuthToken wraps an existing token id used for token authentication.
type v3AuthToken struct {
	Id string `json:"id"`
}

// v3AuthPassword wraps the user credentials for password authentication.
type v3AuthPassword struct {
	User v3User `json:"user"`
}
|
||||
|
||||
// V3 Authentication response
|
||||
type v3AuthResponse struct {
|
||||
Token struct {
|
||||
Expires_At, Issued_At string
|
||||
Methods []string
|
||||
Roles []struct {
|
||||
Id, Name string
|
||||
Links struct {
|
||||
Self string
|
||||
}
|
||||
}
|
||||
|
||||
Project struct {
|
||||
Domain struct {
|
||||
Id, Name string
|
||||
}
|
||||
Id, Name string
|
||||
}
|
||||
|
||||
Catalog []struct {
|
||||
Id, Namem, Type string
|
||||
Endpoints []struct {
|
||||
Id, Region_Id, Url, Region string
|
||||
Interface EndpointType
|
||||
}
|
||||
}
|
||||
|
||||
User struct {
|
||||
Id, Name string
|
||||
Domain struct {
|
||||
Id, Name string
|
||||
Links struct {
|
||||
Self string
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Audit_Ids []string
|
||||
}
|
||||
}
|
||||
|
||||
// v3Auth holds the state of a keystone v3 authentication exchange:
// the configured region, the decoded response body and the response
// headers (which carry the token in X-Subject-Token).
type v3Auth struct {
	Region  string
	Auth    *v3AuthResponse
	Headers http.Header
}
|
||||
|
||||
func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
|
||||
auth.Region = c.Region
|
||||
|
||||
var v3i interface{}
|
||||
|
||||
v3 := v3AuthRequest{}
|
||||
|
||||
if c.UserName == "" && c.UserId == "" {
|
||||
v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
|
||||
v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
|
||||
} else {
|
||||
v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
|
||||
v3.Auth.Identity.Password = &v3AuthPassword{
|
||||
User: v3User{
|
||||
Name: c.UserName,
|
||||
Id: c.UserId,
|
||||
Password: c.ApiKey,
|
||||
},
|
||||
}
|
||||
|
||||
var domain *v3Domain
|
||||
|
||||
if c.Domain != "" {
|
||||
domain = &v3Domain{Name: c.Domain}
|
||||
} else if c.DomainId != "" {
|
||||
domain = &v3Domain{Id: c.DomainId}
|
||||
}
|
||||
v3.Auth.Identity.Password.User.Domain = domain
|
||||
}
|
||||
|
||||
if c.TrustId != "" {
|
||||
v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
|
||||
} else if c.TenantId != "" || c.Tenant != "" {
|
||||
|
||||
v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
|
||||
|
||||
if c.TenantId != "" {
|
||||
v3.Auth.Scope.Project.Id = c.TenantId
|
||||
} else if c.Tenant != "" {
|
||||
v3.Auth.Scope.Project.Name = c.Tenant
|
||||
switch {
|
||||
case c.TenantDomain != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
|
||||
case c.TenantDomainId != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
|
||||
case c.Domain != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
|
||||
case c.DomainId != "":
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
|
||||
default:
|
||||
v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
v3i = v3
|
||||
|
||||
body, err := json.Marshal(v3i)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := c.AuthUrl
|
||||
if !strings.HasSuffix(url, "/") {
|
||||
url += "/"
|
||||
}
|
||||
url += "auth/tokens"
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (auth *v3Auth) Response(resp *http.Response) error {
|
||||
auth.Auth = &v3AuthResponse{}
|
||||
auth.Headers = resp.Header
|
||||
err := readJson(resp, auth.Auth)
|
||||
return err
|
||||
}
|
||||
|
||||
func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
|
||||
for _, catalog := range auth.Auth.Token.Catalog {
|
||||
if catalog.Type == Type {
|
||||
for _, endpoint := range catalog.Endpoints {
|
||||
if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
|
||||
return endpoint.Url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (auth *v3Auth) StorageUrl(Internal bool) string {
|
||||
endpointType := EndpointTypePublic
|
||||
if Internal {
|
||||
endpointType = EndpointTypeInternal
|
||||
}
|
||||
return auth.StorageUrlForEndpoint(endpointType)
|
||||
}
|
||||
|
||||
func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
|
||||
return auth.endpointUrl("object-store", endpointType)
|
||||
}
|
||||
|
||||
// Token returns the authentication token. Keystone v3 delivers it in
// the X-Subject-Token response header rather than in the body.
func (auth *v3Auth) Token() string {
	return auth.Headers.Get("X-Subject-Token")
}
|
||||
|
||||
// CdnUrl returns "" - keystone v3 authentication provides no CDN
// endpoint.
func (auth *v3Auth) CdnUrl() string {
	return ""
}
|
||||
28
.rclone_repo/vendor/github.com/ncw/swift/compatibility_1_0.go
generated
vendored
Executable file
28
.rclone_repo/vendor/github.com/ncw/swift/compatibility_1_0.go
generated
vendored
Executable file
@@ -0,0 +1,28 @@
|
||||
// Go 1.0 compatibility functions
|
||||
|
||||
// +build !go1.1
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cancel the request - doesn't work under < go 1.1
//
// http.Transport only grew CancelRequest in go 1.1, so this build can
// do nothing but log the limitation.
func cancelRequest(transport http.RoundTripper, req *http.Request) {
	log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
}
|
||||
|
||||
// Reset a timer - Doesn't work properly < go 1.1
//
// This is quite hard to do properly under go < 1.1 so we do a crude
// approximation and hope that everyone upgrades to go 1.1 quickly
func resetTimer(t *time.Timer, d time.Duration) {
	t.Stop()
	// Very likely this doesn't actually work if we are already
	// selecting on t.C. However we've stopped the original timer
	// so won't break transfers but may not time them out :-(
	// Overwriting *t swaps in a brand new timer in place.
	*t = *time.NewTimer(d)
}
|
||||
24
.rclone_repo/vendor/github.com/ncw/swift/compatibility_1_1.go
generated
vendored
Executable file
24
.rclone_repo/vendor/github.com/ncw/swift/compatibility_1_1.go
generated
vendored
Executable file
@@ -0,0 +1,24 @@
|
||||
// Go 1.1 and later compatibility functions
|
||||
//
|
||||
// +build go1.1
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Cancel the request
//
// Cancels via the transport's CancelRequest method when it has one
// (http.Transport does from go 1.1); otherwise a silent no-op.
func cancelRequest(transport http.RoundTripper, req *http.Request) {
	type requestCanceler interface {
		CancelRequest(*http.Request)
	}
	if tr, ok := transport.(requestCanceler); ok {
		tr.CancelRequest(req)
	}
}
|
||||
|
||||
// Reset a timer
//
// From go 1.1 time.Timer has a native Reset method.
func resetTimer(t *time.Timer, d time.Duration) {
	t.Reset(d)
}
|
||||
136
.rclone_repo/vendor/github.com/ncw/swift/dlo.go
generated
vendored
Executable file
136
.rclone_repo/vendor/github.com/ncw/swift/dlo.go
generated
vendored
Executable file
@@ -0,0 +1,136 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// DynamicLargeObjectCreateFile represents an open dynamic large object
// (the original comment said "static" - a copy/paste slip).
type DynamicLargeObjectCreateFile struct {
	largeObjectCreateFile
}
|
||||
|
||||
// DynamicLargeObjectCreateFile creates a dynamic large object
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
// and io.ReaderFrom. The flags are as passed to the
// largeObjectCreate method.
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}

	// Wrap in a bufio.Writer unless opts.NoBuffer is set.
	return withBuffer(opts, &DynamicLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}
|
||||
|
||||
// DynamicLargeObjectCreate creates or truncates an existing dynamic
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling DynamicLargeObjectCreateFile
// (note: any flags already set by the caller are overwritten).
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.DynamicLargeObjectCreateFile(opts)
}
|
||||
|
||||
// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
	// Deletion is identical for DLOs and SLOs - LargeObjectDelete
	// handles both.
	return c.LargeObjectDelete(container, path)
}
|
||||
|
||||
// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
|
||||
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
|
||||
info, headers, err := c.Object(dstContainer, srcObjectName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
|
||||
if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDLOManifest creates a dynamic large object manifest
|
||||
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string) error {
|
||||
headers := make(Headers)
|
||||
headers["X-Object-Manifest"] = prefix
|
||||
manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := manifest.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close satisfies the io.Closer interface
//
// Closing writes (or rewrites) the manifest; the segments themselves
// were already uploaded by Write.
func (file *DynamicLargeObjectCreateFile) Close() error {
	return file.Flush()
}
|
||||
|
||||
// Flush writes the DLO manifest pointing at the uploaded segments and
// then waits until a HEAD of the object reports the expected total
// size (Swift is only eventually consistent).
func (file *DynamicLargeObjectCreateFile) Flush() error {
	err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType)
	if err != nil {
		return err
	}
	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}
|
||||
|
||||
// getAllDLOSegments returns every segment object of the DLO whose
// segments live under segmentPath in segmentContainer, in
// segment-number order, compensating for stale container listings.
func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
	//a simple container listing works 99.9% of the time
	segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
	if err != nil {
		return nil, err
	}

	hasObjectName := make(map[string]struct{})
	for _, segment := range segments {
		hasObjectName[segment.Name] = struct{}{}
	}

	//The container listing might be outdated (i.e. not contain all existing
	//segment objects yet) because of temporary inconsistency (Swift is only
	//eventually consistent!). Check its completeness.
	segmentNumber := 0
	for {
		segmentNumber++
		segmentName := getSegment(segmentPath, segmentNumber)
		if _, seen := hasObjectName[segmentName]; seen {
			continue
		}

		//This segment is missing in the container listing. Use a more reliable
		//request to check its existence. (HEAD requests on segments are
		//guaranteed to return the correct metadata, except for the pathological
		//case of an outage of large parts of the Swift cluster or its network,
		//since every segment is only written once.)
		segment, _, err := c.Object(segmentContainer, segmentName)
		switch err {
		case nil:
			//found new segment -> add it in the correct position and keep
			//going, more might be missing
			if segmentNumber <= len(segments) {
				// insert at index segmentNumber-1: duplicate the element
				// there, shifting the tail right, then overwrite it.
				segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
				segments[segmentNumber-1] = segment
			} else {
				segments = append(segments, segment)
			}
			continue
		case ObjectNotFound:
			//This segment is missing. Since we upload segments sequentially,
			//there won't be any more segments after it.
			return segments, nil
		default:
			return nil, err //unexpected error
		}
	}
}
|
||||
19
.rclone_repo/vendor/github.com/ncw/swift/doc.go
generated
vendored
Executable file
19
.rclone_repo/vendor/github.com/ncw/swift/doc.go
generated
vendored
Executable file
@@ -0,0 +1,19 @@
|
||||
/*
Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files

Standard Usage

Most of the work is done through the Container*() and Object*() methods.

All methods are safe to use concurrently in multiple go routines.

Object Versioning

As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.

Rackspace Sub Module

This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.

*/
package swift
|
||||
448
.rclone_repo/vendor/github.com/ncw/swift/largeobjects.go
generated
vendored
Executable file
448
.rclone_repo/vendor/github.com/ncw/swift/largeobjects.go
generated
vendored
Executable file
@@ -0,0 +1,448 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
gopath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NotLargeObject is returned if an operation is performed on an object which isn't large.
var NotLargeObject = errors.New("Not a large object")

// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
var readAfterWriteTimeout = 15 * time.Second

// readAfterWriteWait defines the time to sleep between two retries
// (doubled after each unsuccessful attempt - see withLORetry).
var readAfterWriteWait = 200 * time.Millisecond
|
||||
|
||||
// largeObjectCreateFile represents an open static or dynamic large object
type largeObjectCreateFile struct {
	conn             *Connection
	container        string // container holding the manifest object
	objectName       string // name of the manifest object
	currentLength    int64  // total size of all segments
	filePos          int64  // offset for the next Write
	chunkSize        int64  // target size of each segment
	segmentContainer string // container holding the segments
	prefix           string // path prefix under which segments are named
	contentType      string
	checkHash        bool
	segments         []Object // segments uploaded so far, in order
	headers          Headers
	minChunkSize     int64 // lower bound applied to chunkSize
}
|
||||
|
||||
// swiftSegmentPath returns a unique randomised path of the form
// "segments/xxx/yyyy..." under which to store an object's segments.
func swiftSegmentPath(path string) (string, error) {
	random := make([]byte, 32)
	if _, err := rand.Read(random); err != nil {
		return "", err
	}
	// NOTE(review): sha1.New().Sum(b) appends the digest of an *empty*
	// stream to b, so this hex-encodes path+random+sha1("") rather than
	// a hash of path - still unique, but possibly not the intent.
	encoded := hex.EncodeToString(sha1.New().Sum(append([]byte(path), random...)))
	full := "segments/" + encoded[:3] + "/" + encoded[3:]
	return strings.TrimLeft(strings.TrimRight(full, "/"), "/"), nil
}
|
||||
|
||||
// getSegment returns the name of segment number partNumber under
// segmentPath, zero-padded to 16 digits so names sort numerically.
func getSegment(segmentPath string, partNumber int) string {
	return segmentPath + "/" + fmt.Sprintf("%016d", partNumber)
}
|
||||
|
||||
// parseFullPath splits a "container/prefix" manifest value into its
// container and (possibly empty) prefix parts. Only the first slash
// separates; further slashes stay in the prefix.
func parseFullPath(manifest string) (container string, prefix string) {
	if i := strings.Index(manifest, "/"); i >= 0 {
		return manifest[:i], manifest[i+1:]
	}
	return manifest, ""
}
|
||||
|
||||
func (headers Headers) IsLargeObjectDLO() bool {
|
||||
_, isDLO := headers["X-Object-Manifest"]
|
||||
return isDLO
|
||||
}
|
||||
|
||||
func (headers Headers) IsLargeObjectSLO() bool {
|
||||
_, isSLO := headers["X-Static-Large-Object"]
|
||||
return isSLO
|
||||
}
|
||||
|
||||
// IsLargeObject reports whether these headers describe either kind of
// large object, static or dynamic.
func (headers Headers) IsLargeObject() bool {
	return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
}
|
||||
|
||||
// getAllSegments returns the segment container and the ordered list of
// segment objects that make up the large object at container/path,
// dispatching on the headers to DLO or SLO handling. Returns
// NotLargeObject when the headers describe neither.
func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
	if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
		// DLO: the manifest header holds "container/prefix" of the segments.
		segmentContainer, segmentPath := parseFullPath(manifest)
		segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
		return segmentContainer, segments, err
	}
	if headers.IsLargeObjectSLO() {
		return c.getAllSLOSegments(container, path)
	}
	return "", nil, NotLargeObject
}
|
||||
|
||||
// LargeObjectOpts describes how a large object should be created
type LargeObjectOpts struct {
	Container        string  // Name of container to place object
	ObjectName       string  // Name of object
	Flags            int     // Creation flags (os.O_TRUNC / os.O_APPEND)
	CheckHash        bool    // If set Check the hash
	Hash             string  // If set use this hash to check
	ContentType      string  // Content-Type of the object
	Headers          Headers // Additional headers to upload the object with
	ChunkSize        int64   // Size of chunks of the object, defaults to 10MB if not set
	MinChunkSize     int64   // Minimum chunk size, automatically set for SLO's based on info
	SegmentContainer string  // Name of the container to place segments
	SegmentPrefix    string  // Prefix to use for the segments
	NoBuffer         bool    // Prevents using a bufio.Writer to write segments
}
|
||||
|
||||
// LargeObjectFile is the writeable, seekable handle returned by the
// large object creation helpers. Flush uploads the manifest; Size
// reports the current total size.
type LargeObjectFile interface {
	io.Writer
	io.Seeker
	io.Closer
	Size() int64
	Flush() error
}
|
||||
|
||||
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
//
// opts.Flags can have the following bits set
//   os.O_TRUNC  - remove the contents of the large object if it exists
//   os.O_APPEND - write at the end of the large object
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
	var (
		segmentPath      string
		segmentContainer string
		segments         []Object
		currentLength    int64
		err              error
	)

	// Use the caller's segment prefix if given, else generate a
	// random one.
	if opts.SegmentPrefix != "" {
		segmentPath = opts.SegmentPrefix
	} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
		return nil, err
	}

	if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
		if opts.Flags&os.O_TRUNC != 0 {
			// NOTE(review): the delete error is deliberately ignored -
			// best-effort truncation of the existing object.
			c.LargeObjectDelete(opts.Container, opts.ObjectName)
		} else {
			currentLength = info.Bytes
			if headers.IsLargeObject() {
				// Reuse the existing segments and their prefix.
				segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
				if err != nil {
					return nil, err
				}
				if len(segments) > 0 {
					segmentPath = gopath.Dir(segments[0].Name)
				}
			} else {
				// Plain object: turn it into the first segment so it
				// can be appended to.
				if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
					return nil, err
				}
				segments = append(segments, info)
			}
		}
	} else if err != ObjectNotFound {
		return nil, err
	}

	// segmentContainer is not empty when the manifest already existed
	if segmentContainer == "" {
		if opts.SegmentContainer != "" {
			segmentContainer = opts.SegmentContainer
		} else {
			segmentContainer = opts.Container + "_segments"
		}
	}

	file := &largeObjectCreateFile{
		conn:             c,
		checkHash:        opts.CheckHash,
		container:        opts.Container,
		objectName:       opts.ObjectName,
		chunkSize:        opts.ChunkSize,
		minChunkSize:     opts.MinChunkSize,
		headers:          opts.Headers,
		segmentContainer: segmentContainer,
		prefix:           segmentPath,
		segments:         segments,
		currentLength:    currentLength,
	}

	// Default to 10 MiB chunks, but never below the minimum chunk size.
	if file.chunkSize == 0 {
		file.chunkSize = 10 * 1024 * 1024
	}

	if file.minChunkSize > file.chunkSize {
		file.chunkSize = file.minChunkSize
	}

	if opts.Flags&os.O_APPEND != 0 {
		file.filePos = currentLength
	}

	return file, nil
}
|
||||
|
||||
// LargeObjectDelete deletes the large object named by container, path
//
// All segments are deleted along with the manifest itself. A plain
// (non-large) object is also accepted and simply deleted. Bulk delete
// is used when the backend supports it.
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
	_, headers, err := c.Object(container, objectName)
	if err != nil {
		return err
	}

	// Collect (container, name) pairs: all segments first, manifest last.
	var objects [][]string
	if headers.IsLargeObject() {
		segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
		if err != nil {
			return err
		}
		for _, obj := range segments {
			objects = append(objects, []string{segmentContainer, obj.Name})
		}
	}
	objects = append(objects, []string{container, objectName})

	info, err := c.cachedQueryInfo()
	if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
		filenames := make([]string, len(objects))
		for i, obj := range objects {
			filenames[i] = obj[0] + "/" + obj[1]
		}
		_, err = c.doBulkDelete(filenames)
		// Don't fail on ObjectNotFound because eventual consistency
		// makes this situation normal.
		if err != nil && err != Forbidden && err != ObjectNotFound {
			return err
		}
	} else {
		// Fall back to deleting one object at a time.
		for _, obj := range objects {
			if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
				return err
			}
		}
	}

	return nil
}
|
||||
|
||||
// LargeObjectGetSegments returns all the segments that compose an object
// If the object is a Dynamic Large Object (DLO), it just returns the objects
// that have the prefix as indicated by the manifest.
// If the object is a Static Large Object (SLO), it retrieves the JSON content
// of the manifest and return all the segments of it.
func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
	// HEAD the object first to learn which kind of large object it is.
	_, headers, err := c.Object(container, path)
	if err != nil {
		return "", nil, err
	}

	return c.getAllSegments(container, path, headers)
}
|
||||
|
||||
// Seek sets the offset for the next write operation
|
||||
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case 0:
|
||||
file.filePos = offset
|
||||
case 1:
|
||||
file.filePos += offset
|
||||
case 2:
|
||||
file.filePos = file.currentLength + offset
|
||||
default:
|
||||
return -1, fmt.Errorf("invalid value for whence")
|
||||
}
|
||||
if file.filePos < 0 {
|
||||
return -1, fmt.Errorf("negative offset")
|
||||
}
|
||||
return file.filePos, nil
|
||||
}
|
||||
|
||||
// Size returns the total size of the large object, i.e. the sum of
// the sizes of all uploaded segments.
func (file *largeObjectCreateFile) Size() int64 {
	return file.currentLength
}
|
||||
|
||||
// withLORetry calls fn until the headers it returns no longer describe
// a DLO, or the reported size agrees with expectedSize (when
// expectedSize is 0 any non-zero size is accepted). Retries back off
// exponentially, starting at readAfterWriteWait and doubling, and give
// up with an error after readAfterWriteTimeout.
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
	endTimer := time.NewTimer(readAfterWriteTimeout)
	defer endTimer.Stop()
	waitingTime := readAfterWriteWait
	for {
		var headers Headers
		var sz int64
		if headers, sz, err = fn(); err == nil {
			if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
				return
			}
		} else {
			return
		}
		waitTimer := time.NewTimer(waitingTime)
		select {
		case <-endTimer.C:
			waitTimer.Stop()
			err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
			return
		case <-waitTimer.C:
			// Exponential back-off between polls.
			waitingTime *= 2
		}
	}
}
|
||||
|
||||
// waitForSegmentsToShowUp polls the object's metadata until its
// reported size matches expectedSize, using withLORetry for the
// back-off and timeout behaviour.
func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
	err = withLORetry(expectedSize, func() (Headers, int64, error) {
		var info Object
		var headers Headers
		info, headers, err = c.objectBase(container, objectName)
		if err != nil {
			return headers, 0, err
		}
		return headers, info.Bytes, nil
	})
	return
}
|
||||
|
||||
// Write satisfies the io.Writer interface
//
// Locates the segment containing file.filePos, then (re)writes
// segments from there until buf is consumed, finally recomputing the
// total length from the segment sizes.
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
	var sz int64
	var relativeFilePos int
	writeSegmentIdx := 0
	// Find the segment the current file position falls into, and the
	// offset within it. The last segment also absorbs positions within
	// minChunkSize of its start.
	for i, obj := range file.segments {
		if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
			relativeFilePos = int(file.filePos - sz)
			break
		}
		writeSegmentIdx++
		sz += obj.Bytes
	}
	sizeToWrite := len(buf)
	for offset := 0; offset < sizeToWrite; {
		// writeSegment consumes up to one segment's worth of buf.
		newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
		if err != nil {
			return 0, err
		}
		if writeSegmentIdx < len(file.segments) {
			file.segments[writeSegmentIdx] = *newSegment
		} else {
			file.segments = append(file.segments, *newSegment)
		}
		offset += n
		writeSegmentIdx++
		relativeFilePos = 0
	}
	file.filePos += int64(sizeToWrite)
	// Recompute the total length from the segment sizes.
	file.currentLength = 0
	for _, obj := range file.segments {
		file.currentLength += obj.Bytes
	}
	return sizeToWrite, nil
}
|
||||
|
||||
// writeSegment (re)uploads the segment with index writeSegmentIdx,
// splicing buf into it at offset relativeFilePos. Existing data before
// the splice point and beyond the newly written bytes is preserved by
// range-reading it back from the existing segment. Returns the
// resulting segment object and the number of bytes of buf consumed.
func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
	var (
		readers         []io.Reader
		existingSegment *Object
		segmentSize     int
	)
	segmentName := getSegment(file.prefix, writeSegmentIdx+1)
	sizeToRead := int(file.chunkSize)
	if writeSegmentIdx < len(file.segments) {
		existingSegment = &file.segments[writeSegmentIdx]
		if writeSegmentIdx != len(file.segments)-1 {
			// Non-final segments keep their existing size.
			sizeToRead = int(existingSegment.Bytes)
		}
		if relativeFilePos > 0 {
			// Preserve the head of the existing segment before the
			// splice point.
			headers := make(Headers)
			headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
			existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
			if err != nil {
				return nil, 0, err
			}
			defer existingSegmentReader.Close()
			sizeToRead -= relativeFilePos
			segmentSize += relativeFilePos
			readers = []io.Reader{existingSegmentReader}
		}
	}
	if sizeToRead > len(buf) {
		sizeToRead = len(buf)
	}
	segmentSize += sizeToRead
	readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
	if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
		// Preserve the tail of the existing segment beyond the newly
		// written bytes.
		headers := make(Headers)
		headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
		tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
		if err != nil {
			return nil, 0, err
		}
		defer tailSegmentReader.Close()
		segmentSize = int(existingSegment.Bytes)
		readers = append(readers, tailSegmentReader)
	}
	segmentReader := io.MultiReader(readers...)
	headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
	if err != nil {
		return nil, 0, err
	}
	return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
}
|
||||
|
||||
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
|
||||
if !opts.NoBuffer {
|
||||
return &bufferedLargeObjectFile{
|
||||
LargeObjectFile: lo,
|
||||
bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
|
||||
}
|
||||
}
|
||||
return lo
|
||||
}
|
||||
|
||||
// bufferedLargeObjectFile wraps a LargeObjectFile with a bufio.Writer
// sized to the chunk size, overriding the write/seek/flush/size
// methods to account for buffered bytes.
type bufferedLargeObjectFile struct {
	LargeObjectFile
	bw *bufio.Writer
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Close() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Close()
|
||||
}
|
||||
|
||||
// Write buffers p, only reaching the underlying LargeObjectFile when
// the buffer fills.
func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
	return blo.bw.Write(p)
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return blo.LargeObjectFile.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// Size returns the current size including bytes still sitting in the
// buffer.
func (blo *bufferedLargeObjectFile) Size() int64 {
	return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
}
|
||||
|
||||
func (blo *bufferedLargeObjectFile) Flush() error {
|
||||
err := blo.bw.Flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return blo.LargeObjectFile.Flush()
|
||||
}
|
||||
174
.rclone_repo/vendor/github.com/ncw/swift/meta.go
generated
vendored
Executable file
174
.rclone_repo/vendor/github.com/ncw/swift/meta.go
generated
vendored
Executable file
@@ -0,0 +1,174 @@
|
||||
// Metadata manipulation in and out of Headers
|
||||
|
||||
package swift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Metadata stores account, container or object metadata.
// Keys are lower-cased versions of the header suffix (see Metadata()).
type Metadata map[string]string
|
||||
|
||||
// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case
|
||||
func (h Headers) Metadata(metaPrefix string) Metadata {
|
||||
m := Metadata{}
|
||||
metaPrefix = http.CanonicalHeaderKey(metaPrefix)
|
||||
for key, value := range h {
|
||||
if strings.HasPrefix(key, metaPrefix) {
|
||||
metaKey := strings.ToLower(key[len(metaPrefix):])
|
||||
m[metaKey] = value
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// AccountMetadata converts Headers from account to a Metadata.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case.
|
||||
func (h Headers) AccountMetadata() Metadata {
|
||||
return h.Metadata("X-Account-Meta-")
|
||||
}
|
||||
|
||||
// ContainerMetadata converts Headers from container to a Metadata.
|
||||
//
|
||||
// The keys in the Metadata will be converted to lower case.
|
||||
func (h Headers) ContainerMetadata() Metadata {
|
||||
return h.Metadata("X-Container-Meta-")
|
||||
}
|
||||
|
||||
// ObjectMetadata converts Headers from object to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ObjectMetadata() Metadata {
	// Object metadata lives under the X-Object-Meta- header prefix.
	return h.Metadata("X-Object-Meta-")
}
|
||||
|
||||
// Headers convert the Metadata starting with the metaPrefix into a
// Headers.
//
// The keys in the Metadata will be converted from lower case to http
// Canonical (see http.CanonicalHeaderKey).
func (m Metadata) Headers(metaPrefix string) Headers {
	headers := Headers{}
	for name, value := range m {
		canonical := http.CanonicalHeaderKey(metaPrefix + name)
		headers[canonical] = value
	}
	return headers
}
|
||||
|
||||
// AccountHeaders converts the Metadata for the account.
func (m Metadata) AccountHeaders() Headers {
	// Inverse of Headers.AccountMetadata: prepends X-Account-Meta-.
	return m.Headers("X-Account-Meta-")
}
|
||||
|
||||
// ContainerHeaders converts the Metadata for the container.
func (m Metadata) ContainerHeaders() Headers {
	// Inverse of Headers.ContainerMetadata: prepends X-Container-Meta-.
	return m.Headers("X-Container-Meta-")
}
|
||||
|
||||
// ObjectHeaders converts the Metadata for the object.
func (m Metadata) ObjectHeaders() Headers {
	// Inverse of Headers.ObjectMetadata: prepends X-Object-Meta-.
	return m.Headers("X-Object-Meta-")
}
|
||||
|
||||
// nsToFloatString turns a number of ns into a floating point string in
// seconds.
//
// Trims trailing zeros and is guaranteed to be perfectly accurate
// (the conversion is done entirely in decimal text, never via float64).
func nsToFloatString(ns int64) string {
	if ns < 0 {
		return "-" + nsToFloatString(-ns)
	}
	// Zero-pad to at least 10 digits so there is always one integer
	// digit plus exactly 9 fractional (nanosecond) digits to split off.
	digits := fmt.Sprintf("%010d", ns)
	cut := len(digits) - 9
	whole := digits[:cut]
	frac := strings.TrimRight(digits[cut:], "0")
	if frac == "" {
		return whole
	}
	return whole + "." + frac
}
|
||||
|
||||
// floatStringToNs turns a floating point string in seconds into a ns
// integer.
//
// Guaranteed to be perfectly accurate - the string is rewritten into a
// whole number of nanoseconds and parsed as an integer, so no float64
// rounding is involved. Sub-nanosecond digits are discarded.
func floatStringToNs(s string) (int64, error) {
	const zeros = "000000000"
	point := strings.IndexRune(s, '.')
	switch {
	case point >= 0:
		frac := s[point+1:]
		if len(frac) > 9 {
			// More than ns precision - truncate the excess digits.
			frac = frac[:9]
		} else {
			// Pad the fraction out to exactly 9 digits.
			frac += zeros[:9-len(frac)]
		}
		s = s[:point] + frac
	case len(s) > 0:
		// Whole seconds only - scale to ns. The empty string is left
		// untouched so ParseInt reports an error for it.
		s += zeros
	}
	return strconv.ParseInt(s, 10, 64)
}
|
||||
|
||||
// FloatStringToTime converts a floating point number string to a time.Time
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number should be in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z"
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
//
// If an error is returned then time will be returned as the zero time.
func FloatStringToTime(s string) (time.Time, error) {
	ns, err := floatStringToNs(s)
	if err != nil {
		// Zero time on failure, per the contract above.
		return time.Time{}, err
	}
	return time.Unix(0, ns), nil
}
|
||||
|
||||
// TimeToFloatString converts a time.Time object to a floating point string
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number is in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
// from the output.
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
func TimeToFloatString(t time.Time) string {
	// NOTE(review): t.UnixNano() is only defined for dates within the
	// int64-nanosecond range (roughly years 1678-2262) - callers are
	// presumed to stay in range.
	return nsToFloatString(t.UnixNano())
}
|
||||
|
||||
// GetModTime reads a modification time (mtime) from a Metadata object
//
// This is a defacto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
//
// If an error is returned then time will be returned as the zero time.
func (m Metadata) GetModTime() (t time.Time, err error) {
	// A missing 'mtime' key yields "", which FloatStringToTime rejects,
	// so objects without an mtime return the zero time plus an error.
	return FloatStringToTime(m["mtime"])
}
|
||||
|
||||
// SetModTime writes a modification time (mtime) to a Metadata object
//
// This is a defacto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
func (m Metadata) SetModTime(t time.Time) {
	// Stored as a decimal seconds string so full ns precision survives
	// the round trip through GetModTime.
	m["mtime"] = TimeToFloatString(t)
}
|
||||
55
.rclone_repo/vendor/github.com/ncw/swift/notes.txt
generated
vendored
Executable file
55
.rclone_repo/vendor/github.com/ncw/swift/notes.txt
generated
vendored
Executable file
@@ -0,0 +1,55 @@
|
||||
Notes on Go Swift
|
||||
=================
|
||||
|
||||
Make a builder style interface like the Google Go APIs? Advantages
|
||||
are that it is easy to add named methods to the service object to do
|
||||
specific things. Slightly less efficient. Not sure about how to
|
||||
return extra stuff though - in an object?
|
||||
|
||||
Make a container struct so these could be methods on it?
|
||||
|
||||
Make noResponse check for 204?
|
||||
|
||||
Make storage public so it can be extended easily?
|
||||
|
||||
Rename to go-swift to match user agent string?
|
||||
|
||||
Reconnect on auth error - 401 when token expires isn't tested
|
||||
|
||||
Make more api compatible with python cloudfiles?
|
||||
|
||||
Retry operations on timeout / network errors?
|
||||
- also 408 error
|
||||
- GET requests only?
|
||||
|
||||
Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock
|
||||
|
||||
Add extra headers field to Connection (for via etc)
|
||||
|
||||
Make errors use an error hierarchy so they can be caught with a type assertion
|
||||
|
||||
Error(...)
|
||||
ObjectCorrupted{ Error }
|
||||
|
||||
Make a Debug flag in connection for logging stuff
|
||||
|
||||
Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
|
||||
|
||||
Object range
|
||||
|
||||
Object create, update with X-Delete-At or X-Delete-After
|
||||
|
||||
Large object support
|
||||
- check uploads are less than 5GB in normal mode?
|
||||
|
||||
Access control CORS?
|
||||
|
||||
Swift client retries and backs off for all types of errors
|
||||
|
||||
Implement net error interface?
|
||||
|
||||
type Error interface {
|
||||
error
|
||||
Timeout() bool // Is the error a timeout?
|
||||
Temporary() bool // Is the error temporary?
|
||||
}
|
||||
171
.rclone_repo/vendor/github.com/ncw/swift/slo.go
generated
vendored
Executable file
171
.rclone_repo/vendor/github.com/ncw/swift/slo.go
generated
vendored
Executable file
@@ -0,0 +1,171 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
)
|
||||
|
||||
// StaticLargeObjectCreateFile represents an open static large object
type StaticLargeObjectCreateFile struct {
	// Embeds the generic large-object writer; SLO-specific behaviour
	// (writing the manifest) is layered on in Close/Flush.
	largeObjectCreateFile
}
|
||||
|
||||
// SLONotSupported is returned when the server does not advertise
// Static Large Object support (or the capability query failed).
// NOTE(review): Go convention would name this ErrSLONotSupported, but
// the name is part of the public API and must not change.
var SLONotSupported = errors.New("SLO not supported")
|
||||
|
||||
// swiftSegment describes one segment of a large object, both when
// uploading an SLO manifest and when reading one back from the server.
type swiftSegment struct {
	Path string `json:"path,omitempty"`
	Etag string `json:"etag,omitempty"`
	Size int64  `json:"size_bytes,omitempty"`
	// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
	// but when querying the JSON content of a manifest with the `multipart-manifest=get`
	// parameter, Swift names those attributes `name`, `hash` and `bytes`.
	// We use all the different attributes names in this structure to be able to use
	// the same structure for both uploading and retrieving.
	Name         string `json:"name,omitempty"`
	Hash         string `json:"hash,omitempty"`
	Bytes        int64  `json:"bytes,omitempty"`
	ContentType  string `json:"content_type,omitempty"`
	LastModified string `json:"last_modified,omitempty"`
}
|
||||
|
||||
// StaticLargeObjectCreateFile creates a static large object returning
// an object which satisfies io.Writer, io.Seeker, io.Closer and
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
// method.
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		// NOTE(review): a failed capability query is reported as
		// SLONotSupported, masking the underlying error.
		return nil, SLONotSupported
	}
	// The server imposes a minimum segment size; raise the caller's
	// chunk size to meet it. Note this mutates the caller's opts.
	realMinChunkSize := info.SLOMinSegmentSize()
	if realMinChunkSize > opts.MinChunkSize {
		opts.MinChunkSize = realMinChunkSize
	}
	lo, err := c.largeObjectCreate(opts)
	if err != nil {
		return nil, err
	}
	// Wrap with buffering so small writes are coalesced into
	// segment-sized chunks.
	return withBuffer(opts, &StaticLargeObjectCreateFile{
		largeObjectCreateFile: *lo,
	}), nil
}
|
||||
|
||||
// StaticLargeObjectCreate creates or truncates an existing static
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling StaticLargeObjectCreateFile
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
	// Always create-or-truncate; callers wanting other semantics
	// should call StaticLargeObjectCreateFile with their own flags.
	opts.Flags = os.O_TRUNC | os.O_CREATE
	return c.StaticLargeObjectCreateFile(opts)
}
|
||||
|
||||
// StaticLargeObjectDelete deletes a static large object and all of its segments.
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
	info, err := c.cachedQueryInfo()
	if err != nil || !info.SupportsSLO() {
		// NOTE(review): query errors are reported as SLONotSupported,
		// hiding the underlying cause.
		return SLONotSupported
	}
	// Deletion of manifest plus segments is shared with DLOs.
	return c.LargeObjectDelete(container, path)
}
|
||||
|
||||
// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
//
// The move is manifest-only: a new manifest pointing at the existing
// segments is written at the destination, then the source manifest is
// deleted. The segment data itself is not copied.
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
	swiftInfo, err := c.cachedQueryInfo()
	if err != nil || !swiftInfo.SupportsSLO() {
		return SLONotSupported
	}
	// Fetch the source object's info and headers so the content type
	// and metadata can be reproduced on the destination manifest.
	info, headers, err := c.Object(srcContainer, srcObjectName)
	if err != nil {
		return err
	}

	container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
	if err != nil {
		return err
	}

	//copy only metadata during move (other headers might not be safe for copying)
	headers = headers.ObjectMetadata().ObjectHeaders()

	if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil {
		return err
	}

	// Only remove the source once the destination manifest is in place.
	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// createSLOManifest creates a static large object manifest listing the
// given segments, uploading it as the object at container/path.
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
	// Build the upload-side manifest entries (path/etag/size_bytes
	// fields of swiftSegment).
	sloSegments := make([]swiftSegment, len(segments))
	for i, segment := range segments {
		sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
		sloSegments[i].Etag = segment.Hash
		sloSegments[i].Size = segment.Bytes
	}

	content, err := json.Marshal(sloSegments)
	if err != nil {
		return err
	}

	// multipart-manifest=put tells Swift the body is an SLO manifest
	// rather than ordinary object data.
	values := url.Values{}
	values.Set("multipart-manifest", "put")
	if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// Close finalises the static large object by writing its manifest
// (via Flush), satisfying io.Closer.
func (file *StaticLargeObjectCreateFile) Close() error {
	return file.Flush()
}
|
||||
|
||||
// Flush writes the SLO manifest describing the uploaded segments and
// then waits until the server reports the object at its full size.
func (file *StaticLargeObjectCreateFile) Flush() error {
	if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil {
		return err
	}
	// Swift is eventually consistent; block until the manifest shows
	// the expected size before reporting success.
	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}
|
||||
|
||||
// getAllSLOSegments fetches the manifest of a static large object and
// returns the container holding the segments together with an Object
// describing each segment.
func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
	var (
		segmentList      []swiftSegment
		segments         []Object
		segPath          string
		segmentContainer string
	)

	// multipart-manifest=get returns the manifest JSON itself rather
	// than the concatenated object contents.
	values := url.Values{}
	values.Set("multipart-manifest", "get")

	file, _, err := c.objectOpen(container, path, true, nil, values)
	if err != nil {
		return "", nil, err
	}
	// NOTE(review): the opened object is never closed here - confirm
	// whether the handle from objectOpen requires an explicit Close.

	content, err := ioutil.ReadAll(file)
	if err != nil {
		return "", nil, err
	}

	// Bug fix: the unmarshal error was previously discarded, silently
	// turning a corrupt manifest into an empty segment list.
	if err := json.Unmarshal(content, &segmentList); err != nil {
		return "", nil, err
	}
	for _, segment := range segmentList {
		// Manifest names look like "/container/path"; strip the
		// leading slash and split off the container.
		segmentContainer, segPath = parseFullPath(segment.Name[1:])
		segments = append(segments, Object{
			Name:  segPath,
			Bytes: segment.Bytes,
			Hash:  segment.Hash,
		})
	}

	return segmentContainer, segments, nil
}
|
||||
2204
.rclone_repo/vendor/github.com/ncw/swift/swift.go
generated
vendored
Executable file
2204
.rclone_repo/vendor/github.com/ncw/swift/swift.go
generated
vendored
Executable file
File diff suppressed because it is too large
Load Diff
59
.rclone_repo/vendor/github.com/ncw/swift/timeout_reader.go
generated
vendored
Executable file
59
.rclone_repo/vendor/github.com/ncw/swift/timeout_reader.go
generated
vendored
Executable file
@@ -0,0 +1,59 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An io.ReadCloser which obeys an idle timeout
type timeoutReader struct {
	reader  io.ReadCloser // underlying stream being wrapped
	timeout time.Duration // maximum wait for a single Read call
	cancel  func()        // invoked when a Read times out
}
|
||||
|
||||
// Returns a wrapper around the reader which obeys an idle
// timeout. The cancel function is called if the timeout happens
//
// The timeout applies per Read call, not to the whole stream.
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
	return &timeoutReader{
		reader:  reader,
		timeout: timeout,
		cancel:  cancel,
	}
}
|
||||
|
||||
// Read reads up to len(p) bytes into p
//
// Waits at most for timeout for the read to complete otherwise returns a timeout
func (t *timeoutReader) Read(p []byte) (int, error) {
	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
	// Do the read in the background
	type result struct {
		n   int
		err error
	}
	// Buffered (capacity 1) so the reading goroutine can always
	// deliver its result and exit even after we return on timeout.
	done := make(chan result, 1)
	go func() {
		n, err := t.reader.Read(p)
		done <- result{n, err}
	}()
	// Wait for the read or the timeout
	timer := time.NewTimer(t.timeout)
	defer timer.Stop()
	select {
	case r := <-done:
		return r.n, r.err
	case <-timer.C:
		t.cancel()
		return 0, TimeoutError
	}
	// The dead `panic("unreachable")` that was only needed by Go 1.0
	// has been removed: since Go 1.1 this select is itself terminating.
}
|
||||
|
||||
// Close closes the underlying reader.
// (The original comment said "Close the channel", which was wrong -
// no channel is involved here.)
func (t *timeoutReader) Close() error {
	return t.reader.Close()
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ io.ReadCloser = &timeoutReader{}
|
||||
22
.rclone_repo/vendor/github.com/ncw/swift/travis_realserver.sh
generated
vendored
Executable file
22
.rclone_repo/vendor/github.com/ncw/swift/travis_realserver.sh
generated
vendored
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
# Run the integration tests against a real Swift server on Travis, but
# only for trusted (non pull request) builds, since the credentials are
# supplied via secure environment variables.
set -e

# Bug fix: Travis sets TRAVIS_PULL_REQUEST to "false" for branch builds
# and to the PR number otherwise - it is never "true", so the previous
# `= "true"` comparison made this guard inert. Skip only when the
# variable is set and not "false" so local runs are unaffected.
if [ -n "${TRAVIS_PULL_REQUEST}" ] && [ "${TRAVIS_PULL_REQUEST}" != "false" ]; then
  exit 0
fi

if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
  echo "Running tests pointing to Rackspace"
  export SWIFT_API_KEY=$RACKSPACE_APIKEY
  export SWIFT_API_USER=$RACKSPACE_USER
  export SWIFT_AUTH_URL=$RACKSPACE_AUTH
  go test ./...
fi

if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
  echo "Running tests pointing to Memset"
  export SWIFT_API_KEY=$MEMSET_APIKEY
  export SWIFT_API_USER=$MEMSET_USER
  export SWIFT_AUTH_URL=$MEMSET_AUTH
  go test
fi
||||
55
.rclone_repo/vendor/github.com/ncw/swift/watchdog_reader.go
generated
vendored
Executable file
55
.rclone_repo/vendor/github.com/ncw/swift/watchdog_reader.go
generated
vendored
Executable file
@@ -0,0 +1,55 @@
|
||||
package swift
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// watchdogChunkSize caps how many bytes are requested from the
// underlying reader per call so the watchdog is kicked regularly even
// during large reads over slow connections.
var watchdogChunkSize = 1 << 20 // 1 MiB

// An io.Reader which resets a watchdog timer whenever data is read
type watchdogReader struct {
	timeout   time.Duration // interval granted to each chunked read
	reader    io.Reader     // underlying data source
	timer     *time.Timer   // the watchdog timer to kick
	chunkSize int           // per-read cap, normally watchdogChunkSize
}
||||
|
||||
// Returns a new reader which will kick the watchdog timer whenever data is read
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
	return &watchdogReader{
		timeout: timeout,
		reader:  reader,
		// chunkSize is taken from the package variable, presumably so
		// it can be overridden in tests - TODO confirm.
		timer:     timer,
		chunkSize: watchdogChunkSize,
	}
}
|
||||
|
||||
// Read reads up to len(p) bytes into p
|
||||
func (t *watchdogReader) Read(p []byte) (int, error) {
|
||||
//read from underlying reader in chunks not larger than t.chunkSize
|
||||
//while resetting the watchdog timer before every read; the small chunk
|
||||
//size ensures that the timer does not fire when reading a large amount of
|
||||
//data from a slow connection
|
||||
start := 0
|
||||
end := len(p)
|
||||
for start < end {
|
||||
length := end - start
|
||||
if length > t.chunkSize {
|
||||
length = t.chunkSize
|
||||
}
|
||||
|
||||
resetTimer(t.timer, t.timeout)
|
||||
n, err := t.reader.Read(p[start : start+length])
|
||||
start += n
|
||||
if n == 0 || err != nil {
|
||||
return start, err
|
||||
}
|
||||
}
|
||||
|
||||
resetTimer(t.timer, t.timeout)
|
||||
return start, nil
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ io.Reader = &watchdogReader{}
|
||||
Reference in New Issue
Block a user