VENDOR intensifies
313 vendor/github.com/ncw/rclone/lib/dircache/dircache.go generated vendored Executable file
@@ -0,0 +1,313 @@
// Package dircache provides a simple cache for caching directory to path lookups
package dircache

// _methods are called without the lock

import (
    "log"
    "strings"
    "sync"

    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
)

// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
    cacheMu      sync.RWMutex
    cache        map[string]string
    invCache     map[string]string
    mu           sync.Mutex
    fs           DirCacher // Interface to find and make stuff
    trueRootID   string    // ID of the absolute root
    root         string    // the path we are working on
    rootID       string    // ID of the root directory
    rootParentID string    // ID of the root's parent directory
    foundRoot    bool      // Whether we have found the root or not
}

// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
    FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
    CreateDir(pathID, leaf string) (newID string, err error)
}

// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
    d := &DirCache{
        trueRootID: trueRootID,
        root:       root,
        fs:         fs,
    }
    d.Flush()
    d.ResetRoot()
    return d
}

// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
    dc.cacheMu.RLock()
    id, ok = dc.cache[path]
    dc.cacheMu.RUnlock()
    return
}

// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
    dc.cacheMu.RLock()
    path, ok = dc.invCache[id]
    dc.cacheMu.RUnlock()
    return
}

// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
    dc.cacheMu.Lock()
    dc.cache[path] = id
    dc.invCache[id] = path
    dc.cacheMu.Unlock()
}

// Flush the map of all data
func (dc *DirCache) Flush() {
    dc.cacheMu.Lock()
    dc.cache = make(map[string]string)
    dc.invCache = make(map[string]string)
    dc.cacheMu.Unlock()
}

// FlushDir flushes the map of all data starting with dir
//
// If dir is empty then this is equivalent to calling ResetRoot
func (dc *DirCache) FlushDir(dir string) {
    if dir == "" {
        dc.ResetRoot()
        return
    }
    dc.cacheMu.Lock()

    // Delete the root dir
    ID, ok := dc.cache[dir]
    if ok {
        delete(dc.cache, dir)
        delete(dc.invCache, ID)
    }

    // And any sub directories
    dir += "/"
    for key, ID := range dc.cache {
        if strings.HasPrefix(key, dir) {
            delete(dc.cache, key)
            delete(dc.invCache, ID)
        }
    }

    dc.cacheMu.Unlock()
}

// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
    lastSlash := strings.LastIndex(path, "/")
    if lastSlash >= 0 {
        directory = path[:lastSlash]
        leaf = path[lastSlash+1:]
    } else {
        directory = ""
        leaf = path
    }
    return
}

// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
//  Look in the cache for the path, if found return the pathID
//  If not found strip the last path off the path and recurse
//  Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
    dc.mu.Lock()
    defer dc.mu.Unlock()
    return dc._findDir(path, create)
}

// Look for the root and in the cache - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
    // fmt.Println("Finding",path,"create",create,"cache",cache)
    // If it is the root, then return it
    if path == "" {
        // fmt.Println("Root")
        return dc.rootID
    }

    // If it is in the cache then return it
    pathID, ok := dc.Get(path)
    if ok {
        // fmt.Println("Cache hit on", path)
        return pathID
    }

    return ""
}

// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
    pathID = dc._findDirInCache(path)
    if pathID != "" {
        return pathID, nil
    }

    // Split the path into directory, leaf
    directory, leaf := SplitPath(path)

    // Recurse and find pathID for parent directory
    parentPathID, err := dc._findDir(directory, create)
    if err != nil {
        return "", err
    }

    // Find the leaf in parentPathID
    pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
    if err != nil {
        return "", err
    }

    // If not found create the directory if required or return an error
    if !found {
        if create {
            pathID, err = dc.fs.CreateDir(parentPathID, leaf)
            if err != nil {
                return "", errors.Wrap(err, "failed to make directory")
            }
        } else {
            return "", fs.ErrorDirNotFound
        }
    }

    // Store the leaf directory in the cache
    dc.Put(path, pathID)

    // fmt.Println("Dir", path, "is", pathID)
    return pathID, nil
}

// FindPath finds the leaf and directoryID from a path
//
// Do not call FindPath with the root directory - it will return an error
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
    if path == "" {
        err = errors.New("internal error: can't call FindPath with root directory")
        return
    }
    dc.mu.Lock()
    defer dc.mu.Unlock()
    directory, leaf := SplitPath(path)
    directoryID, err = dc._findDir(directory, create)
    return
}

// FindRoot finds the root directory if not already found
//
// Resets the root directory
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
    dc.mu.Lock()
    defer dc.mu.Unlock()
    if dc.foundRoot {
        return nil
    }
    rootID, err := dc._findDir(dc.root, create)
    if err != nil {
        return err
    }
    dc.foundRoot = true
    dc.rootID = rootID

    // Find the parent of the root while we still have the root
    // directory tree cached
    rootParentPath, _ := SplitPath(dc.root)
    dc.rootParentID, _ = dc.Get(rootParentPath)

    // Reset the tree based on dc.root
    dc.Flush()
    // Put the root directory in
    dc.Put("", dc.rootID)
    return nil
}

// FindRootAndPath finds the root first if not found then finds leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindRootAndPath(path string, create bool) (leaf, directoryID string, err error) {
    err = dc.FindRoot(create)
    if err != nil {
        return
    }
    return dc.FindPath(path, create)
}

// FoundRoot returns whether the root directory has been found yet
//
// Call this from FindLeaf or CreateDir only
func (dc *DirCache) FoundRoot() bool {
    return dc.foundRoot
}

// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
    dc.mu.Lock()
    defer dc.mu.Unlock()
    if !dc.foundRoot {
        log.Fatalf("Internal Error: RootID() called before FindRoot")
    }
    return dc.rootID
}

// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
    dc.mu.Lock()
    defer dc.mu.Unlock()
    if !dc.foundRoot {
        return "", errors.New("internal error: RootID() called before FindRoot")
    }
    if dc.rootParentID == "" {
        return "", errors.New("internal error: didn't find rootParentID")
    }
    if dc.rootID == dc.trueRootID {
        return "", errors.New("is root directory")
    }
    return dc.rootParentID, nil
}

// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
    dc.mu.Lock()
    defer dc.mu.Unlock()
    dc.foundRoot = false
    dc.Flush()

    // Put the true root in
    dc.rootID = dc.trueRootID

    // Put the root directory in
    dc.Put("", dc.rootID)
}
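Not part of the vendored diff: a minimal sketch of how a backend might drive DirCache, with a toy in-memory DirCacher. memFs and its "id-N" scheme are invented here for illustration; real backends implement FindLeaf/CreateDir against their remote API.

package main

import (
    "fmt"

    "github.com/ncw/rclone/lib/dircache"
)

// memFs is a hypothetical DirCacher backed by a map of "parentID/leaf" -> ID.
type memFs struct {
    ids  map[string]string
    next int
}

func (m *memFs) FindLeaf(pathID, leaf string) (string, bool, error) {
    id, ok := m.ids[pathID+"/"+leaf]
    return id, ok, nil
}

func (m *memFs) CreateDir(pathID, leaf string) (string, error) {
    m.next++
    id := fmt.Sprintf("id-%d", m.next)
    m.ids[pathID+"/"+leaf] = id
    return id, nil
}

func main() {
    backend := &memFs{ids: map[string]string{}}
    dc := dircache.New("work", "root", backend) // remote rooted at "work"
    if err := dc.FindRoot(true); err != nil {   // create the root if needed
        panic(err)
    }
    leaf, dirID, err := dc.FindRootAndPath("a/b/c.txt", true)
    fmt.Println(leaf, dirID, err) // "c.txt", the ID of work/a/b, <nil>
}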
545 vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil.go generated vendored Executable file
@@ -0,0 +1,545 @@
package oauthutil

import (
    "context"
    "crypto/rand"
    "encoding/json"
    "fmt"
    "html/template"
    "log"
    "net"
    "net/http"
    "strings"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/pkg/errors"
    "github.com/skratchdot/open-golang/open"
    "golang.org/x/oauth2"
)

const (
    // TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
    // code should be returned in the title bar of the browser, with the page text
    // prompting the user to copy the code and paste it in the application.
    TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"

    // bindPort is the port that we bind the local webserver to
    bindPort = "53682"

    // bindAddress is binding for local webserver when active
    bindAddress = "127.0.0.1:" + bindPort

    // RedirectURL is redirect to local webserver when active
    RedirectURL = "http://" + bindAddress + "/"

    // RedirectPublicURL is redirect to local webserver when active with public name
    RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"

    // RedirectLocalhostURL is redirect to local webserver when active with localhost
    RedirectLocalhostURL = "http://localhost:" + bindPort + "/"

    // AuthResponse is a template to handle the redirect URL for oauth requests
    AuthResponse = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</title>
</head>
<body>
<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
<hr>
<pre style="width: 750px; white-space: pre-wrap;">
{{ if eq .OK false }}
Error: {{ .AuthError.Name }}<br>
{{ if .AuthError.Description }}Description: {{ .AuthError.Description }}<br>{{ end }}
{{ if .AuthError.Code }}Code: {{ .AuthError.Code }}<br>{{ end }}
{{ if .AuthError.HelpURL }}Look here for help: <a href="{{ .AuthError.HelpURL }}">{{ .AuthError.HelpURL }}</a><br>{{ end }}
{{ else }}
{{ if .Code }}
Please copy this code into rclone:
{{ .Code }}
{{ else }}
All done. Please go back to rclone.
{{ end }}
{{ end }}
</pre>
</body>
</html>
`
)

// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
    AccessToken  string
    RefreshToken string
    Expiry       time.Time
}

// GetToken returns the token saved in the config file under
// section name.
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
    tokenString, ok := m.Get(config.ConfigToken)
    if !ok || tokenString == "" {
        return nil, errors.New("empty token found - please run rclone config again")
    }
    token := new(oauth2.Token)
    err := json.Unmarshal([]byte(tokenString), token)
    if err != nil {
        return nil, err
    }
    // if has data then return it
    if token.AccessToken != "" {
        return token, nil
    }
    // otherwise try parsing as oldToken
    oldtoken := new(oldToken)
    err = json.Unmarshal([]byte(tokenString), oldtoken)
    if err != nil {
        return nil, err
    }
    // Fill in result into new token
    token.AccessToken = oldtoken.AccessToken
    token.RefreshToken = oldtoken.RefreshToken
    token.Expiry = oldtoken.Expiry
    // Save new format in config file
    err = PutToken(name, m, token, false)
    if err != nil {
        return nil, err
    }
    return token, nil
}

// PutToken stores the token in the config file
//
// This saves the config file if it changes
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
    tokenBytes, err := json.Marshal(token)
    if err != nil {
        return err
    }
    tokenString := string(tokenBytes)
    old, ok := m.Get(config.ConfigToken)
    if !ok || tokenString != old {
        err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
        if newSection && err != nil {
            fs.Debugf(name, "Added new token to config, still needs to be saved")
        } else if err != nil {
            fs.Errorf(nil, "Failed to save new token in config file: %v", err)
        } else {
            fs.Debugf(name, "Saved new token in config file")
        }
    }
    return nil
}

// TokenSource stores updated tokens in the config file
type TokenSource struct {
    mu          sync.Mutex
    name        string
    m           configmap.Mapper
    tokenSource oauth2.TokenSource
    token       *oauth2.Token
    config      *oauth2.Config
    ctx         context.Context
    expiryTimer *time.Timer // signals whenever the token expires
}

// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *TokenSource) Token() (*oauth2.Token, error) {
    ts.mu.Lock()
    defer ts.mu.Unlock()

    // Make a new token source if required
    if ts.tokenSource == nil {
        ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
    }

    token, err := ts.tokenSource.Token()
    if err != nil {
        return nil, err
    }
    changed := *token != *ts.token
    ts.token = token
    if changed {
        // Bump on the expiry timer if it is set
        if ts.expiryTimer != nil {
            ts.expiryTimer.Reset(ts.timeToExpiry())
        }
        err = PutToken(ts.name, ts.m, token, false)
        if err != nil {
            return nil, err
        }
    }
    return token, nil
}

// Invalidate invalidates the token
func (ts *TokenSource) Invalidate() {
    ts.mu.Lock()
    ts.token.AccessToken = ""
    ts.mu.Unlock()
}

// timeToExpiry returns how long until the token expires
//
// Call with the lock held
func (ts *TokenSource) timeToExpiry() time.Duration {
    t := ts.token
    if t == nil {
        return 0
    }
    if t.Expiry.IsZero() {
        return 3E9 * time.Second // ~95 years
    }
    return t.Expiry.Sub(time.Now())
}

// OnExpiry returns a channel which has the time written to it when
// the token expires. Note that there is only one channel so if
// attaching multiple go routines it will only signal to one of them.
func (ts *TokenSource) OnExpiry() <-chan time.Time {
    ts.mu.Lock()
    defer ts.mu.Unlock()
    if ts.expiryTimer == nil {
        ts.expiryTimer = time.NewTimer(ts.timeToExpiry())
    }
    return ts.expiryTimer.C
}

// Check interface satisfied
var _ oauth2.TokenSource = (*TokenSource)(nil)

// Context returns a context with our HTTP Client baked in for oauth2
func Context(client *http.Client) context.Context {
    return context.WithValue(context.Background(), oauth2.HTTPClient, client)
}

// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank.
// If any value is overridden, true is returned.
// the origConfig is copied
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
    newConfig = new(oauth2.Config)
    *newConfig = *origConfig
    changed = false
    ClientID, ok := m.Get(config.ConfigClientID)
    if ok && ClientID != "" {
        newConfig.ClientID = ClientID
        changed = true
    }
    ClientSecret, ok := m.Get(config.ConfigClientSecret)
    if ok && ClientSecret != "" {
        newConfig.ClientSecret = ClientSecret
        changed = true
    }
    AuthURL, ok := m.Get(config.ConfigAuthURL)
    if ok && AuthURL != "" {
        newConfig.Endpoint.AuthURL = AuthURL
        changed = true
    }
    TokenURL, ok := m.Get(config.ConfigTokenURL)
    if ok && TokenURL != "" {
        newConfig.Endpoint.TokenURL = TokenURL
        changed = true
    }
    return newConfig, changed
}

// NewClientWithBaseClient gets a token from the config file and
// configures a Client with it. It returns the client and a
// TokenSource which Invalidate may need to be called on. It uses the
// httpClient passed in as the base client.
func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
    config, _ = overrideCredentials(name, m, config)
    token, err := GetToken(name, m)
    if err != nil {
        return nil, nil, err
    }

    // Set our own http client in the context
    ctx := Context(baseClient)

    // Wrap the TokenSource in our TokenSource which saves changed
    // tokens in the config file
    ts := &TokenSource{
        name:   name,
        m:      m,
        token:  token,
        config: config,
        ctx:    ctx,
    }
    return oauth2.NewClient(ctx, ts), ts, nil
}

// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
    return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config))
}

// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
    return doConfig(id, name, m, nil, config, true, opts)
}

// ConfigNoOffline does the same as Config but does not pass the
// "access_type=offline" parameter.
func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
    return doConfig(id, name, m, nil, config, false, opts)
}

// ConfigErrorCheck does the same as Config, but allows the backend to pass an error handling function.
// This function gets called with the request made to rclone as a parameter if no code was found
func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
    return doConfig(id, name, m, errorHandler, config, true, opts)
}

func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
    oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
    auto, ok := m.Get(config.ConfigAutomatic)
    automatic := ok && auto != ""

    // See if already have a token
    tokenString, ok := m.Get("token")
    if ok && tokenString != "" {
        fmt.Printf("Already have a token - refresh?\n")
        if !config.Confirm() {
            return nil
        }
    }

    // Detect whether we should use internal web server
    useWebServer := false
    switch oauthConfig.RedirectURL {
    case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
        if changed {
            fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
        }
        useWebServer = true
        if automatic {
            break
        }
        fmt.Printf("Use auto config?\n")
        fmt.Printf(" * Say Y if not sure\n")
        fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
        auto := config.Confirm()
        if !auto {
            fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
            fmt.Printf("Execute the following on your machine:\n")
            if changed {
                fmt.Printf("\trclone authorize %q %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
            } else {
                fmt.Printf("\trclone authorize %q\n", id)
            }
            fmt.Println("Then paste the result below:")
            code := ""
            for code == "" {
                fmt.Printf("result> ")
                code = strings.TrimSpace(config.ReadLine())
            }
            token := &oauth2.Token{}
            err := json.Unmarshal([]byte(code), token)
            if err != nil {
                return err
            }
            return PutToken(name, m, token, false)
        }
    case TitleBarRedirectURL:
        useWebServer = automatic
        if !automatic {
            fmt.Printf("Use auto config?\n")
            fmt.Printf(" * Say Y if not sure\n")
            fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
            useWebServer = config.Confirm()
        }
        if useWebServer {
            // copy the config and set to use the internal webserver
            configCopy := *oauthConfig
            oauthConfig = &configCopy
            oauthConfig.RedirectURL = RedirectURL
        }
    }

    // Make random state
    stateBytes := make([]byte, 16)
    _, err := rand.Read(stateBytes)
    if err != nil {
        return err
    }
    state := fmt.Sprintf("%x", stateBytes)
    if offline {
        opts = append(opts, oauth2.AccessTypeOffline)
    }
    authURL := oauthConfig.AuthCodeURL(state, opts...)

    // Prepare webserver
    server := authServer{
        state:        state,
        bindAddress:  bindAddress,
        authURL:      authURL,
        errorHandler: errorHandler,
    }
    if useWebServer {
        server.code = make(chan string, 1)
        server.err = make(chan error, 1)
        go server.Start()
        defer server.Stop()
        authURL = "http://" + bindAddress + "/auth"
    }

    // Generate a URL for the user to visit for authorization.
    _ = open.Start(authURL)
    fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
    fmt.Printf("Log in and authorize rclone for access\n")

    var authCode string
    if useWebServer {
        // Read the code, and exchange it for a token.
        fmt.Printf("Waiting for code...\n")
        authCode = <-server.code
        authError := <-server.err
        if authCode != "" {
            fmt.Printf("Got code\n")
        } else {
            if authError != nil {
                return authError
            }
            return errors.New("failed to get code")
        }
    } else {
        // Read the code, and exchange it for a token.
        fmt.Printf("Enter verification code> ")
        authCode = config.ReadLine()
    }
    token, err := oauthConfig.Exchange(oauth2.NoContext, authCode)
    if err != nil {
        return errors.Wrap(err, "failed to get token")
    }

    // Print code if we do automatic retrieval
    if automatic {
        result, err := json.Marshal(token)
        if err != nil {
            return errors.Wrap(err, "failed to marshal token")
        }
        fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
    }
    return PutToken(name, m, token, true)
}

// Local web server for collecting auth
type authServer struct {
    state        string
    listener     net.Listener
    bindAddress  string
    code         chan string
    err          chan error
    authURL      string
    server       *http.Server
    errorHandler func(*http.Request) AuthError
}

// AuthError gets returned by the backend's errorHandler function
type AuthError struct {
    Name        string
    Description string
    Code        string
    HelpURL     string
}

// AuthResponseData can fill the AuthResponse template
type AuthResponseData struct {
    OK   bool   // Failure or Success?
    Code string // code to paste into rclone config
    AuthError
}

// Start runs an internal web server to receive config details
func (s *authServer) Start() {
    fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
    mux := http.NewServeMux()
    s.server = &http.Server{
        Addr:    s.bindAddress,
        Handler: mux,
    }
    s.server.SetKeepAlivesEnabled(false)
    mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
        http.Error(w, "", 404)
    })
    mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
        http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
    })
    mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
        w.Header().Set("Content-Type", "text/html")
        fs.Debugf(nil, "Received request on auth server")
        code := req.FormValue("code")
        var err error
        var t = template.Must(template.New("authResponse").Parse(AuthResponse))
        resp := AuthResponseData{AuthError: AuthError{}}
        if code != "" {
            state := req.FormValue("state")
            if state != s.state {
                fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
                resp.OK = false
                resp.AuthError = AuthError{
                    Name: "Auth State doesn't match",
                }
            } else {
                fs.Debugf(nil, "Successfully got code")
                resp.OK = true
                if s.code == nil {
                    resp.Code = code
                }
            }
        } else {
            fs.Debugf(nil, "No code found on request")
            var authError AuthError
            if s.errorHandler == nil {
                authError = AuthError{
                    Name:        "Auth Error",
                    Description: "No code found returned by remote server.",
                }
            } else {
                authError = s.errorHandler(req)
            }
            err = fmt.Errorf("Error: %s\nCode: %s\nDescription: %s\nHelp: %s",
                authError.Name, authError.Code, authError.Description, authError.HelpURL)
            resp.OK = false
            resp.AuthError = authError
            w.WriteHeader(500)
        }
        if err := t.Execute(w, resp); err != nil {
            fs.Debugf(nil, "Could not execute template for web response.")
        }
        if s.code != nil {
            s.code <- code
            s.err <- err
        }
    })

    var err error
    s.listener, err = net.Listen("tcp", s.bindAddress)
    if err != nil {
        log.Fatalf("Failed to start auth webserver: %v", err)
    }
    err = s.server.Serve(s.listener)
    fs.Debugf(nil, "Closed auth server with error: %v", err)
}
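Not part of the vendored diff: a hedged sketch of how a backend typically wires this up. The client ID/secret, endpoint URLs, scope, and remote name below are placeholders, not values from the source.

package main

import (
    "log"

    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/lib/oauthutil"
    "golang.org/x/oauth2"
)

var exampleConfig = &oauth2.Config{
    ClientID:     "CLIENT_ID",     // placeholder
    ClientSecret: "CLIENT_SECRET", // placeholder
    Endpoint: oauth2.Endpoint{
        AuthURL:  "https://example.com/oauth/authorize", // placeholder
        TokenURL: "https://example.com/oauth/token",     // placeholder
    },
    RedirectURL: oauthutil.RedirectURL,
    Scopes:      []string{"files.read"}, // placeholder
}

func newBackend(name string, m configmap.Mapper) {
    // NewClient reads the token saved for this remote and returns an
    // http.Client that refreshes it transparently; TokenSource.Token
    // writes changed tokens back to the config file.
    client, ts, err := oauthutil.NewClient(name, m, exampleConfig)
    if err != nil {
        log.Fatalf("failed to configure oauth: %v", err)
    }
    _ = client // use for authenticated API calls
    _ = ts     // keep around to call ts.Invalidate() on auth failures
}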
19 vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_new.go generated vendored Executable file
@@ -0,0 +1,19 @@
// oauthutil parts go1.8+

//+build go1.8

package oauthutil

import "github.com/ncw/rclone/fs"

// Stop stops the auth server, closing the code channel, the listener
// and (on go1.8+) the http.Server itself
func (s *authServer) Stop() {
    fs.Debugf(nil, "Closing auth server")
    if s.code != nil {
        close(s.code)
        s.code = nil
    }
    _ = s.listener.Close()

    // close the server
    _ = s.server.Close()
}
16 vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_old.go generated vendored Executable file
@@ -0,0 +1,16 @@
// oauthutil parts pre go1.8

//+build !go1.8

package oauthutil

import "github.com/ncw/rclone/fs"

// Stop stops the auth server, closing the code channel and the
// listener (pre go1.8 http.Server has no Close method)
func (s *authServer) Stop() {
    fs.Debugf(nil, "Closing auth server")
    if s.code != nil {
        close(s.code)
        s.code = nil
    }
    _ = s.listener.Close()
}
69 vendor/github.com/ncw/rclone/lib/oauthutil/renew.go generated vendored Executable file
@@ -0,0 +1,69 @@
package oauthutil

import (
    "sync/atomic"

    "github.com/ncw/rclone/fs"
)

// Renew allows tokens to be renewed on expiry if uploads are in progress.
type Renew struct {
    name    string       // name to use in logs
    ts      *TokenSource // token source that needs renewing
    uploads int32        // number of uploads in progress - atomic access required
    run     func() error // a transaction to run to renew the token on
}

// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to run a transaction to do this.
//
// It will only renew the token if the number of uploads > 0
func NewRenew(name string, ts *TokenSource, run func() error) *Renew {
    r := &Renew{
        name: name,
        ts:   ts,
        run:  run,
    }
    go r.renewOnExpiry()
    return r
}

// renewOnExpiry renews the token whenever it expires. Useful when there
// are lots of uploads in progress and the token doesn't get renewed.
// Amazon seem to cancel your uploads if you don't renew your token
// for 2hrs.
func (r *Renew) renewOnExpiry() {
    expiry := r.ts.OnExpiry()
    for {
        <-expiry
        uploads := atomic.LoadInt32(&r.uploads)
        if uploads != 0 {
            fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
            // Do a transaction
            err := r.run()
            if err == nil {
                fs.Debugf(r.name, "Token refresh successful")
            } else {
                fs.Errorf(r.name, "Token refresh failed: %v", err)
            }
        } else {
            fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
        }
    }
}

// Start should be called before starting an upload
func (r *Renew) Start() {
    atomic.AddInt32(&r.uploads, 1)
}

// Stop should be called after finishing an upload
func (r *Renew) Stop() {
    atomic.AddInt32(&r.uploads, -1)
}

// Invalidate invalidates the token source
func (r *Renew) Invalidate() {
    r.ts.Invalidate()
}
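Not part of the vendored diff: a sketch of the intended Start/Stop pairing around an upload. The remote name "myremote" and the doUpload callback are hypothetical; ts would come from oauthutil.NewClient, and the renewal transaction would normally be a cheap authenticated call against the remote.

package main

import (
    "github.com/ncw/rclone/lib/oauthutil"
)

// uploadWithRenewal keeps the token fresh for the duration of a long upload.
func uploadWithRenewal(ts *oauthutil.TokenSource, doUpload func() error) error {
    renew := oauthutil.NewRenew("myremote", ts, func() error {
        // any cheap authenticated transaction will refresh the token;
        // returning nil here stands in for that call
        return nil
    })
    renew.Start()      // count this upload in
    defer renew.Stop() // and out again when done
    return doUpload()
}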
368 vendor/github.com/ncw/rclone/lib/pacer/pacer.go generated vendored Executable file
@@ -0,0 +1,368 @@
// Package pacer makes pacing and retrying API calls easy
package pacer

import (
    "math/rand"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fserrors"
)

// Pacer state
type Pacer struct {
    mu                 sync.Mutex    // Protecting read/writes
    minSleep           time.Duration // minimum sleep time
    maxSleep           time.Duration // maximum sleep time
    decayConstant      uint          // decay constant
    attackConstant     uint          // attack constant
    pacer              chan struct{} // To pace the operations
    sleepTime          time.Duration // Time to sleep for each transaction
    retries            int           // Max number of retries
    maxConnections     int           // Maximum number of concurrent connections
    connTokens         chan struct{} // Connection tokens
    calculatePace      func(bool)    // switchable pacing algorithm - call with mu held
    consecutiveRetries int           // number of consecutive retries
}

// Type is for selecting different pacing algorithms
type Type int

const (
    // DefaultPacer is a truncated exponential attack and decay.
    //
    // On retries the sleep time is doubled, on non errors the
    // sleep time decays according to the decay constant as set
    // with SetDecayConstant.
    //
    // The sleep never goes below that set with SetMinSleep or
    // above that set with SetMaxSleep.
    DefaultPacer = Type(iota)

    // AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
    //
    // It implements a truncated exponential backoff strategy with
    // randomization. Normally operations are paced at the
    // interval set with SetMinSleep. On errors the sleep timer
    // is set to 0..2**retries seconds.
    //
    // See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
    AmazonCloudDrivePacer

    // GoogleDrivePacer is a specialised pacer for Google Drive
    //
    // It implements a truncated exponential backoff strategy with
    // randomization. Normally operations are paced at the
    // interval set with SetMinSleep. On errors the sleep timer
    // is set to (2 ^ n) + random_number_milliseconds seconds
    //
    // See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
    GoogleDrivePacer
)

// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)

// New returns a Pacer with sensible defaults
func New() *Pacer {
    p := &Pacer{
        minSleep:       10 * time.Millisecond,
        maxSleep:       2 * time.Second,
        decayConstant:  2,
        attackConstant: 1,
        retries:        fs.Config.LowLevelRetries,
        pacer:          make(chan struct{}, 1),
    }
    p.sleepTime = p.minSleep
    p.SetPacer(DefaultPacer)
    p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)

    // Put the first pacing token in
    p.pacer <- struct{}{}

    return p
}

// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.sleepTime = t
    return p
}

// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
    p.mu.Lock()
    defer p.mu.Unlock()
    return p.sleepTime
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.minSleep = t
    p.sleepTime = p.minSleep
    return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxSleep = t
    p.sleepTime = p.minSleep
    return p
}

// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxConnections = n
    if n <= 0 {
        p.connTokens = nil
    } else {
        p.connTokens = make(chan struct{}, n)
        for i := 0; i < n; i++ {
            p.connTokens <- struct{}{}
        }
    }
    return p
}

// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential. 1 is halve, 0 is go straight to minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.decayConstant = decay
    return p
}

// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, 1 is double, 0 is go straight to maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.attackConstant = attack
    return p
}

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.retries = retries
    return p
}

// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    switch t {
    case AmazonCloudDrivePacer:
        p.calculatePace = p.acdPacer
    case GoogleDrivePacer:
        p.calculatePace = p.drivePacer
    default:
        p.calculatePace = p.defaultPacer
    }
    return p
}

// beginCall starts a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
    // pacer starts with a token in and whenever we take one out
    // XXX ms later we put another in. We could do this with a
    // Ticker more accurately, but then we'd have to work out how
    // not to run it when it wasn't needed
    <-p.pacer
    if p.maxConnections > 0 {
        <-p.connTokens
    }

    p.mu.Lock()
    // Restart the timer
    go func(t time.Duration) {
        // fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
        time.Sleep(t)
        p.pacer <- struct{}{}
    }(p.sleepTime)
    p.mu.Unlock()
}

// defaultPacer implements an exponential up and down pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
    oldSleepTime := p.sleepTime
    if retry {
        if p.attackConstant == 0 {
            p.sleepTime = p.maxSleep
        } else {
            p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
        }
        if p.sleepTime > p.maxSleep {
            p.sleepTime = p.maxSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
        }
    } else {
        p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
        }
    }
}

// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
    consecutiveRetries := p.consecutiveRetries
    if consecutiveRetries == 0 {
        if p.sleepTime != p.minSleep {
            p.sleepTime = p.minSleep
            fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
        }
    } else {
        if consecutiveRetries > 9 {
            consecutiveRetries = 9
        }
        // consecutiveRetries starts at 1 so
        // maxSleep is 2**(consecutiveRetries-1) seconds
        maxSleep := time.Second << uint(consecutiveRetries-1)
        // actual sleep is random from 0..maxSleep
        p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
    }
}

// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
    consecutiveRetries := p.consecutiveRetries
    if consecutiveRetries == 0 {
        if p.sleepTime != p.minSleep {
            p.sleepTime = p.minSleep
            fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
        }
    } else {
        if consecutiveRetries > 5 {
            consecutiveRetries = 5
        }
        // consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
        // maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
        p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
        fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
    }
}

// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
    if p.maxConnections > 0 {
        p.connTokens <- struct{}{}
    }
    p.mu.Lock()
    if retry {
        p.consecutiveRetries++
    } else {
        p.consecutiveRetries = 0
    }
    p.calculatePace(retry)
    p.mu.Unlock()
}

// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
    var retry bool
    for i := 1; i <= retries; i++ {
        p.beginCall()
        retry, err = fn()
        p.endCall(retry)
        if !retry {
            break
        }
        fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
    }
    if retry {
        err = fserrors.RetryError(err)
    }
    return err
}

// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
    p.mu.Lock()
    retries := p.retries
    p.mu.Unlock()
    return p.call(fn, retries)
}

// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
    return p.call(fn, 1)
}
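Not part of the vendored diff: a sketch of the usual Call pattern. The Paced function decides whether an error is worth a retry; the bare 429 check here stands in for a backend-specific shouldRetry helper, and the URL is whatever API endpoint the caller is hitting.

package main

import (
    "errors"
    "net/http"
    "time"

    "github.com/ncw/rclone/lib/pacer"
)

// callAPI issues a GET through a pacer, retrying on rate limits.
func callAPI(client *http.Client, url string) (*http.Response, error) {
    p := pacer.New().SetMinSleep(20 * time.Millisecond).SetPacer(pacer.GoogleDrivePacer)
    var resp *http.Response
    err := p.Call(func() (bool, error) {
        var err error
        resp, err = client.Get(url)
        if err != nil {
            return true, err // network error: ask to be retried
        }
        if resp.StatusCode == 429 {
            _ = resp.Body.Close()
            return true, errors.New("rate limited") // back off and retry
        }
        return false, nil // success: no retry, pacer decays the sleep
    })
    return resp, err
}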
31 vendor/github.com/ncw/rclone/lib/pacer/tokens.go generated vendored Executable file
@@ -0,0 +1,31 @@
// Tokens for controlling concurrency

package pacer

// TokenDispenser is for controlling concurrency
type TokenDispenser struct {
    tokens chan struct{}
}

// NewTokenDispenser makes a pool of n tokens
func NewTokenDispenser(n int) *TokenDispenser {
    td := &TokenDispenser{
        tokens: make(chan struct{}, n),
    }
    // Fill up the upload tokens
    for i := 0; i < n; i++ {
        td.tokens <- struct{}{}
    }
    return td
}

// Get gets a token from the pool - don't forget to return it with Put
func (td *TokenDispenser) Get() {
    <-td.tokens
}

// Put returns a token
func (td *TokenDispenser) Put() {
    td.tokens <- struct{}{}
}
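Not part of the vendored diff: a sketch bounding concurrent work with a TokenDispenser. The parts slice and upload callback are hypothetical; the pattern is just Get before the work and Put after.

package main

import (
    "sync"

    "github.com/ncw/rclone/lib/pacer"
)

// uploadAll runs upload for every part with at most 4 in flight.
func uploadAll(parts []string, upload func(string)) {
    tokens := pacer.NewTokenDispenser(4)
    var wg sync.WaitGroup
    for _, part := range parts {
        wg.Add(1)
        go func(part string) {
            defer wg.Done()
            tokens.Get()       // blocks until a token is free
            defer tokens.Put() // return it when done
            upload(part)
        }(part)
    }
    wg.Wait()
}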
28 vendor/github.com/ncw/rclone/lib/readers/counting_reader.go generated vendored Executable file
@@ -0,0 +1,28 @@
package readers

import "io"

// NewCountingReader returns a CountingReader, which will read from the given
// reader while keeping track of how many bytes were read.
func NewCountingReader(in io.Reader) *CountingReader {
    return &CountingReader{in: in}
}

// CountingReader holds a reader and a read count of how many bytes were read
// so far.
type CountingReader struct {
    in   io.Reader
    read uint64
}

// Read reads from the underlying reader.
func (cr *CountingReader) Read(b []byte) (int, error) {
    n, err := cr.in.Read(b)
    cr.read += uint64(n)
    return n, err
}

// BytesRead returns how many bytes were read from the underlying reader so far.
func (cr *CountingReader) BytesRead() uint64 {
    return cr.read
}
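Not part of the vendored diff: a one-screen usage sketch of CountingReader.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    "github.com/ncw/rclone/lib/readers"
)

func main() {
    cr := readers.NewCountingReader(strings.NewReader("hello world"))
    _, _ = io.Copy(ioutil.Discard, cr) // drain the reader
    fmt.Println(cr.BytesRead())        // 11
}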
22 vendor/github.com/ncw/rclone/lib/readers/limited.go generated vendored Executable file
@@ -0,0 +1,22 @@
package readers

import "io"

// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
type LimitedReadCloser struct {
    *io.LimitedReader
    io.Closer
}

// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
// limit it to reading limit bytes. If limit < 0 then it does not
// wrap rc, it just returns it.
func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
    if limit < 0 {
        return rc
    }
    return &LimitedReadCloser{
        LimitedReader: &io.LimitedReader{R: rc, N: limit},
        Closer:        rc,
    }
}
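Not part of the vendored diff: a usage sketch showing that the limit applies to Read while Close still reaches the underlying ReadCloser.

package main

import (
    "fmt"
    "io/ioutil"
    "strings"

    "github.com/ncw/rclone/lib/readers"
)

func main() {
    rc := ioutil.NopCloser(strings.NewReader("0123456789"))
    lrc := readers.NewLimitedReadCloser(rc, 4) // read at most 4 bytes
    b, _ := ioutil.ReadAll(lrc)
    _ = lrc.Close()       // closes the wrapped ReadCloser
    fmt.Printf("%s\n", b) // "0123"
}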
18 vendor/github.com/ncw/rclone/lib/readers/readfill.go generated vendored Executable file
@@ -0,0 +1,18 @@
package readers

import "io"

// ReadFill reads as much data from r into buf as it can
//
// It reads until the buffer is full or r.Read returned an error.
//
// This is io.ReadFull but when you just want as much data as
// possible, not an exact size of block.
func ReadFill(r io.Reader, buf []byte) (n int, err error) {
    var nn int
    for n < len(buf) && err == nil {
        nn, err = r.Read(buf[n:])
        n += nn
    }
    return n, err
}
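Not part of the vendored diff: a small sketch contrasting ReadFill with io.ReadFull on a short input.

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/ncw/rclone/lib/readers"
)

func main() {
    buf := make([]byte, 16)
    n, err := readers.ReadFill(strings.NewReader("short"), buf)
    // unlike io.ReadFull, a short read is not ErrUnexpectedEOF: we get
    // everything available plus the terminating error.
    fmt.Println(n, err == io.EOF) // 5 true
}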
96 vendor/github.com/ncw/rclone/lib/readers/repeatable.go generated vendored Executable file
@@ -0,0 +1,96 @@
package readers

import (
    "io"

    "github.com/pkg/errors"
)

// A RepeatableReader implements io.ReadSeeker. It allows seeking cached
// data back and forth within the reader, but will only read data from the
// internal Reader as necessary, and will play nicely with the Account and
// io.LimitedReader to reflect current speed.
type RepeatableReader struct {
    in io.Reader // Input reader
    i  int64     // current reading index
    b  []byte    // internal cache buffer
}

var _ io.ReadSeeker = (*RepeatableReader)(nil)

// Seek implements the io.Seeker interface.
// If the seek position is past the cached buffer length, the function
// returns the maximum offset that can be used together with a
// "fs.RepeatableReader.Seek: offset is unavailable" error.
func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
    var abs int64
    cacheLen := int64(len(r.b))
    switch whence {
    case io.SeekStart:
        abs = offset
    case io.SeekCurrent:
        abs = r.i + offset
    case io.SeekEnd:
        abs = cacheLen + offset
    default:
        return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
    }
    if abs < 0 {
        return 0, errors.New("fs.RepeatableReader.Seek: negative position")
    }
    if abs > cacheLen {
        return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
    }
    r.i = abs
    return abs, nil
}

// Read reads data from the original Reader into b.
// Data is served from the underlying Reader, or from the cache if it
// was already read.
func (r *RepeatableReader) Read(b []byte) (n int, err error) {
    cacheLen := int64(len(r.b))
    if r.i == cacheLen {
        n, err = r.in.Read(b)
        if n > 0 {
            r.b = append(r.b, b[:n]...)
        }
    } else {
        n = copy(b, r.b[r.i:])
    }
    r.i += int64(n)
    return n, err
}

// NewRepeatableReader creates a new repeatable reader from Reader r
func NewRepeatableReader(r io.Reader) *RepeatableReader {
    return &RepeatableReader{in: r}
}

// NewRepeatableReaderSized creates a new repeatable reader from Reader r
// with an initial buffer of size.
func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
    return &RepeatableReader{
        in: r,
        b:  make([]byte, 0, size),
    }
}

// NewRepeatableLimitReader creates a new repeatable reader from Reader r
// with an initial buffer of size wrapped in an io.LimitReader to read
// only size.
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
    return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
}

// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
// using the buffer passed in.
func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
    return &RepeatableReader{
        in: r,
        b:  buf[:0],
    }
}

// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
// Reader r and buf wrapped in an io.LimitReader to read only size.
func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
    return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
}
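Not part of the vendored diff: a usage sketch showing the read-then-replay behaviour that makes this useful for retrying uploads.

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/ncw/rclone/lib/readers"
)

func main() {
    r := readers.NewRepeatableReader(strings.NewReader("abcdef"))
    buf := make([]byte, 3)
    _, _ = io.ReadFull(r, buf)       // reads "abc" and caches it
    _, _ = r.Seek(0, io.SeekStart)   // rewind within the cache
    _, _ = io.ReadFull(r, buf)       // replays "abc" from the cache
    fmt.Printf("%s\n", buf)          // "abc"
}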
421 vendor/github.com/ncw/rclone/lib/rest/rest.go generated vendored Executable file
@@ -0,0 +1,421 @@
|
||||
// Package rest implements a simple REST wrapper
|
||||
//
|
||||
// All methods are safe for concurrent calling.
|
||||
package rest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Client contains the info to sustain the API
|
||||
type Client struct {
|
||||
mu sync.RWMutex
|
||||
c *http.Client
|
||||
rootURL string
|
||||
errorHandler func(resp *http.Response) error
|
||||
headers map[string]string
|
||||
signer SignerFn
|
||||
}
|
||||
|
||||
// NewClient takes an oauth http.Client and makes a new api instance
|
||||
func NewClient(c *http.Client) *Client {
|
||||
api := &Client{
|
||||
c: c,
|
||||
errorHandler: defaultErrorHandler,
|
||||
headers: make(map[string]string),
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
// ReadBody reads resp.Body into result, closing the body
|
||||
func ReadBody(resp *http.Response) (result []byte, err error) {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
|
||||
// defaultErrorHandler doesn't attempt to parse the http body, just
|
||||
// returns it in the error message
|
||||
func defaultErrorHandler(resp *http.Response) (err error) {
|
||||
body, err := ReadBody(resp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error reading error out of body")
|
||||
}
|
||||
return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
|
||||
}
|
||||
|
||||
// SetErrorHandler sets the handler to decode an error response when
|
||||
// the HTTP status code is not 2xx. The handler should close resp.Body.
|
||||
func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
api.errorHandler = fn
|
||||
return api
|
||||
}
|
||||
|
||||
// SetRoot sets the default RootURL. You can override this on a per
|
||||
// call basis using the RootURL field in Opts.
|
||||
func (api *Client) SetRoot(RootURL string) *Client {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
api.rootURL = RootURL
|
||||
return api
|
||||
}
|
||||
|
||||
// SetHeader sets a header for all requests
|
||||
func (api *Client) SetHeader(key, value string) *Client {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
api.headers[key] = value
|
||||
return api
|
||||
}
|
||||
|
||||
// RemoveHeader unsets a header for all requests
|
||||
func (api *Client) RemoveHeader(key string) *Client {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
delete(api.headers, key)
|
||||
return api
|
||||
}
|
||||
|
||||
// SignerFn is used to sign an outgoing request
|
||||
type SignerFn func(*http.Request) error
|
||||
|
||||
// SetSigner sets a signer for all requests
|
||||
func (api *Client) SetSigner(signer SignerFn) *Client {
|
||||
api.mu.Lock()
|
||||
defer api.mu.Unlock()
|
||||
api.signer = signer
|
||||
return api
|
||||
}
// SetUserPass creates an Authorization header for all requests with
// the UserName and Password passed in
func (api *Client) SetUserPass(UserName, Password string) *Client {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.SetBasicAuth(UserName, Password)
	api.SetHeader("Authorization", req.Header.Get("Authorization"))
	return api
}

// SetCookie creates a Cookie header for all requests with the
// supplied cookies passed in.
// All cookies have to be supplied at once; they will all be
// overwritten on a new call to the method.
func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	for _, ck := range cks {
		req.AddCookie(ck)
	}
	api.SetHeader("Cookie", req.Header.Get("Cookie"))
	return api
}
// Opts contains parameters for Call, CallJSON etc
type Opts struct {
	Method                string // GET, POST etc
	Path                  string // relative to RootURL
	RootURL               string // override RootURL passed into SetRoot()
	Body                  io.Reader
	NoResponse            bool // set to close Body
	ContentType           string
	ContentLength         *int64
	ContentRange          string
	ExtraHeaders          map[string]string
	UserName              string // username for Basic Auth
	Password              string // password for Basic Auth
	Options               []fs.OpenOption
	IgnoreStatus          bool       // if set then we don't check error status or parse error body
	MultipartParams       url.Values // if set do multipart form upload with attached file
	MultipartMetadataName string     // ..this is used for the name of the metadata form part if set
	MultipartContentName  string     // ..name of the parameter which is the attached file
	MultipartFileName     string     // ..name of the file for the attached file
	Parameters            url.Values // any parameters for the final URL
	TransferEncoding      []string   // transfer encoding, set to "identity" to disable chunked encoding
	Close                 bool       // set to close the connection after this transaction
	NoRedirect            bool       // if this is set then the client won't follow redirects
}
// Copy creates a copy of the options
func (o *Opts) Copy() *Opts {
	newOpts := *o
	return &newOpts
}

// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
	defer fs.CheckClose(resp.Body, &err)
	decoder := json.NewDecoder(resp.Body)
	return decoder.Decode(result)
}

// DecodeXML decodes resp.Body into result
func DecodeXML(resp *http.Response, result interface{}) (err error) {
	defer fs.CheckClose(resp.Body, &err)
	decoder := xml.NewDecoder(resp.Body)
	return decoder.Decode(result)
}
// ClientWithHeaderReset makes a new http client which resets the
// headers passed in on redirect
//
// FIXME This is now unnecessary with go1.8
func ClientWithHeaderReset(c *http.Client, headers map[string]string) *http.Client {
	if len(headers) == 0 {
		return c
	}
	clientCopy := *c
	clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		if len(via) >= 10 {
			return errors.New("stopped after 10 redirects")
		}
		// Reset the headers in the new request
		for k, v := range headers {
			if v != "" {
				req.Header.Set(k, v)
			}
		}
		return nil
	}
	return &clientCopy
}
// ClientWithNoRedirects makes a new http client which won't follow redirects
func ClientWithNoRedirects(c *http.Client) *http.Client {
	clientCopy := *c
	clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	}
	return &clientCopy
}
// Call makes the call and returns the http.Response
//
// if err == nil then resp.Body will need to be closed unless
// opts.NoResponse is set
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
	api.mu.RLock()
	defer api.mu.RUnlock()
	if opts == nil {
		return nil, errors.New("call() called with nil opts")
	}
	url := api.rootURL
	if opts.RootURL != "" {
		url = opts.RootURL
	}
	if url == "" {
		return nil, errors.New("RootURL not set")
	}
	url += opts.Path
	if len(opts.Parameters) > 0 {
		url += "?" + opts.Parameters.Encode()
	}
	req, err := http.NewRequest(opts.Method, url, opts.Body)
	if err != nil {
		return
	}
	headers := make(map[string]string)
	// Set default headers
	for k, v := range api.headers {
		headers[k] = v
	}
	if opts.ContentType != "" {
		headers["Content-Type"] = opts.ContentType
	}
	if opts.ContentLength != nil {
		req.ContentLength = *opts.ContentLength
	}
	if opts.ContentRange != "" {
		headers["Content-Range"] = opts.ContentRange
	}
	if len(opts.TransferEncoding) != 0 {
		req.TransferEncoding = opts.TransferEncoding
	}
	if opts.Close {
		req.Close = true
	}
	// Set any extra headers
	if opts.ExtraHeaders != nil {
		for k, v := range opts.ExtraHeaders {
			headers[k] = v
		}
	}
	// add any options to the headers
	fs.OpenOptionAddHeaders(opts.Options, headers)
	// Now set the headers
	for k, v := range headers {
		if v != "" {
			req.Header.Add(k, v)
		}
	}
	if opts.UserName != "" || opts.Password != "" {
		req.SetBasicAuth(opts.UserName, opts.Password)
	}
	var c *http.Client
	if opts.NoRedirect {
		c = ClientWithNoRedirects(api.c)
	} else {
		c = ClientWithHeaderReset(api.c, headers)
	}
	if api.signer != nil {
		err = api.signer(req)
		if err != nil {
			return nil, errors.Wrap(err, "signer failed")
		}
	}
	// Release the lock while the request is in flight so other
	// goroutines can configure or use the client concurrently
	api.mu.RUnlock()
	resp, err = c.Do(req)
	api.mu.RLock()
	if err != nil {
		return nil, err
	}
	if !opts.IgnoreStatus {
		if resp.StatusCode < 200 || resp.StatusCode > 299 {
			err = api.errorHandler(resp)
			if err.Error() == "" {
				// replace empty errors with something
				err = errors.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
			}
			return resp, err
		}
	}
	if opts.NoResponse {
		return resp, resp.Body.Close()
	}
	return resp, nil
}
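// A usage sketch (the path is hypothetical): Call is the low level
// entry point; on success the caller owns resp.Body unless NoResponse
// was set.
//
//	opts := rest.Opts{
//		Method: "GET",
//		Path:   "/status",
//	}
//	resp, err := client.Call(&opts)
//	if err != nil {
//		return err
//	}
//	body, err := rest.ReadBody(resp) // closes resp.Body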
// MultipartUpload creates an io.Reader which produces an encoded
// multipart form upload from the params passed in and the passed in
// reader
//
// in - the body of the file
// params - the form parameters
// fileName - is the name of the attached file
// contentName - the name of the parameter for the file
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, error) {
	bodyReader, bodyWriter := io.Pipe()
	writer := multipart.NewWriter(bodyWriter)
	contentType := writer.FormDataContentType()

	// Pump the data in the background
	go func() {
		var err error

		for key, vals := range params {
			for _, val := range vals {
				err = writer.WriteField(key, val)
				if err != nil {
					_ = bodyWriter.CloseWithError(errors.Wrap(err, "create metadata part"))
					return
				}
			}
		}

		part, err := writer.CreateFormFile(contentName, fileName)
		if err != nil {
			_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to create form file"))
			return
		}

		_, err = io.Copy(part, in)
		if err != nil {
			_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to copy data"))
			return
		}

		err = writer.Close()
		if err != nil {
			_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to close form"))
			return
		}

		_ = bodyWriter.Close()
	}()

	return bodyReader, contentType, nil
}
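// A usage sketch (the field and file names are hypothetical): the
// returned reader streams the encoded form through an io.Pipe, so the
// upload does not have to be buffered in memory.
//
//	params := url.Values{"title": {"holiday snap"}}
//	body, contentType, err := rest.MultipartUpload(fileReader, params, "file", "photo.jpg")
//	if err != nil {
//		return err
//	}
//	opts := rest.Opts{
//		Method:      "POST",
//		Path:        "/upload",
//		Body:        body,
//		ContentType: contentType,
//	}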
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request
//
// If (opts.MultipartParams or opts.MultipartMetadataName) and
// opts.Body are set then CallJSON will do a multipart upload with a
// file attached. opts.MultipartContentName is the name of the
// parameter and opts.MultipartFileName is the name of the file. If
// MultipartMetadataName is set, and request != nil is supplied, then
// the request will be marshalled into JSON and added to the form with
// parameter name MultipartMetadataName.
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	return api.callCodec(opts, request, response, json.Marshal, DecodeJSON, "application/json")
}
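// A sketch of a typical JSON round trip (the path, itemRequest and the
// result type are hypothetical): request is marshalled into the body
// and response is filled from the reply; DecodeJSON closes the body.
//
//	var created struct {
//		ID string `json:"id"`
//	}
//	opts := rest.Opts{
//		Method: "POST",
//		Path:   "/items",
//	}
//	_, err := client.CallJSON(&opts, &itemRequest, &created)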
// CallXML runs Call and decodes the body as an XML object into response (if not nil)
//
// If request is not nil then it will be XML encoded as the body of the request
//
// See CallJSON for a description of MultipartParams and related opts
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallXML(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	return api.callCodec(opts, request, response, xml.Marshal, DecodeXML, "application/xml")
}

type marshalFn func(v interface{}) ([]byte, error)
type decodeFn func(resp *http.Response, result interface{}) (err error)

func (api *Client) callCodec(opts *Opts, request interface{}, response interface{}, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) {
	var requestBody []byte
	// Marshal the request if given
	if request != nil {
		requestBody, err = marshal(request)
		if err != nil {
			return nil, err
		}
		// Set the body up as a marshalled object if no body passed in
		if opts.Body == nil {
			opts = opts.Copy()
			opts.ContentType = contentType
			opts.Body = bytes.NewBuffer(requestBody)
		}
	}
	isMultipart := (opts.MultipartParams != nil || opts.MultipartMetadataName != "") && opts.Body != nil
	if isMultipart {
		params := opts.MultipartParams
		if params == nil {
			params = url.Values{}
		}
		if opts.MultipartMetadataName != "" {
			params.Add(opts.MultipartMetadataName, string(requestBody))
		}
		opts = opts.Copy()
		opts.Body, opts.ContentType, err = MultipartUpload(opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
		if err != nil {
			return nil, err
		}
	}
	resp, err = api.Call(opts)
	if err != nil {
		return resp, err
	}
	if response == nil || opts.NoResponse {
		return resp, nil
	}
	err = decode(resp, response)
	return resp, err
}
27
vendor/github.com/ncw/rclone/lib/rest/url.go
generated
vendored
Executable file
@@ -0,0 +1,27 @@
package rest

import (
	"net/url"

	"github.com/pkg/errors"
)

// URLJoin joins a URL and a path returning a new URL
//
// path should be URL escaped
func URLJoin(base *url.URL, path string) (*url.URL, error) {
	rel, err := url.Parse(path)
	if err != nil {
		return nil, errors.Wrapf(err, "Error parsing %q as URL", path)
	}
	return base.ResolveReference(rel), nil
}
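// For example (the URLs are illustrative), resolution follows the
// usual relative reference rules, so the trailing slash on the base
// matters:
//
//	base, _ := url.Parse("https://example.com/a/")
//	u, err := rest.URLJoin(base, "b/c")
//	// u.String() == "https://example.com/a/b/c"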
// URLPathEscape escapes the URL path in the string passed in using
// URL escaping rules
//
// This mimics url.PathEscape which is only available from go 1.8
func URLPathEscape(in string) string {
	var u url.URL
	u.Path = in
	return u.String()
}
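// For example (illustrative input):
//
//	rest.URLPathEscape("a/b c") // returns "a/b%20c"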