VENDOR intensifies

This commit is contained in:
bel
2020-03-13 03:41:54 +00:00
parent 0d6be1e9d8
commit a1cea7d1cb
1427 changed files with 527540 additions and 1 deletion

20
vendor/github.com/ncw/rclone/COPYING generated vendored Executable file

@@ -0,0 +1,20 @@
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

1087
vendor/github.com/ncw/rclone/backend/crypt/cipher.go generated vendored Executable file

File diff suppressed because it is too large

781
vendor/github.com/ncw/rclone/backend/crypt/crypt.go generated vendored Executable file

@@ -0,0 +1,781 @@
// Package crypt provides wrappers for Fs and Object which implement encryption
package crypt
import (
"fmt"
"io"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Encrypt directory names.",
},
{
Value: "false",
Help: "Don't encrypt directory names, leave them intact.",
},
},
}, {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "show_mapping",
Help: "For all files listed show how the names encrypt.",
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}},
})
}
// newCipherForConfig constructs a Cipher for the given config options
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
}
if opt.Password == "" {
return nil, errors.New("password not set in config file")
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
return newCipherForConfig(opt)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Look for a file first
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root)
}
// Encrypt an object file name to entries.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
}
// Encrypt a directory file name to entries.
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
}
// Encrypt some directory entries. This alters entries, returning it as newEntries.
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
return f.encryptEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(entries)
if err != nil {
return err
}
return callback(newEntries)
})
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
// Check the hashes of the encrypted data if we were comparing them
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}
return f.newObject(o), nil
}
// Put in to the remote path with the given modTime and size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Put)
}
// PutStream uploads to the remote path with the given modTime and of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
}
return do()
}
// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do()
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName)
}
// DecryptFileName returns a decrypted file name
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
// Check nonce isn't all zeros
isZero := true
for i := range nonce {
if nonce[i] != 0 {
isZero = false
}
}
if isZero {
fs.Errorf(o, "empty nonce read")
}
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
}
// Open the src for input
in, err = src.Open()
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// Object describes a wrapped fs.Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
type Object struct {
fs.Object
f *Fs
}
func (f *Fs) newObject(o fs.Object) *Object {
return &Object{
Object: o,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
remote := o.Object.Remote()
decryptedName, err := o.f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable file name: %v", err)
return remote
}
return decryptedName
}
// Size returns the size of the file
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
// pass on Options to underlying open if appropriate
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
if underlyingLimit >= 0 {
end = underlyingOffset + underlyingLimit - 1
if end >= o.Object.Size() {
end = -1
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
}
return rc, nil
}
// Update in to the object with the given modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
}
return newDir
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *ObjectInfo) Fs() fs.Info {
return o.f
}
// Remote returns the remote path
func (o *ObjectInfo) Remote() string {
return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote())
}
// Size returns the size of the file
func (o *ObjectInfo) Size() int64 {
size := o.ObjectInfo.Size()
if size < 0 {
return size
}
return o.f.cipher.EncryptedSize(size)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
)
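
A minimal sketch, not part of the vendored tree, of exercising the cipher above directly. It assumes configmap.Simple and obscure.Obscure behave as in this vendored copy of rclone; the password value is purely illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/backend/crypt"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	// Passwords live obscured in the config, so NewCipher expects the
	// obscured form - newCipherForConfig calls obscure.Reveal on it.
	obscured, err := obscure.Obscure("correct horse battery staple")
	if err != nil {
		log.Fatal(err)
	}
	m := configmap.Simple{
		"filename_encryption":       "standard",
		"directory_name_encryption": "true",
		"password":                  obscured,
	}
	c, err := crypt.NewCipher(m)
	if err != nil {
		log.Fatal(err)
	}
	encrypted := c.EncryptFileName("path/to/file.txt")
	decrypted, err := c.DecryptFileName(encrypted)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(encrypted, "->", decrypted) // round trips to path/to/file.txt
}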

63
vendor/github.com/ncw/rclone/backend/crypt/pkcs7/pkcs7.go generated vendored Executable file

@@ -0,0 +1,63 @@
// Package pkcs7 implements PKCS#7 padding
//
// This is a standard way of encoding variable length buffers into
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "github.com/pkg/errors"
// Errors Unpad can return
var (
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
)
// Pad buf using PKCS#7 to a multiple of n.
//
// Appends the padding to buf - make a copy of it first if you don't
// want it modified.
func Pad(n int, buf []byte) []byte {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
padding := n - (length % n)
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
panic("padding failed")
}
return buf
}
// Unpad buf using PKCS#7 from a multiple of n returning a slice of
// buf or an error if malformed.
func Unpad(n int, buf []byte) ([]byte, error) {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
if length == 0 {
return nil, ErrorPaddingNotFound
}
if (length % n) != 0 {
return nil, ErrorPaddingNotAMultiple
}
padding := int(buf[length-1])
if padding > n {
return nil, ErrorPaddingTooLong
}
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}
}
return buf[:length-padding], nil
}
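
A quick round-trip sketch of Pad/Unpad (illustrative; the buffer contents and the block size of 16 are arbitrary):

package main

import (
	"fmt"

	"github.com/ncw/rclone/backend/crypt/pkcs7"
)

func main() {
	buf := []byte("hello")       // 5 bytes
	padded := pkcs7.Pad(16, buf) // 16 bytes: "hello" plus eleven 0x0b bytes
	fmt.Printf("padded:   %x\n", padded)
	unpadded, err := pkcs7.Unpad(16, padded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("unpadded: %s\n", unpadded) // hello
}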

2088
vendor/github.com/ncw/rclone/backend/drive/drive.go generated vendored Executable file

File diff suppressed because it is too large

249
vendor/github.com/ncw/rclone/backend/drive/upload.go generated vendored Executable file

@@ -0,0 +1,249 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *Fs
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PATCH"
}
urls += "?" + params.Encode()
var res *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
var body io.Reader
body, err = googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
if err != nil {
return false, err
}
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
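
The resumable protocol above is driven by Content-Range arithmetic: each chunk declares bytes start-(start+size-1)/total, and a zero-length request ("bytes */total") probes the server for how much has already arrived, as transferStatus does. A standalone sketch of that arithmetic (the helper name is ours, not the API's):

package main

import "fmt"

// contentRange mirrors the header logic in makeRequest above.
func contentRange(start, reqSize, total int64) string {
	if reqSize == 0 {
		// Status probe - ask the server how much it already has.
		return fmt.Sprintf("bytes */%v", total)
	}
	return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
}

func main() {
	const total = 10 << 20 // 10 MiB upload
	const chunk = 8 << 20  // 8 MiB chunk size
	fmt.Println(contentRange(0, chunk, total))           // bytes 0-8388607/10485760
	fmt.Println(contentRange(chunk, total-chunk, total)) // bytes 8388608-10485759/10485760
	fmt.Println(contentRange(0, 0, total))               // bytes */10485760
}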

127
vendor/github.com/ncw/rclone/backend/dropbox/dbhash/dbhash.go generated vendored Executable file

@@ -0,0 +1,127 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash
import (
"crypto/sha256"
"hash"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha256.BlockSize
// Size of the checksum in bytes.
Size = sha256.Size
bytesPerBlock = 4 * 1024 * 1024
hashReturnedError = "hash function returned error"
)
type digest struct {
n int // bytes written into blockHash so far
blockHash hash.Hash
totalHash hash.Hash
sumCalled bool
writtenMore bool
}
// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
blockHash := d.blockHash.Sum(nil)
_, err := d.totalHash.Write(blockHash)
if err != nil {
panic(hashReturnedError)
}
// reset counters for blockhash
d.n = 0
d.blockHash.Reset()
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)
}
d.n += toWrite
p = p[toWrite:]
// Accumulate the total hash
if d.n == bytesPerBlock {
d.writeBlockHash()
}
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
if d.sumCalled && d.writtenMore {
panic("digest.Sum() called more than once")
}
d.sumCalled = true
d.writtenMore = false
if d.n != 0 {
d.writeBlockHash()
}
return d.totalHash.Sum(b)
}
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.n = 0
d.totalHash = sha256.New()
d.blockHash = sha256.New()
d.sumCalled = false
d.writtenMore = false
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return d.totalHash.Size()
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return d.totalHash.BlockSize()
}
// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
var d digest
d.Reset()
_, _ = d.Write(data)
var out [Size]byte
d.Sum(out[:0])
return out
}
// must implement this interface
var _ hash.Hash = (*digest)(nil)
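
To make the construction concrete: the Dropbox hash is the SHA-256 of the concatenation of per-4-MiB-block SHA-256 digests. A sketch, assuming only the package above plus the standard library, that checks Sum against a by-hand computation:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/ncw/rclone/backend/dropbox/dbhash"
)

const blockSize = 4 * 1024 * 1024

func main() {
	// Two full blocks plus a short tail, so writeBlockHash fires twice
	// during Write and once more inside Sum.
	data := bytes.Repeat([]byte{'x'}, 2*blockSize+5)
	got := dbhash.Sum(data)

	// By hand: sha256 each 4 MiB block, then sha256 the concatenated
	// block digests.
	var concat []byte
	for off := 0; off < len(data); off += blockSize {
		end := off + blockSize
		if end > len(data) {
			end = len(data)
		}
		sum := sha256.Sum256(data[off:end])
		concat = append(concat, sum[:]...)
	}
	want := sha256.Sum256(concat)
	// Slice got in case Size is declared wider than the 32 byte digest.
	fmt.Println("match:", bytes.Equal(got[:len(want)], want[:])) // match: true
}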

29
vendor/github.com/ncw/rclone/backend/local/about_unix.go generated vendored Executable file

@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux
package local
import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
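
For reference, the same Statfs arithmetic outside the Fs wrapper - a unix-only sketch using just the standard library:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var s syscall.Statfs_t
	if err := syscall.Statfs("/", &s); err != nil {
		panic(err)
	}
	bs := int64(s.Bsize) // filesystem block size
	fmt.Println("total:", bs*int64(s.Blocks))         // all blocks
	fmt.Println("used: ", bs*int64(s.Blocks-s.Bfree)) // blocks in use
	fmt.Println("free: ", bs*int64(s.Bavail))         // blocks available to unprivileged users
}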

36
vendor/github.com/ncw/rclone/backend/local/about_windows.go generated vendored Executable file

@@ -0,0 +1,36 @@
// +build windows
package local
import (
"syscall"
"unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
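
Note the asymmetry in the out-parameters: lpFreeBytesAvailable is the free space available to the calling user (so per-user quotas are respected), while lpTotalNumberOfFreeBytes ignores quotas. That is why Free reports available rather than total - free.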

993
vendor/github.com/ncw/rclone/backend/local/local.go generated vendored Executable file

@@ -0,0 +1,993 @@
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "skip_links",
Help: "Don't warn about skipped symlinks.",
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: "Don't apply unicode normalization to paths and filenames",
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: "Don't check to see if the files change during upload",
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
err := o.lstat()
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorObjectNotFound
}
if os.IsPermission(err) {
return nil, fs.ErrorPermissionDenied
}
return nil, err
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
}
fd, err := os.Open(fsDirPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open directory %q", dir)
}
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
}
}()
for {
fis, err := fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 {
break
}
if err != nil {
return nil, errors.Wrapf(err, "failed to read directory %q", dir)
}
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if err != nil {
return nil, err
}
mode = fi.Mode()
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
if fso.Storable() {
entries = append(entries, fso)
}
}
}
}
return entries, nil
}
// cleanRemote makes name a valid UTF-8 string for use in a remote path.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
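// Round-tripping through []rune replaces each invalid UTF-8 byte with
// utf8.RuneError, so the result is guaranteed to be valid UTF-8.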
name = string([]rune(name))
}
name = filepath.ToSlash(name)
return name
}
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Lookup a directory name to make a local name (reverses
// cleanDirName)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Cleans a directory name recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the given modTime and of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
if err != nil {
return err
}
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
return f.precision
}
// Read the precision
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
return time.Second
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
// Current time with delta
t := time.Unix(time.Now().Unix(), int64(duration))
err := os.Chtimes(path, t, t)
if err != nil {
// fmt.Println("Failed to Chtimes", err)
break
}
// Read the actual time back
fi, err := os.Stat(path)
if err != nil {
// fmt.Println("Failed to Stat", err)
break
}
// If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime().Equal(t) {
// fmt.Println("Precision detected as", duration)
return duration
}
}
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := f.lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return nil, err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return nil, err
} else if err != nil {
// not quite clear, but probably trying to move a file across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move: %v: trying copy", err)
return nil, fs.ErrorCantMove
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
// Do the move
err = os.Rename(srcPath, dstPath)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return err
} else if err != nil {
// not quite clear, but probably trying to move directory across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
return fs.ErrorCantDirMove
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectHashesMu.Lock()
hashes := o.hashes
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
o.fs.objectHashesMu.Lock()
o.hashes = hashes
o.fs.objectHashesMu.Unlock()
}
return hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
// Re-read metadata
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and report as not storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the hashes of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
fd *os.File // file object reference
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !file.o.fs.opt.NoCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
fd, err := os.Open(o.path)
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the hashes as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
hashes = x.Hashes
}
}
err := o.mkdirAll()
if err != nil {
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
// All successful so update the hashes
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// Re-read info now that we have finished
return o.lstat()
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
o.setMetadata(info)
}
return err
}
// Remove an object
func (o *Object) Remove() error {
return remove(o.path)
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On Windows it also makes the path UNC and replaces any characters
// Windows can't deal with.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
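A test-style sketch of the mapping uncPath performs, with inputs chosen to hit each branch above (assumes the standard testing package):
```
func TestUncPathSketch(t *testing.T) {
	cases := map[string]string{
		`c:\Users\nick`:        `\\?\c:\Users\nick`,          // drive-absolute
		`\\server\share\f.txt`: `\\?\UNC\server\share\f.txt`, // plain UNC
		`\\?\c:\already\long`:  `\\?\c:\already\long`,        // already long
		`relative\path`:        `relative\path`,              // not absolute
	}
	for in, want := range cases {
		if got := uncPath(in); got != want {
			t.Errorf("uncPath(%q) = %q, want %q", in, got, want)
		}
	}
}
```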
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
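And a short sketch for cleanWindowsName (passing nil for f skips the warning bookkeeping):
```
got := cleanWindowsName(nil, `c:\dir\a<b>:c`)
// got == `c:\dir\a_b__c`: the drive's colon is preserved via name2,
// while `<`, `>` and the stray `:` are replaced with `_`.
```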
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)


@@ -0,0 +1,13 @@
// Device reading functions
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local
import "os"
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
return devUnset
}


@@ -0,0 +1,26 @@
// Device reading functions
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local
import (
"os"
"syscall"
"github.com/ncw/rclone/fs"
)
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev)
}
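A hedged sketch of how a --one-file-system style directory walk might use this (root and the Walk context are illustrative, not rclone's actual code):
```
rootInfo, err := os.Lstat(root)
if err != nil {
	return err
}
rootDev := readDevice(rootInfo, true)
// ... inside a filepath.Walk callback with fi os.FileInfo:
if readDevice(fi, true) != rootDev {
	// entry lives on a different filesystem - skip it
	return filepath.SkipDir
}
```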

10
vendor/github.com/ncw/rclone/backend/local/remove_other.go generated vendored Executable file

@@ -0,0 +1,10 @@
//+build !windows
package local
import "os"
// Removes name - no retries are needed on non-Windows platforms
func remove(name string) error {
return os.Remove(name)
}

38
vendor/github.com/ncw/rclone/backend/local/remove_windows.go generated vendored Executable file

@@ -0,0 +1,38 @@
//+build windows
package local
import (
"os"
"syscall"
"time"
"github.com/ncw/rclone/fs"
)
const (
ERROR_SHARING_VIOLATION syscall.Errno = 32
)
// Removes name, retrying on a sharing violation
func remove(name string) (err error) {
const maxTries = 10
var sleepTime = 1 * time.Millisecond
for i := 0; i < maxTries; i++ {
err = os.Remove(name)
if err == nil {
break
}
pathErr, ok := err.(*os.PathError)
if !ok {
break
}
if pathErr.Err != ERROR_SHARING_VIOLATION {
break
}
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
time.Sleep(sleepTime)
sleepTime <<= 1
}
return err
}


@@ -0,0 +1,202 @@
// Package quickxorhash provides the quickXorHash algorithm which is a
// quick, simple non-cryptographic hash algorithm that works by XORing
// the bytes in a circular-shifting fashion.
//
// It is used by Microsoft Onedrive for Business to hash data.
//
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash
// This code was ported from the code snippet linked from
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// Which has the copyright
// ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
const (
// BlockSize is the preferred size for hashing
BlockSize = 64
// Size of the output checksum
Size = 20
bitsInLastCell = 32
shift = 11
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
)
type quickXorHash struct {
data [dataSize]uint64
lengthSoFar uint64
shiftSoFar int
}
// New returns a new hash.Hash computing the quickXorHash checksum.
func New() hash.Hash {
return &quickXorHash{}
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
currentshift := q.shiftSoFar
// The bitvector where we'll start xoring
vectorArrayIndex := currentshift / 64
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
}
for i := 0; i < iterations; i++ {
isLastCell := vectorArrayIndex == len(q.data)-1
var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There are at least 8 bits left in this cell, so the whole byte fits
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
}
}
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
return len(p), nil
}
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for _, d := range q.data[:len(q.data)-1] {
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
}
// remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
// XOR the file length with the least significant bits in little endian format
d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2))
h[Size-5] ^= byte(d >> (8 * 3))
h[Size-4] ^= byte(d >> (8 * 4))
h[Size-3] ^= byte(d >> (8 * 5))
h[Size-2] ^= byte(d >> (8 * 6))
h[Size-1] ^= byte(d >> (8 * 7))
return h
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum()
return append(b, hash[:]...)
}
// Reset resets the Hash to its initial state.
func (q *quickXorHash) Reset() {
*q = quickXorHash{}
}
// Size returns the number of bytes Sum will return.
func (q *quickXorHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (q *quickXorHash) BlockSize() int {
return BlockSize
}
// Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) [Size]byte {
var d quickXorHash
_, _ = d.Write(data)
return d.checkSum()
}
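A usage sketch: hash a byte slice and print it base64-encoded, which (as an assumption, not shown in this file) is how OneDrive reports the value; the import path is inferred from this vendor tree:
```
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/ncw/rclone/backend/onedrive/quickxorhash"
)

func main() {
	sum := quickxorhash.Sum([]byte("The quick brown fox"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}
```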

1493
vendor/github.com/ncw/rclone/backend/s3/s3.go generated vendored Executable file

File diff suppressed because it is too large

115
vendor/github.com/ncw/rclone/backend/s3/v2sign.go generated vendored Executable file

@@ -0,0 +1,115 @@
// v2 signing
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/http"
"sort"
"strings"
"time"
)
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"acl": {},
"location": {},
"logging": {},
"notification": {},
"partNumber": {},
"policy": {},
"requestPayment": {},
"torrent": {},
"uploadId": {},
"uploads": {},
"versionId": {},
"versioning": {},
"versions": {},
"response-content-type": {},
"response-content-language": {},
"response-expires": {},
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
}
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
// Sort out URI
uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" {
uri = "/"
}
// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
// Look for query parameters which need to be added to the signature
params := req.URL.Query()
var queriesToSign []string
for k, vs := range params {
if _, ok := s3ParamsToSign[k]; ok {
for _, v := range vs {
if v == "" {
queriesToSign = append(queriesToSign, k)
} else {
queriesToSign = append(queriesToSign, k+"="+v)
}
}
}
}
// Add query parameters to URI
if len(queriesToSign) > 0 {
sort.StringSlice(queriesToSign).Sort()
uri += "?" + strings.Join(queriesToSign, "&")
}
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}
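Since sign is unexported, here is an in-package sketch of signing a request before sending it (credentials and URL are placeholders):
```
req, err := http.NewRequest("GET", "https://example-bucket.s3.example.com/path/to/key", nil)
if err != nil {
	return err
}
sign("AKIDEXAMPLE", "example-secret", req)
// req now carries a Date header and
// "Authorization: AWS AKIDEXAMPLE:<base64 signature>".
resp, err := http.DefaultClient.Do(req)
```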

21
vendor/github.com/ncw/rclone/cmd/serve/httplib/http_new.go generated vendored Executable file

@@ -0,0 +1,21 @@
// HTTP parts go1.8+
//+build go1.8
package httplib
import (
"net/http"
"time"
)
// Initialise the http.Server for post go1.8
func initServer(s *http.Server) {
s.ReadHeaderTimeout = 10 * time.Second // time to send the headers
s.IdleTimeout = 60 * time.Second // time to keep idle connections open
}
// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
return s.Close()
}

18
vendor/github.com/ncw/rclone/cmd/serve/httplib/http_old.go generated vendored Executable file

@@ -0,0 +1,18 @@
// HTTP parts pre go1.8
//+build !go1.8
package httplib
import (
"net/http"
)
// Initialise the http.Server for pre go1.8
func initServer(s *http.Server) {
}
// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
return nil
}

256
vendor/github.com/ncw/rclone/cmd/serve/httplib/httplib.go generated vendored Executable file

@@ -0,0 +1,256 @@
// Package httplib provides common functionality for http servers
package httplib
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"time"
auth "github.com/abbot/go-http-auth"
"github.com/ncw/rclone/fs"
)
// Globals
var ()
// Help contains text describing the http server to add to the command
// help.
var Help = `
### Server options
Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
--server-read-timeout and --server-write-timeout can be used to
control the timeouts on the server. Note that this is the total time
for a transfer.
--max-header-bytes controls the maximum number of bytes the server will
accept in the HTTP header.
#### Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the --user and --pass flags.
Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
To create an htpasswd file:
touch htpasswd
htpasswd -B htpasswd user
htpasswd -B htpasswd anotherUser
The password file can be updated while rclone is running.
Use --realm to set the authentication realm.
#### SSL/TLS
By default this will serve over http. If you want you can serve over
https. You will need to supply the --cert and --key flags. If you
wish to do client side certificate validation then you will need to
supply --client-ca also.
--cert should be either a PEM encoded certificate or a concatenation
of that with the CA certificate. --key should be the PEM encoded
private key and --client-ca should be the PEM encoded client
certificate authority certificate.
`
// Options contains options for the http Server
type Options struct {
ListenAddr string // Port to listen on
ServerReadTimeout time.Duration // Timeout for server reading data
ServerWriteTimeout time.Duration // Timeout for server writing data
MaxHeaderBytes int // Maximum size of request header
SslCert string // SSL PEM key (concatenation of certificate and CA certificate)
SslKey string // SSL PEM Private key
ClientCA string // Client certificate authority to verify clients with
HtPasswd string // htpasswd file - if not provided no authentication is done
Realm string // realm for authentication
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:8080",
Realm: "rclone",
ServerReadTimeout: 1 * time.Hour,
ServerWriteTimeout: 1 * time.Hour,
MaxHeaderBytes: 4096,
}
// Server contains info about the running http server
type Server struct {
Opt Options
handler http.Handler // original handler
listener net.Listener
waitChan chan struct{} // for waiting on the listener to close
httpServer *http.Server
basicPassHashed string
useSSL bool // if server is configured for SSL/TLS
}
// singleUserProvider provides the encrypted password for a single user
func (s *Server) singleUserProvider(user, realm string) string {
if user == s.Opt.BasicUser {
return s.basicPassHashed
}
return ""
}
// NewServer creates an http server. The opt can be nil in which case
// the default options will be used.
func NewServer(handler http.Handler, opt *Options) *Server {
s := &Server{
handler: handler,
}
// Make a copy of the options
if opt != nil {
s.Opt = *opt
} else {
s.Opt = DefaultOpt
}
// Use htpasswd if required on everything
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" {
var secretProvider auth.SecretProvider
if s.Opt.HtPasswd != "" {
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
} else {
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
secretProvider = s.singleUserProvider
}
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
handler = auth.JustCheck(authenticator, handler.ServeHTTP)
}
s.useSSL = s.Opt.SslKey != ""
if (s.Opt.SslCert != "") != s.useSSL {
log.Fatalf("Need both -cert and -key to use SSL")
}
// FIXME make a transport?
s.httpServer = &http.Server{
Addr: s.Opt.ListenAddr,
Handler: handler,
ReadTimeout: s.Opt.ServerReadTimeout,
WriteTimeout: s.Opt.ServerWriteTimeout,
MaxHeaderBytes: s.Opt.MaxHeaderBytes,
TLSConfig: &tls.Config{
MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
},
}
// go version specific initialisation
initServer(s.httpServer)
if s.Opt.ClientCA != "" {
if !s.useSSL {
log.Fatalf("Can't use --client-ca without --cert and --key")
}
certpool := x509.NewCertPool()
pem, err := ioutil.ReadFile(s.Opt.ClientCA)
if err != nil {
log.Fatalf("Failed to read client certificate authority: %v", err)
}
if !certpool.AppendCertsFromPEM(pem) {
log.Fatalf("Can't parse client certificate authority")
}
s.httpServer.TLSConfig.ClientCAs = certpool
s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
return s
}
// Serve runs the server - returns an error only if
// the listener was not started; does not block, so
// use s.Wait() to block on the listener indefinitely.
func (s *Server) Serve() error {
ln, err := net.Listen("tcp", s.httpServer.Addr)
if err != nil {
return err
}
s.listener = ln
s.waitChan = make(chan struct{})
go func() {
var err error
if s.useSSL {
// hacky hack to get this to work with old Go versions, which
// don't have ServeTLS on http.Server; see PR #2194.
type tlsServer interface {
ServeTLS(ln net.Listener, cert, key string) error
}
srvIface := interface{}(s.httpServer)
if tlsSrv, ok := srvIface.(tlsServer); ok {
// yay -- we get easy TLS support with HTTP/2
err = tlsSrv.ServeTLS(s.listener, s.Opt.SslCert, s.Opt.SslKey)
} else {
// oh well -- we can still do TLS but might not have HTTP/2
tlsConfig := new(tls.Config)
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(s.Opt.SslCert, s.Opt.SslKey)
if err != nil {
log.Printf("Error loading key pair: %v", err)
}
tlsLn := tls.NewListener(s.listener, tlsConfig)
err = s.httpServer.Serve(tlsLn)
}
} else {
err = s.httpServer.Serve(s.listener)
}
if err != nil {
log.Printf("Error on serving HTTP server: %v", err)
}
}()
return nil
}
// Wait blocks while the listener is open.
func (s *Server) Wait() {
<-s.waitChan
}
// Close shuts the running server down
func (s *Server) Close() {
err := closeServer(s.httpServer)
if err != nil {
log.Printf("Error on closing HTTP server: %v", err)
return
}
close(s.waitChan)
}
// URL returns the serving address of this server
func (s *Server) URL() string {
proto := "http"
if s.useSSL {
proto = "https"
}
addr := s.Opt.ListenAddr
if s.listener != nil {
// prefer actual listener address; required if using 0-port
// (i.e. port assigned by operating system)
addr = s.listener.Addr().String()
}
return fmt.Sprintf("%s://%s/", proto, addr)
}
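Pulling the pieces together, a minimal consumer of this package might look like the following sketch (error handling trimmed to log.Fatal):
```
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	_, _ = fmt.Fprintln(w, "hello from httplib")
})
opt := httplib.DefaultOpt
opt.ListenAddr = "localhost:0" // port 0: let the OS choose
srv := httplib.NewServer(handler, &opt)
if err := srv.Serve(); err != nil {
	log.Fatal(err)
}
fmt.Println("listening on", srv.URL()) // reflects the real port
srv.Wait()                             // blocks until Close is called
```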

391
vendor/github.com/ncw/rclone/fs/accounting/accounting.go generated vendored Executable file

@@ -0,0 +1,391 @@
// Package accounting provides an accounting and limiting reader
package accounting
import (
"fmt"
"io"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/asyncreader"
"github.com/ncw/rclone/fs/fserrors"
"github.com/pkg/errors"
)
// ErrorMaxTransferLimitReached is returned from Read when the max
// transfer limit is reached.
var ErrorMaxTransferLimitReached = fserrors.FatalError(errors.New("Max transfer limit reached as set by --max-transfer"))
// Account limits and accounts for one transfer
type Account struct {
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex
in io.Reader
origIn io.ReadCloser
close io.Closer
size int64
name string
statmu sync.Mutex // Separate mutex for stat values.
bytes int64 // Total number of bytes read
max int64 // if >=0 the max number of bytes to transfer
start time.Time // Start time of first read
lpTime time.Time // Time of last average measurement
lpBytes int // Number of bytes read since last measurement
avg float64 // Moving average of last few measurements in bytes/s
closed bool // set if the file is closed
exit chan struct{} // channel that will be closed when transfer is finished
withBuf bool // is using a buffered in
}
const averagePeriod = 16 // period to do exponentially weighted averages over
// NewAccountSizeName makes a Account reader for an io.ReadCloser of
// the given size and name
func NewAccountSizeName(in io.ReadCloser, size int64, name string) *Account {
acc := &Account{
in: in,
close: in,
origIn: in,
size: size,
name: name,
exit: make(chan struct{}),
avg: 0,
lpTime: time.Now(),
max: int64(fs.Config.MaxTransfer),
}
go acc.averageLoop()
Stats.inProgress.set(acc.name, acc)
return acc
}
// NewAccount makes a Account reader for an object
func NewAccount(in io.ReadCloser, obj fs.Object) *Account {
return NewAccountSizeName(in, obj.Size(), obj.Remote())
}
// WithBuffer - If the file is above a certain size it adds an Async reader
func (acc *Account) WithBuffer() *Account {
acc.withBuf = true
var buffers int
if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
} else {
buffers = int(acc.size / asyncreader.BufferSize)
}
// On big files add a buffer
if buffers > 0 {
rc, err := asyncreader.New(acc.origIn, buffers)
if err != nil {
fs.Errorf(acc.name, "Failed to make buffer: %v", err)
} else {
acc.in = rc
acc.close = rc
}
}
return acc
}
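A sketch of the intended call pattern (src is any fs.Object, rc its opened reader and dst an io.Writer; all three are placeholders):
```
acc := accounting.NewAccount(rc, src).WithBuffer()
defer func() { _ = acc.Close() }()
// Reads through acc are counted in Stats, rate limited by the
// token bucket, and read ahead asynchronously for large files.
n, err := io.Copy(dst, acc)
```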
// GetReader returns the underlying io.ReadCloser under any Buffer
func (acc *Account) GetReader() io.ReadCloser {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.origIn
}
// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
acc.mu.Lock()
defer acc.mu.Unlock()
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
return asyncIn
}
return nil
}
// StopBuffering stops the async buffer doing any more buffering
func (acc *Account) StopBuffering() {
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
asyncIn.Abandon()
}
}
// UpdateReader updates the underlying io.ReadCloser stopping the
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(in io.ReadCloser) {
acc.mu.Lock()
acc.StopBuffering()
acc.in = in
acc.close = in
acc.origIn = in
acc.WithBuffer()
acc.mu.Unlock()
}
// averageLoop calculates averages for the stats in the background
func (acc *Account) averageLoop() {
tick := time.NewTicker(time.Second)
var period float64
defer tick.Stop()
for {
select {
case now := <-tick.C:
acc.statmu.Lock()
// Add average of last second.
elapsed := now.Sub(acc.lpTime).Seconds()
avg := float64(acc.lpBytes) / elapsed
// Soft start the moving average
if period < averagePeriod {
period++
}
acc.avg = (avg + (period-1)*acc.avg) / period
acc.lpBytes = 0
acc.lpTime = now
// Unlock stats
acc.statmu.Unlock()
case <-acc.exit:
return
}
}
}
// read bytes from the io.Reader passed in and account them
func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
acc.statmu.Lock()
if acc.max >= 0 && Stats.GetBytes() >= acc.max {
acc.statmu.Unlock()
return 0, ErrorMaxTransferLimitReached
}
// Set start time.
if acc.start.IsZero() {
acc.start = time.Now()
}
acc.statmu.Unlock()
n, err = in.Read(p)
// Update Stats
acc.statmu.Lock()
acc.lpBytes += n
acc.bytes += int64(n)
acc.statmu.Unlock()
Stats.Bytes(int64(n))
limitBandwidth(n)
return
}
// Read bytes from the object - see io.Reader
func (acc *Account) Read(p []byte) (n int, err error) {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.read(acc.in, p)
}
// Close the object
func (acc *Account) Close() error {
acc.mu.Lock()
defer acc.mu.Unlock()
if acc.closed {
return nil
}
acc.closed = true
close(acc.exit)
Stats.inProgress.clear(acc.name)
return acc.close.Close()
}
// progress returns bytes read as well as the size.
// Size can be <= 0 if the size is unknown.
func (acc *Account) progress() (bytes, size int64) {
if acc == nil {
return 0, 0
}
acc.statmu.Lock()
bytes, size = acc.bytes, acc.size
acc.statmu.Unlock()
return bytes, size
}
// speed returns the speed of the current file transfer
// in bytes per second, as well as an exponentially weighted moving average.
// If no read has completed yet, 0 is returned for both values.
func (acc *Account) speed() (bps, current float64) {
if acc == nil {
return 0, 0
}
acc.statmu.Lock()
defer acc.statmu.Unlock()
if acc.bytes == 0 {
return 0, 0
}
// Calculate speed from first read.
total := float64(time.Now().Sub(acc.start)) / float64(time.Second)
bps = float64(acc.bytes) / total
current = acc.avg
return
}
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
if acc == nil {
return 0, false
}
acc.statmu.Lock()
defer acc.statmu.Unlock()
return eta(acc.bytes, acc.size, acc.avg)
}
// String produces stats for this file
func (acc *Account) String() string {
a, b := acc.progress()
_, cur := acc.speed()
eta, etaok := acc.eta()
etas := "-"
if etaok {
if eta > 0 {
etas = fmt.Sprintf("%v", eta)
} else {
etas = "0s"
}
}
name := []rune(acc.name)
if fs.Config.StatsFileNameLength > 0 {
if len(name) > fs.Config.StatsFileNameLength {
where := len(name) - fs.Config.StatsFileNameLength
name = append([]rune{'.', '.', '.'}, name[where:]...)
}
}
if fs.Config.DataRateUnit == "bits" {
cur = cur * 8
}
percentageDone := 0
if b > 0 {
percentageDone = int(100 * float64(a) / float64(b))
}
done := fmt.Sprintf("%2d%% /%s", percentageDone, fs.SizeSuffix(b))
return fmt.Sprintf("%45s: %s, %s/s, %s",
string(name),
done,
fs.SizeSuffix(cur),
etas,
)
}
// RemoteStats produces stats for this file
func (acc *Account) RemoteStats() (out map[string]interface{}) {
out = make(map[string]interface{})
a, b := acc.progress()
out["bytes"] = a
out["size"] = b
spd, cur := acc.speed()
out["speed"] = spd
out["speedAvg"] = cur
eta, etaok := acc.eta()
out["eta"] = nil
if etaok {
if eta > 0 {
out["eta"] = eta.Seconds()
} else {
out["eta"] = 0
}
}
out["name"] = acc.name
percentageDone := 0
if b > 0 {
percentageDone = int(100 * float64(a) / float64(b))
}
out["percentage"] = percentageDone
return out
}
// OldStream returns the top io.Reader
func (acc *Account) OldStream() io.Reader {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.in
}
// SetStream updates the top io.Reader
func (acc *Account) SetStream(in io.Reader) {
acc.mu.Lock()
acc.in = in
acc.mu.Unlock()
}
// WrapStream wraps an io Reader so it will be accounted in the same
// way as account
func (acc *Account) WrapStream(in io.Reader) io.Reader {
return &accountStream{
acc: acc,
in: in,
}
}
// accountStream accounts a single io.Reader into a parent *Account
type accountStream struct {
acc *Account
in io.Reader
}
// OldStream returns the underlying stream
func (a *accountStream) OldStream() io.Reader {
return a.in
}
// SetStream sets the underlying stream
func (a *accountStream) SetStream(in io.Reader) {
a.in = in
}
// WrapStream wraps the passed in reader in an accounter
func (a *accountStream) WrapStream(in io.Reader) io.Reader {
return a.acc.WrapStream(in)
}
// Read bytes from the object - see io.Reader
func (a *accountStream) Read(p []byte) (n int, err error) {
return a.acc.read(a.in, p)
}
// Accounter accounts a stream allowing the accounting to be removed and re-added
type Accounter interface {
io.Reader
OldStream() io.Reader
SetStream(io.Reader)
WrapStream(io.Reader) io.Reader
}
// WrapFn wraps an io.Reader (for accounting purposes usually)
type WrapFn func(io.Reader) io.Reader
// UnWrap unwraps a reader returning unwrapped and wrap, a function to
// wrap it back up again. If `in` is an Accounter then this function
// will return the reader with the accounting removed, and wrap will
// put the accounting back onto the new Reader passed in.
//
// This allows functions which wrap io.Readers to move the accounting
// to the end of the wrapped chain of readers. This is very important
// if buffering is being introduced and if the Reader might be wrapped
// again.
func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
acc, ok := in.(Accounter)
if !ok {
return in, func(r io.Reader) io.Reader { return r }
}
return acc.OldStream(), acc.WrapStream
}
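For example, to slide an extra buffering layer underneath the accounting (a sketch):
```
unwrapped, wrap := accounting.UnWrap(in)
buffered := bufio.NewReaderSize(unwrapped, 64*1024)
in = wrap(buffered) // accounting now sits on top of the buffer again
```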


@@ -0,0 +1,10 @@
// Accounting and limiting reader
// Non-unix specific functions.
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package accounting
// startSignalHandler() is Unix specific and does nothing under non-Unix
// platforms.
func startSignalHandler() {}


@@ -0,0 +1,36 @@
// Accounting and limiting reader
// Unix specific functions.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package accounting
import (
"os"
"os/signal"
"syscall"
"github.com/ncw/rclone/fs"
)
// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
func startSignalHandler() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGUSR2)
go func() {
// This runs forever, but blocks until the signal is received.
for {
<-signals
tokenBucketMu.Lock()
bwLimitToggledOff = !bwLimitToggledOff
tokenBucket, prevTokenBucket = prevTokenBucket, tokenBucket
s := "disabled"
if tokenBucket != nil {
s = "enabled"
}
tokenBucketMu.Unlock()
fs.Logf(nil, "Bandwidth limit %s by user", s)
}
}()
}

41
vendor/github.com/ncw/rclone/fs/accounting/inprogress.go generated vendored Executable file

@@ -0,0 +1,41 @@
package accounting
import (
"sync"
"github.com/ncw/rclone/fs"
)
// inProgress holds a synchronized map of in progress transfers
type inProgress struct {
mu sync.Mutex
m map[string]*Account
}
// newInProgress makes a new inProgress object
func newInProgress() *inProgress {
return &inProgress{
m: make(map[string]*Account, fs.Config.Transfers),
}
}
// set marks the name as in progress
func (ip *inProgress) set(name string, acc *Account) {
ip.mu.Lock()
defer ip.mu.Unlock()
ip.m[name] = acc
}
// clear marks the name as no longer in progress
func (ip *inProgress) clear(name string) {
ip.mu.Lock()
defer ip.mu.Unlock()
delete(ip.m, name)
}
// get gets the account for name, or nil if not found
func (ip *inProgress) get(name string) *Account {
ip.mu.Lock()
defer ip.mu.Unlock()
return ip.m[name]
}

406
vendor/github.com/ncw/rclone/fs/accounting/stats.go generated vendored Executable file

@@ -0,0 +1,406 @@
package accounting
import (
"bytes"
"fmt"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/rc"
)
var (
// Stats is global statistics counter
Stats = NewStats()
)
func init() {
// Set the function pointer up in fs
fs.CountError = Stats.Error
rc.Add(rc.Call{
Path: "core/stats",
Fn: Stats.RemoteStats,
Title: "Returns stats about current transfers.",
Help: `
This returns all available stats
rclone rc core/stats
Returns the following values:
` + "```" + `
{
"speed": average speed in bytes/sec since start of the process,
"bytes": total transferred bytes since the start of the process,
"errors": number of errors,
"checks": number of checked files,
"transfers": number of transferred files,
"deletes" : number of deleted files,
"elapsedTime": time in seconds since the start of the process,
"lastError": last occurred error,
"transferring": an array of currently active file transfers:
[
{
"bytes": total transferred bytes for this file,
"eta": estimated time in seconds until file transfer completion
"name": name of the file,
"percentage": progress of the file transfer in percent,
"speed": speed in bytes/sec,
"speedAvg": speed in bytes/sec as an exponentially weighted moving average,
"size": size of the file in bytes
}
],
"checking": an array of names of currently active file checks
[]
}
` + "```" + `
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
The value for "eta" is null if an eta cannot be determined.
`,
})
}
// StatsInfo accounts all transfers
type StatsInfo struct {
mu sync.RWMutex
bytes int64
errors int64
lastError error
checks int64
checking *stringSet
checkQueue int
checkQueueSize int64
transfers int64
transferring *stringSet
transferQueue int
transferQueueSize int64
renameQueue int
renameQueueSize int64
deletes int64
start time.Time
inProgress *inProgress
}
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
return &StatsInfo{
checking: newStringSet(fs.Config.Checkers),
transferring: newStringSet(fs.Config.Transfers),
start: time.Now(),
inProgress: newInProgress(),
}
}
// RemoteStats returns stats for rc
func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
s.mu.RLock()
dt := time.Now().Sub(s.start)
dtSeconds := dt.Seconds()
speed := 0.0
if dt > 0 {
speed = float64(s.bytes) / dtSeconds
}
out["speed"] = speed
out["bytes"] = s.bytes
out["errors"] = s.errors
out["checks"] = s.checks
out["transfers"] = s.transfers
out["deletes"] = s.deletes
out["elapsedTime"] = dtSeconds
s.mu.RUnlock()
if !s.checking.empty() {
var c []string
s.checking.mu.RLock()
defer s.checking.mu.RUnlock()
for name := range s.checking.items {
c = append(c, name)
}
out["checking"] = c
}
if !s.transferring.empty() {
var t []interface{}
s.transferring.mu.RLock()
defer s.transferring.mu.RUnlock()
for name := range s.transferring.items {
if acc := s.inProgress.get(name); acc != nil {
t = append(t, acc.RemoteStats())
} else {
t = append(t, name)
}
}
out["transferring"] = t
}
if s.errors > 0 {
out["lastError"] = s.lastError
}
return out, nil
}
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
if total <= 0 || size < 0 || rate <= 0 {
return 0, false
}
remaining := total - size
if remaining < 0 {
return 0, false
}
seconds := float64(remaining) / rate
return time.Second * time.Duration(seconds), true
}
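A quick in-package check of the arithmetic:
```
// 25 MiB done of 100 MiB at 5 MiB/s:
// remaining = 75 MiB, so eta = 75/5 = 15s.
d, ok := eta(25<<20, 100<<20, 5<<20)
fmt.Println(d, ok) // 15s true
```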
// etaString returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined it returns "-"
func etaString(done, total int64, rate float64) string {
d, ok := eta(done, total, rate)
if !ok {
return "-"
}
return d.String()
}
// percent returns a/b as a percentage rounded to the nearest integer
// as a string
//
// if the percentage is invalid it returns "-"
func percent(a int64, b int64) string {
if a < 0 || b <= 0 {
return "-"
}
return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
}
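The +0.5 rounds to the nearest integer rather than truncating; for instance (in-package sketch):
```
fmt.Println(percent(1, 3)) // "33%" (33.33 + 0.5 truncates to 33)
fmt.Println(percent(2, 3)) // "67%" (66.67 + 0.5 truncates to 67)
fmt.Println(percent(5, 0)) // "-"
```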
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
// checking and transferring have their own locking so read
// here before lock to prevent deadlock on GetBytes
transferring, checking := s.transferring.count(), s.checking.count()
transferringBytesDone, transferringBytesTotal := s.transferring.progress()
s.mu.RLock()
dt := time.Now().Sub(s.start)
dtSeconds := dt.Seconds()
speed := 0.0
if dt > 0 {
speed = float64(s.bytes) / dtSeconds
}
dtRounded := dt - (dt % (time.Second / 10))
if fs.Config.DataRateUnit == "bits" {
speed = speed * 8
}
var (
totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
// note that s.bytes already includes transferringBytesDone so
// we take it off here to avoid double counting
totalSize = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
currentSize = s.bytes
buf = &bytes.Buffer{}
xfrchkString = ""
)
if !fs.Config.StatsOneLine {
_, _ = fmt.Fprintf(buf, "\nTransferred: ")
} else {
xfrchk := []string{}
if totalTransfer > 0 && s.transferQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
}
if totalChecks > 0 && s.checkQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
}
if len(xfrchk) > 0 {
xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
}
}
_, _ = fmt.Fprintf(buf, "%10s / %s, %s, %s, ETA %s%s",
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
percent(s.bytes, totalSize),
fs.SizeSuffix(speed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
etaString(currentSize, totalSize, speed),
xfrchkString,
)
if !fs.Config.StatsOneLine {
_, _ = fmt.Fprintf(buf, `
Errors: %10d
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
s.errors,
s.checks, totalChecks, percent(s.checks, totalChecks),
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
dtRounded)
}
// checking and transferring have their own locking so unlock
// here to prevent deadlock on GetBytes
s.mu.RUnlock()
// Add per transfer stats if required
if !fs.Config.StatsOneLine {
if !s.checking.empty() {
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
}
if !s.transferring.empty() {
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
}
}
return buf.String()
}
// Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() {
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
}
// Bytes updates the stats for bytes bytes
func (s *StatsInfo) Bytes(bytes int64) {
s.mu.Lock()
defer s.mu.Unlock()
s.bytes += bytes
}
// GetBytes returns the number of bytes transferred so far
func (s *StatsInfo) GetBytes() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.bytes
}
// Errors updates the stats for errors
func (s *StatsInfo) Errors(errors int64) {
s.mu.Lock()
defer s.mu.Unlock()
s.errors += errors
}
// GetErrors reads the number of errors
func (s *StatsInfo) GetErrors() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.errors
}
// GetLastError returns the lastError
func (s *StatsInfo) GetLastError() error {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastError
}
// Deletes updates the stats for deletes
func (s *StatsInfo) Deletes(deletes int64) int64 {
s.mu.Lock()
defer s.mu.Unlock()
s.deletes += deletes
return s.deletes
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.mu.Lock()
defer s.mu.Unlock()
s.bytes = 0
s.errors = 0
s.checks = 0
s.transfers = 0
s.deletes = 0
}
// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
s.mu.Lock()
defer s.mu.Unlock()
s.errors = 0
}
// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.errors != 0
}
// Error adds a single error into the stats and assigns lastError
func (s *StatsInfo) Error(err error) {
s.mu.Lock()
defer s.mu.Unlock()
s.errors++
s.lastError = err
}
// Checking adds a check into the stats
func (s *StatsInfo) Checking(remote string) {
s.checking.add(remote)
}
// DoneChecking removes a check from the stats
func (s *StatsInfo) DoneChecking(remote string) {
s.checking.del(remote)
s.mu.Lock()
s.checks++
s.mu.Unlock()
}
// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.transfers
}
// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(remote string) {
s.transferring.add(remote)
}
// DoneTransferring removes a transfer from the stats
//
// if ok is true then it increments the transfers count
func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
s.transferring.del(remote)
if ok {
s.mu.Lock()
s.transfers++
s.mu.Unlock()
}
}
// SetCheckQueue sets the number of queued checks
func (s *StatsInfo) SetCheckQueue(n int, size int64) {
s.mu.Lock()
s.checkQueue = n
s.checkQueueSize = size
s.mu.Unlock()
}
// SetTransferQueue sets the number of queued transfers
func (s *StatsInfo) SetTransferQueue(n int, size int64) {
s.mu.Lock()
s.transferQueue = n
s.transferQueueSize = size
s.mu.Unlock()
}
// SetRenameQueue sets the number of queued transfers
func (s *StatsInfo) SetRenameQueue(n int, size int64) {
s.mu.Lock()
s.renameQueue = n
s.renameQueueSize = size
s.mu.Unlock()
}

88
vendor/github.com/ncw/rclone/fs/accounting/stringset.go generated vendored Executable file

@@ -0,0 +1,88 @@
package accounting
import (
"sort"
"strings"
"sync"
)
// stringSet holds a set of strings
type stringSet struct {
mu sync.RWMutex
items map[string]struct{}
}
// newStringSet creates a new empty string set of capacity size
func newStringSet(size int) *stringSet {
return &stringSet{
items: make(map[string]struct{}, size),
}
}
// add adds remote to the set
func (ss *stringSet) add(remote string) {
ss.mu.Lock()
ss.items[remote] = struct{}{}
ss.mu.Unlock()
}
// del removes remote from the set
func (ss *stringSet) del(remote string) {
ss.mu.Lock()
delete(ss.items, remote)
ss.mu.Unlock()
}
// empty returns whether the set has any items
func (ss *stringSet) empty() bool {
ss.mu.RLock()
defer ss.mu.RUnlock()
return len(ss.items) == 0
}
// count returns the number of items in the set
func (ss *stringSet) count() int {
ss.mu.RLock()
defer ss.mu.RUnlock()
return len(ss.items)
}
// Strings returns all the strings in the stringSet
func (ss *stringSet) Strings() []string {
ss.mu.RLock()
defer ss.mu.RUnlock()
strings := make([]string, 0, len(ss.items))
for name := range ss.items {
var out string
if acc := Stats.inProgress.get(name); acc != nil {
out = acc.String()
} else {
out = name
}
strings = append(strings, " * "+out)
}
sorted := sort.StringSlice(strings)
sorted.Sort()
return sorted
}
// String returns all the file names in the stringSet joined by newline
func (ss *stringSet) String() string {
return strings.Join(ss.Strings(), "\n")
}
// progress returns total bytes read as well as the size.
func (ss *stringSet) progress() (totalBytes, totalSize int64) {
ss.mu.RLock()
defer ss.mu.RUnlock()
for name := range ss.items {
if acc := Stats.inProgress.get(name); acc != nil {
bytes, size := acc.progress()
if size >= 0 && bytes >= 0 {
totalBytes += bytes
totalSize += size
}
}
}
return totalBytes, totalSize
}

169
vendor/github.com/ncw/rclone/fs/accounting/token_bucket.go generated vendored Executable file

@@ -0,0 +1,169 @@
package accounting
import (
"context"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
"golang.org/x/time/rate"
)
// Globals
var (
tokenBucketMu sync.Mutex // protects the token bucket variables
tokenBucket *rate.Limiter
prevTokenBucket = tokenBucket
bwLimitToggledOff = false
currLimitMu sync.Mutex // protects changes to the timeslot
currLimit fs.BwTimeSlot
)
const maxBurstSize = 4 * 1024 * 1024 // must be bigger than the biggest request
// make a new empty token bucket with the bandwidth given
func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
newTokenBucket := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
// empty the bucket
err := newTokenBucket.WaitN(context.Background(), maxBurstSize)
if err != nil {
fs.Errorf(nil, "Failed to empty token bucket: %v", err)
}
return newTokenBucket
}
// StartTokenBucket starts the token bucket if necessary
func StartTokenBucket() {
currLimitMu.Lock()
currLimit := fs.Config.BwLimit.LimitAt(time.Now())
currLimitMu.Unlock()
if currLimit.Bandwidth > 0 {
tokenBucket = newTokenBucket(currLimit.Bandwidth)
fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.Bandwidth)
// Start the SIGUSR2 signal handler to toggle bandwidth.
// This function does nothing on Windows systems.
startSignalHandler()
}
}
// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
func StartTokenTicker() {
// If the timetable has a single entry or was not specified, we don't need
// a ticker to update the bandwidth.
if len(fs.Config.BwLimit) <= 1 {
return
}
ticker := time.NewTicker(time.Minute)
go func() {
for range ticker.C {
limitNow := fs.Config.BwLimit.LimitAt(time.Now())
currLimitMu.Lock()
if currLimit.Bandwidth != limitNow.Bandwidth {
tokenBucketMu.Lock()
// If bwlimit is toggled off, the change should only
// become active on the next toggle, which causes
// an exchange of tokenBucket <-> prevTokenBucket
var targetBucket **rate.Limiter
if bwLimitToggledOff {
targetBucket = &prevTokenBucket
} else {
targetBucket = &tokenBucket
}
// Set new bandwidth. If unlimited, set tokenbucket to nil.
if limitNow.Bandwidth > 0 {
*targetBucket = newTokenBucket(limitNow.Bandwidth)
if bwLimitToggledOff {
fs.Logf(nil, "Scheduled bandwidth change. "+
"Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth)
} else {
fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth)
}
} else {
*targetBucket = nil
fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
}
currLimit = limitNow
tokenBucketMu.Unlock()
}
currLimitMu.Unlock()
}
}()
}
// limitBandwidth sleeps for the correct amount of time for the passage
// of n bytes according to the current bandwidth limit
func limitBandwidth(n int) {
tokenBucketMu.Lock()
// Limit the transfer speed if required
if tokenBucket != nil {
err := tokenBucket.WaitN(context.Background(), n)
if err != nil {
fs.Errorf(nil, "Token bucket error: %v", err)
}
}
tokenBucketMu.Unlock()
}
// SetBwLimit sets the current bandwidth limit
func SetBwLimit(bandwidth fs.SizeSuffix) {
tokenBucketMu.Lock()
defer tokenBucketMu.Unlock()
if bandwidth > 0 {
tokenBucket = newTokenBucket(bandwidth)
fs.Logf(nil, "Bandwidth limit set to %v", bandwidth)
} else {
tokenBucket = nil
fs.Logf(nil, "Bandwidth limit reset to unlimited")
}
}
// Remote control for the token bucket
func init() {
rc.Add(rc.Call{
Path: "core/bwlimit",
Fn: func(in rc.Params) (out rc.Params, err error) {
ibwlimit, ok := in["rate"]
if !ok {
return out, errors.Errorf("parameter rate not found")
}
bwlimit, ok := ibwlimit.(string)
if !ok {
return out, errors.Errorf("value must be string rate=%v", ibwlimit)
}
var bws fs.BwTimetable
err = bws.Set(bwlimit)
if err != nil {
return out, errors.Wrap(err, "bad bwlimit")
}
if len(bws) != 1 {
return out, errors.New("need exactly 1 bandwidth setting")
}
bw := bws[0]
SetBwLimit(bw.Bandwidth)
return rc.Params{"rate": bw.Bandwidth.String()}, nil
},
Title: "Set the bandwidth limit.",
Help: `
This sets the bandwidth limit to that passed in.
Eg
rclone rc core/bwlimit rate=1M
rclone rc core/bwlimit rate=off
The format of the parameter is exactly the same as passed to --bwlimit
except only one bandwidth may be specified.
`,
})
}

343
vendor/github.com/ncw/rclone/fs/asyncreader/asyncreader.go generated vendored Executable file

@@ -0,0 +1,343 @@
// Package asyncreader provides an asynchronous reader which reads
// independently of write
package asyncreader
import (
"io"
"sync"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
const (
// BufferSize is the default size of the async buffer
BufferSize = 1024 * 1024
softStartInitial = 4 * 1024
)
var asyncBufferPool = sync.Pool{
New: func() interface{} { return newBuffer() },
}
var errorStreamAbandoned = errors.New("stream abandoned")
// AsyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type AsyncReader struct {
in io.ReadCloser // Input reader
ready chan *buffer // Buffers ready to be handed to the reader
token chan struct{} // Tokens which allow a buffer to be taken
exit chan struct{} // Closes when finished
buffers int // Number of buffers
err error // If an error has occurred it is here
cur *buffer // Current buffer being served
exited chan struct{} // Channel is closed when the async reader shuts down
size int // size of buffer to use
closed bool // whether we have closed the underlying stream
mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
}
// New returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each of size BufferSize
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
if buffers <= 0 {
return nil, errors.New("number of buffers too small")
}
if rd == nil {
return nil, errors.New("nil reader supplied")
}
a := &AsyncReader{}
a.init(rd, buffers)
return a, nil
}
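A usage sketch (the file name is a placeholder):
```
f, err := os.Open("big.bin")
if err != nil {
	log.Fatal(err)
}
ar, err := asyncreader.New(f, 4) // up to 4 x 1 MiB of read-ahead
if err != nil {
	log.Fatal(err)
}
defer func() { _ = ar.Close() }() // Close also closes f
n, err := io.Copy(ioutil.Discard, ar)
```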
func (a *AsyncReader) init(rd io.ReadCloser, buffers int) {
a.in = rd
a.ready = make(chan *buffer, buffers)
a.token = make(chan struct{}, buffers)
a.exit = make(chan struct{}, 0)
a.exited = make(chan struct{}, 0)
a.buffers = buffers
a.cur = nil
a.size = softStartInitial
// Create tokens
for i := 0; i < buffers; i++ {
a.token <- struct{}{}
}
// Start async reader
go func() {
// Ensure that when we exit this is signalled.
defer close(a.exited)
defer close(a.ready)
for {
select {
case <-a.token:
b := a.getBuffer()
if a.size < BufferSize {
b.buf = b.buf[:a.size]
a.size <<= 1
}
err := b.read(a.in)
a.ready <- b
if err != nil {
return
}
case <-a.exit:
return
}
}
}()
}
// return the buffer to the pool (clearing it)
func (a *AsyncReader) putBuffer(b *buffer) {
b.clear()
asyncBufferPool.Put(b)
}
// get a buffer from the pool
func (a *AsyncReader) getBuffer() *buffer {
b := asyncBufferPool.Get().(*buffer)
return b
}
// fill will swap in the next ready buffer, returning any error encountered
func (a *AsyncReader) fill() (err error) {
if a.cur.isEmpty() {
if a.cur != nil {
a.putBuffer(a.cur)
a.token <- struct{}{}
a.cur = nil
}
b, ok := <-a.ready
if !ok {
// Return an error to show fill failed
if a.err == nil {
return errorStreamAbandoned
}
return a.err
}
a.cur = b
}
return nil
}
// Read will return the next available data.
func (a *AsyncReader) Read(p []byte) (n int, err error) {
a.mu.Lock()
defer a.mu.Unlock()
// Swap buffer and maybe return error
err = a.fill()
if err != nil {
return 0, err
}
// Copy what we can
n = copy(p, a.cur.buffer())
a.cur.increment(n)
// If at end of buffer, return any error, if present
if a.cur.isEmpty() {
a.err = a.cur.err
return n, a.err
}
return n, nil
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
a.mu.Lock()
defer a.mu.Unlock()
n = 0
for {
err = a.fill()
if err != nil {
return n, err
}
n2, err := w.Write(a.cur.buffer())
a.cur.increment(n2)
n += int64(n2)
if err != nil {
return n, err
}
if a.cur.err != nil {
a.err = a.cur.err
return n, a.cur.err
}
}
}
// SkipBytes will try to seek 'skip' bytes relative to the current position.
// On success it returns true. If 'skip' is outside the current buffer data or
// an error occurs, Abandon is called and false is returned.
func (a *AsyncReader) SkipBytes(skip int) (ok bool) {
a.mu.Lock()
defer func() {
a.mu.Unlock()
if !ok {
a.Abandon()
}
}()
if a.err != nil {
return false
}
if skip < 0 {
// seek backwards if skip is inside current buffer
if a.cur != nil && a.cur.offset+skip >= 0 {
a.cur.offset += skip
return true
}
return false
}
// early return if skip is past the maximum buffer capacity
if skip >= (len(a.ready)+1)*BufferSize {
return false
}
refillTokens := 0
for {
if a.cur.isEmpty() {
if a.cur != nil {
a.putBuffer(a.cur)
refillTokens++
a.cur = nil
}
select {
case b, ok := <-a.ready:
if !ok {
return false
}
a.cur = b
default:
return false
}
}
n := len(a.cur.buffer())
if n > skip {
n = skip
}
a.cur.increment(n)
skip -= n
if skip == 0 {
for ; refillTokens > 0; refillTokens-- {
a.token <- struct{}{}
}
// If at end of buffer, store any error, if present
if a.cur.isEmpty() && a.cur.err != nil {
a.err = a.cur.err
}
return true
}
if a.cur.err != nil {
a.err = a.cur.err
return false
}
}
}
// Abandon will ensure that the underlying async reader is shut down.
// It will NOT close the input supplied on New.
func (a *AsyncReader) Abandon() {
select {
case <-a.exit:
// Do nothing if reader routine already exited
return
default:
}
// Close and wait for go routine
close(a.exit)
<-a.exited
// take the lock to wait for Read/WriteTo to complete
a.mu.Lock()
defer a.mu.Unlock()
// Return any outstanding buffers to the Pool
if a.cur != nil {
a.putBuffer(a.cur)
a.cur = nil
}
for b := range a.ready {
a.putBuffer(b)
}
}
// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on New.
func (a *AsyncReader) Close() (err error) {
a.Abandon()
if a.closed {
return nil
}
a.closed = true
return a.in.Close()
}
// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
buf []byte
err error
offset int
}
func newBuffer() *buffer {
return &buffer{
buf: make([]byte, BufferSize),
err: nil,
}
}
// clear returns the buffer to its full size and clears the members
func (b *buffer) clear() {
b.buf = b.buf[:cap(b.buf)]
b.err = nil
b.offset = 0
}
// isEmpty returns true if the offset is at the end of the
// buffer, or if the buffer is nil
func (b *buffer) isEmpty() bool {
if b == nil {
return true
}
if len(b.buf)-b.offset <= 0 {
return true
}
return false
}
// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
var n int
n, b.err = readers.ReadFill(rd, b.buf)
b.buf = b.buf[0:n]
b.offset = 0
return b.err
}
// Return the buffer at current offset
func (b *buffer) buffer() []byte {
return b.buf[b.offset:]
}
// increment the offset
func (b *buffer) increment(n int) {
b.offset += n
}
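A minimal usage sketch (an editor's illustration, not part of the vendored file): io.Copy exercises the WriteTo fast path above. The constructor is assumed to be New(rd io.ReadCloser, buffers int) (*AsyncReader, error) under the import path github.com/ncw/rclone/fs/asyncreader; adjust both if the real package differs.
package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"

	"github.com/ncw/rclone/fs/asyncreader"
)

func main() {
	in := ioutil.NopCloser(strings.NewReader("some data"))
	// assumed signature: New(rd io.ReadCloser, buffers int) (*AsyncReader, error)
	ar, err := asyncreader.New(in, 4) // 4 buffers of read-ahead
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = ar.Close() }() // Close also closes the wrapped reader
	// io.Copy sees the io.WriterTo implementation and uses ar.WriteTo
	if _, err := io.Copy(os.Stdout, ar); err != nil {
		log.Fatal(err)
	}
}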

214
vendor/github.com/ncw/rclone/fs/bwtimetable.go generated vendored Executable file
View File

@@ -0,0 +1,214 @@
package fs
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
)
// BwTimeSlot represents a bandwidth configuration at a point in time.
type BwTimeSlot struct {
DayOfTheWeek int
HHMM int
Bandwidth SizeSuffix
}
// BwTimetable contains all configured time slots.
type BwTimetable []BwTimeSlot
// String returns a printable representation of BwTimetable.
func (x BwTimetable) String() string {
ret := []string{}
for _, ts := range x {
ret = append(ret, fmt.Sprintf("%s-%04.4d,%s", time.Weekday(ts.DayOfTheWeek), ts.HHMM, ts.Bandwidth.String()))
}
return strings.Join(ret, " ")
}
// Basic hour format checking
func validateHour(HHMM string) error {
if len(HHMM) != 5 {
return errors.Errorf("invalid time specification (hh:mm): %q", HHMM)
}
hh, err := strconv.Atoi(HHMM[0:2])
if err != nil {
return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err)
}
if hh < 0 || hh > 23 {
return errors.Errorf("invalid hour (must be between 00 and 23): %d", hh)
}
mm, err := strconv.Atoi(HHMM[3:])
if err != nil {
return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err)
}
if mm < 0 || mm > 59 {
return errors.Errorf("invalid minute (must be between 00 and 59): %d", mm)
}
return nil
}
// Basic weekday format checking
func parseWeekday(dayOfWeek string) (int, error) {
dayOfWeek = strings.ToLower(dayOfWeek)
if dayOfWeek == "sun" || dayOfWeek == "sunday" {
return 0, nil
}
if dayOfWeek == "mon" || dayOfWeek == "monday" {
return 1, nil
}
if dayOfWeek == "tue" || dayOfWeek == "tuesday" {
return 2, nil
}
if dayOfWeek == "wed" || dayOfWeek == "wednesday" {
return 3, nil
}
if dayOfWeek == "thu" || dayOfWeek == "thursday" {
return 4, nil
}
if dayOfWeek == "fri" || dayOfWeek == "friday" {
return 5, nil
}
if dayOfWeek == "sat" || dayOfWeek == "saturday" {
return 6, nil
}
return 0, errors.Errorf("invalid weekday: %q", dayOfWeek)
}
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as:
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 {
return errors.New("empty string")
}
// Single value without time specification.
if !strings.Contains(s, " ") && !strings.Contains(s, ",") {
ts := BwTimeSlot{}
if err := ts.Bandwidth.Set(s); err != nil {
return err
}
ts.DayOfTheWeek = 0
ts.HHMM = 0
*x = BwTimetable{ts}
return nil
}
for _, tok := range strings.Split(s, " ") {
tv := strings.Split(tok, ",")
// Format must be dayOfWeek-HH:MM,BW
if len(tv) != 2 {
return errors.Errorf("invalid time/bandwidth specification: %q", tok)
}
weekday := 0
HHMM := ""
if !strings.Contains(tv[0], "-") {
HHMM = tv[0]
if err := validateHour(HHMM); err != nil {
return err
}
for i := 0; i < 7; i++ {
hh, _ := strconv.Atoi(HHMM[0:2])
mm, _ := strconv.Atoi(HHMM[3:])
ts := BwTimeSlot{
DayOfTheWeek: i,
HHMM: (hh * 100) + mm,
}
if err := ts.Bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
} else {
timespec := strings.Split(tv[0], "-")
if len(timespec) != 2 {
return errors.Errorf("invalid time specification: %q", tv[0])
}
var err error
weekday, err = parseWeekday(timespec[0])
if err != nil {
return err
}
HHMM = timespec[1]
if err := validateHour(HHMM); err != nil {
return err
}
hh, _ := strconv.Atoi(HHMM[0:2])
mm, _ := strconv.Atoi(HHMM[3:])
ts := BwTimeSlot{
DayOfTheWeek: weekday,
HHMM: (hh * 100) + mm,
}
// Bandwidth limit for this time slot.
if err := ts.Bandwidth.Set(tv[1]); err != nil {
return err
}
*x = append(*x, ts)
}
}
return nil
}
// Difference in minutes between lateDayOfWeekHHMM and earlyDayOfWeekHHMM
func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int {
lateTimeMinutes := (lateDayOfWeekHHMM / 10000) * 24 * 60
lateTimeMinutes += ((lateDayOfWeekHHMM / 100) % 100) * 60
lateTimeMinutes += lateDayOfWeekHHMM % 100
earlyTimeMinutes := (earlyDayOfWeekHHMM / 10000) * 24 * 60
earlyTimeMinutes += ((earlyDayOfWeekHHMM / 100) % 100) * 60
earlyTimeMinutes += earlyDayOfWeekHHMM % 100
return lateTimeMinutes - earlyTimeMinutes
}
// LimitAt returns a BwTimeSlot for the time requested.
func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot {
// If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight.
if len(x) == 0 {
return BwTimeSlot{DayOfTheWeek: 0, HHMM: 0, Bandwidth: -1}
}
dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute()
// By default, we return the last element in the timetable. This
// satisfies two conditions: 1) If there's only one element it
// will always be selected, and 2) The last element of the table
// will "wrap around" to the start of the week until overridden
// by an earlier time slot.
ret := x[len(x)-1]
mindif := 0
first := true
// Look for most recent time slot.
for _, ts := range x {
// Ignore the past
if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM {
continue
}
dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM)
if first {
mindif = dif
first = false
}
if dif <= mindif {
mindif = dif
ret = ts
}
}
return ret
}
// Type of the value
func (x BwTimetable) Type() string {
return "BwTimetable"
}
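A standalone sketch (editor's illustration, not part of the vendored file) parsing the timetable string documented in Set above and querying the limit currently in force:
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/ncw/rclone/fs"
)

func main() {
	var tt fs.BwTimetable
	// The string form matches the example in the Set documentation above.
	if err := tt.Set("Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"); err != nil {
		log.Fatal(err)
	}
	slot := tt.LimitAt(time.Now())
	fmt.Printf("limit in force now: %v\n", slot.Bandwidth)
}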

131
vendor/github.com/ncw/rclone/fs/config.go generated vendored Executable file
View File

@@ -0,0 +1,131 @@
package fs
import (
"net"
"strings"
"time"
)
// Global
var (
// Config is the global config
Config = NewConfig()
// Read a value from the config file
//
// This is a function pointer to decouple the config
// implementation from the fs
ConfigFileGet = func(section, key string) (string, bool) { return "", false }
// Set a value into the config file
//
// This is a function pointer to decouple the config
// implementation from the fs
ConfigFileSet = func(section, key, value string) {
Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
}
// CountError counts an error. If any errors have been
// counted then it will exit with a non zero error code.
//
// This is a function pointer to decouple the config
// implementation from the fs
CountError = func(err error) {}
// ConfigProvider is the config key used for provider options
ConfigProvider = "provider"
)
// ConfigInfo is filesystem config options
type ConfigInfo struct {
LogLevel LogLevel
StatsLogLevel LogLevel
DryRun bool
CheckSum bool
SizeOnly bool
IgnoreTimes bool
IgnoreExisting bool
IgnoreErrors bool
ModifyWindow time.Duration
Checkers int
Transfers int
ConnectTimeout time.Duration // Connect timeout
Timeout time.Duration // Data channel timeout
Dump DumpFlags
InsecureSkipVerify bool // Skip server certificate verification
DeleteMode DeleteMode
MaxDelete int64
TrackRenames bool // Track file renames.
LowLevelRetries int
UpdateOlder bool // Skip files that are newer on the destination
NoGzip bool // Disable compression
MaxDepth int
IgnoreSize bool
IgnoreChecksum bool
NoUpdateModTime bool
DataRateUnit string
BackupDir string
Suffix string
UseListR bool
BufferSize SizeSuffix
BwLimit BwTimetable
TPSLimit float64
TPSLimitBurst int
BindAddr net.IP
DisableFeatures []string
UserAgent string
Immutable bool
AutoConfirm bool
StreamingUploadCutoff SizeSuffix
StatsFileNameLength int
AskPassword bool
UseServerModTime bool
MaxTransfer SizeSuffix
MaxBacklog int
StatsOneLine bool
Progress bool
}
// NewConfig creates a new config with everything set to the default
// value. These are the ultimate defaults and are overridden by the
// config module.
func NewConfig() *ConfigInfo {
c := new(ConfigInfo)
// Set any values which aren't the zero for the type
c.LogLevel = LogLevelNotice
c.StatsLogLevel = LogLevelInfo
c.ModifyWindow = time.Nanosecond
c.Checkers = 8
c.Transfers = 4
c.ConnectTimeout = 60 * time.Second
c.Timeout = 5 * 60 * time.Second
c.DeleteMode = DeleteModeDefault
c.MaxDelete = -1
c.LowLevelRetries = 10
c.MaxDepth = -1
c.DataRateUnit = "bytes"
c.BufferSize = SizeSuffix(16 << 20)
c.UserAgent = "rclone/" + Version
c.StreamingUploadCutoff = SizeSuffix(100 * 1024)
c.StatsFileNameLength = 40
c.AskPassword = true
c.TPSLimitBurst = 1
c.MaxTransfer = -1
c.MaxBacklog = 10000
return c
}
// ConfigToEnv converts a config section and name, eg ("myremote",
// "ignore-size") into an environment name
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
func ConfigToEnv(section, name string) string {
return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
}
// OptionToEnv converts an option name, eg "ignore-size" into an
// environment name "RCLONE_IGNORE_SIZE"
func OptionToEnv(name string) string {
return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
}
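A short sketch (editor's illustration) of the environment-variable mapping above; the expected outputs come straight from the doc comments:
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	fmt.Println(fs.ConfigToEnv("myremote", "ignore-size")) // RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE
	fmt.Println(fs.OptionToEnv("ignore-size"))             // RCLONE_IGNORE_SIZE
}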

1346
vendor/github.com/ncw/rclone/fs/config/config.go generated vendored Executable file

File diff suppressed because it is too large Load Diff

10
vendor/github.com/ncw/rclone/fs/config/config_other.go generated vendored Executable file
View File

@@ -0,0 +1,10 @@
// Read, write and edit the config file
// Non-unix specific functions.
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package config
// attemptCopyGroups tries to keep the group the same, which only makes sense
// for system with user-group-world permission model.
func attemptCopyGroup(fromPath, toPath string) {}

View File

@@ -0,0 +1,29 @@
// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris
// +build !solaris,!plan9
package config
import (
"fmt"
"log"
"os"
"golang.org/x/crypto/ssh/terminal"
)
// ReadPassword reads a password without echoing it to the terminal.
func ReadPassword() string {
stdin := int(os.Stdin.Fd())
if !terminal.IsTerminal(stdin) {
return ReadLine()
}
line, err := terminal.ReadPassword(stdin)
_, _ = fmt.Fprintln(os.Stderr)
if err != nil {
log.Fatalf("Failed to read password: %v", err)
}
return string(line)
}

View File

@@ -0,0 +1,12 @@
// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
// See https://github.com/golang/go/issues/14441 - plan9
// https://github.com/golang/go/issues/13085 - solaris
// +build solaris plan9
package config
// ReadPassword reads a password. On these OSes it will be echoed to the terminal.
func ReadPassword() string {
return ReadLine()
}

37
vendor/github.com/ncw/rclone/fs/config/config_unix.go generated vendored Executable file
View File

@@ -0,0 +1,37 @@
// Read, write and edit the config file
// Unix specific functions.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package config
import (
"os"
"os/user"
"strconv"
"syscall"
"github.com/ncw/rclone/fs"
)
// attemptCopyGroups tries to keep the group the same. User will be the one
// who is currently running this process.
func attemptCopyGroup(fromPath, toPath string) {
info, err := os.Stat(fromPath)
if err != nil || info.Sys() == nil {
return
}
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
uid := int(stat.Uid)
// prefer self over previous owner of file, because it has a higher chance
// of success
if user, err := user.Current(); err == nil {
if tmpUID, err := strconv.Atoi(user.Uid); err == nil {
uid = tmpUID
}
}
if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
}
}
}

View File

@@ -0,0 +1,86 @@
// Package configmap provides an abstraction for reading and writing config
package configmap
// Getter provides an interface to get config items
type Getter interface {
// Get should get an item with the key passed in and return
// the value. If the item is found then it should return true,
// otherwise false.
Get(key string) (value string, ok bool)
}
// Setter provides an interface to set config items
type Setter interface {
// Set should set an item into persistent config store.
Set(key, value string)
}
// Mapper provides an interface to read and write config
type Mapper interface {
Getter
Setter
}
// Map provides a wrapper around multiple Setter and
// Getter interfaces.
type Map struct {
setters []Setter
getters []Getter
}
// New returns an empty Map
func New() *Map {
return &Map{}
}
// AddGetter appends a getter onto the end of the getters
func (c *Map) AddGetter(getter Getter) *Map {
c.getters = append(c.getters, getter)
return c
}
// AddGetters appends multiple getters onto the end of the getters
func (c *Map) AddGetters(getters ...Getter) *Map {
c.getters = append(c.getters, getters...)
return c
}
// AddSetter appends a setter onto the end of the setters
func (c *Map) AddSetter(setter Setter) *Map {
c.setters = append(c.setters, setter)
return c
}
// Get gets an item with the key passed in and returns the value from
// the first getter. If the item is found then it returns true,
// otherwise false.
func (c *Map) Get(key string) (value string, ok bool) {
for _, do := range c.getters {
value, ok = do.Get(key)
if ok {
return value, ok
}
}
return "", false
}
// Set sets an item into all the stored setters.
func (c *Map) Set(key, value string) {
for _, do := range c.setters {
do.Set(key, value)
}
}
// Simple is a simple Mapper for testing
type Simple map[string]string
// Get the value
func (c Simple) Get(key string) (value string, ok bool) {
value, ok = c[key]
return value, ok
}
// Set the value
func (c Simple) Set(key, value string) {
c[key] = value
}
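A sketch (editor's illustration) of layering getters with the Map above; the first getter added wins, so command-line overrides can shadow stored config:
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/configmap"
)

func main() {
	flags := configmap.Simple{"password": "from-flag"}
	file := configmap.Simple{"password": "from-file", "user": "nick"}
	// Getters are consulted in the order they were added.
	m := configmap.New().AddGetter(flags).AddGetter(file)
	v, _ := m.Get("password")
	u, _ := m.Get("user")
	fmt.Println(v, u) // from-flag nick
}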

View File

@@ -0,0 +1,127 @@
// Package configstruct parses unstructured maps into structures
package configstruct
import (
"fmt"
"reflect"
"regexp"
"strings"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/pkg/errors"
)
var matchUpper = regexp.MustCompile("([A-Z]+)")
// camelToSnake converts CamelCase to snake_case
func camelToSnake(in string) string {
out := matchUpper.ReplaceAllString(in, "_$1")
out = strings.ToLower(out)
out = strings.Trim(out, "_")
return out
}
// StringToInterface turns in into an interface{} the same type as def
func StringToInterface(def interface{}, in string) (newValue interface{}, err error) {
typ := reflect.TypeOf(def)
switch typ.Kind() {
case reflect.String:
// Pass strings unmodified
return in, nil
}
// Otherwise parse with Sscanln
//
// This means any types we use here must implement fmt.Scanner
o := reflect.New(typ)
n, err := fmt.Sscanln(in, o.Interface())
if err != nil {
return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def)
}
if n != 1 {
return newValue, errors.New("no items parsed")
}
return o.Elem().Interface(), nil
}
// Item describes a single entry in the options structure
type Item struct {
Name string // snake_case
Field string // CamelCase
Num int // number of the field in the struct
Value interface{}
}
// Items parses the opt struct and returns a slice of Item objects.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields.
//
// The config_name is looked up in a struct tag called "config" or if
// not found is the field name converted from CamelCase to snake_case.
func Items(opt interface{}) (items []Item, err error) {
def := reflect.ValueOf(opt)
if def.Kind() != reflect.Ptr {
return nil, errors.New("argument must be a pointer")
}
def = def.Elem() // indirect the pointer
if def.Kind() != reflect.Struct {
return nil, errors.New("argument must be a pointer to a struct")
}
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
fieldName := field.Name
configName, ok := field.Tag.Lookup("config")
if !ok {
configName = camelToSnake(fieldName)
}
defaultItem := Item{
Name: configName,
Field: fieldName,
Num: i,
Value: def.Field(i).Interface(),
}
items = append(items, defaultItem)
}
return items, nil
}
// Set interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields. The field names are converted from CamelCase to
// snake_case and looked up in the config supplied or a
// `config:"field_name"` is looked up.
//
// If items are found then they are converted from string to native
// types and set in opt.
//
// All the field types in the struct must implement fmt.Scanner.
func Set(config configmap.Getter, opt interface{}) (err error) {
defaultItems, err := Items(opt)
if err != nil {
return err
}
defStruct := reflect.ValueOf(opt).Elem()
for _, defaultItem := range defaultItems {
newValue := defaultItem.Value
if configValue, ok := config.Get(defaultItem.Name); ok {
var newNewValue interface{}
newNewValue, err = StringToInterface(newValue, configValue)
if err != nil {
// Mask errors if setting an empty string as
// it isn't valid for all types. This makes
// empty string be the equivalent of unset.
if configValue != "" {
return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value)
}
} else {
newValue = newNewValue
}
}
defStruct.Field(defaultItem.Num).Set(reflect.ValueOf(newValue))
}
return nil
}
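A sketch (editor's illustration) of Set filling an options struct from a config map, showing both the derived snake_case name and a `config` tag override:
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options shows the name mapping: MaxSize is looked up as "max_size",
// while the `config` tag overrides the derived name for Name.
type Options struct {
	MaxSize int
	Name    string `config:"file_name"`
}

func main() {
	opt := Options{MaxSize: 10, Name: "default"}
	m := configmap.Simple{"max_size": "42", "file_name": "hello"}
	if err := configstruct.Set(m, &opt); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", opt) // {MaxSize:42 Name:hello}
}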

94
vendor/github.com/ncw/rclone/fs/config/obscure/obscure.go generated vendored Executable file
View File

@@ -0,0 +1,94 @@
// Package obscure contains the Obscure and Reveal commands
package obscure
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/base64"
"io"
"log"
"github.com/pkg/errors"
)
// crypt internals
var (
cryptKey = []byte{
0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d,
0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b,
0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb,
0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38,
}
cryptBlock cipher.Block
cryptRand = rand.Reader
)
// crypt transforms in to out using iv under AES-CTR.
//
// in and out may be the same buffer.
//
// Note encryption and decryption are the same operation
func crypt(out, in, iv []byte) error {
if cryptBlock == nil {
var err error
cryptBlock, err = aes.NewCipher(cryptKey)
if err != nil {
return err
}
}
stream := cipher.NewCTR(cryptBlock, iv)
stream.XORKeyStream(out, in)
return nil
}
// Obscure a value
//
// This is done by encrypting with AES-CTR
func Obscure(x string) (string, error) {
plaintext := []byte(x)
ciphertext := make([]byte, aes.BlockSize+len(plaintext))
iv := ciphertext[:aes.BlockSize]
if _, err := io.ReadFull(cryptRand, iv); err != nil {
return "", errors.Wrap(err, "failed to read iv")
}
if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil {
return "", errors.Wrap(err, "encrypt failed")
}
return base64.RawURLEncoding.EncodeToString(ciphertext), nil
}
// MustObscure obscures a value, exiting with a fatal error if it failed
func MustObscure(x string) string {
out, err := Obscure(x)
if err != nil {
log.Fatalf("Obscure failed: %v", err)
}
return out
}
// Reveal an obscured value
func Reveal(x string) (string, error) {
ciphertext, err := base64.RawURLEncoding.DecodeString(x)
if err != nil {
return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?")
}
if len(ciphertext) < aes.BlockSize {
return "", errors.New("input too short when revealing password - is it obscured?")
}
buf := ciphertext[aes.BlockSize:]
iv := ciphertext[:aes.BlockSize]
if err := crypt(buf, buf, iv); err != nil {
return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?")
}
return string(buf), nil
}
// MustReveal reveals an obscured value, exiting with a fatal error if it failed
func MustReveal(x string) string {
out, err := Reveal(x)
if err != nil {
log.Fatalf("Reveal failed: %v", err)
}
return out
}
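A round-trip sketch (editor's illustration). Since the AES key is baked into the binary, this is obfuscation rather than real secrecy:
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	o := obscure.MustObscure("potato")
	fmt.Println(o)                     // base64 of random iv + AES-CTR output
	fmt.Println(obscure.MustReveal(o)) // potato
}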

94
vendor/github.com/ncw/rclone/fs/config_list.go generated vendored Executable file
View File

@@ -0,0 +1,94 @@
package fs
import (
"bytes"
"encoding/csv"
"fmt"
)
// CommaSepList is a comma separated config value
// It uses the encoding/csv rules for quoting and escaping
type CommaSepList []string
// SpaceSepList is a space separated config value
// It uses the encoding/csv rules for quoting and escaping
type SpaceSepList []string
type genericList []string
func (l CommaSepList) String() string {
return genericList(l).string(',')
}
// Set the List entries
func (l *CommaSepList) Set(s string) error {
return (*genericList)(l).set(',', []byte(s))
}
// Type of the value
func (CommaSepList) Type() string {
return "[]string"
}
// Scan implements the fmt.Scanner interface
func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error {
return (*genericList)(l).scan(',', s, ch)
}
func (l SpaceSepList) String() string {
return genericList(l).string(' ')
}
// Set the List entries
func (l *SpaceSepList) Set(s string) error {
return (*genericList)(l).set(' ', []byte(s))
}
// Type of the value
func (SpaceSepList) Type() string {
return "[]string"
}
// Scan implements the fmt.Scanner interface
func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error {
return (*genericList)(l).scan(' ', s, ch)
}
func (gl genericList) string(sep rune) string {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
w.Comma = sep
err := w.Write(gl)
if err != nil {
// can only happen if w.Comma is invalid
panic(err)
}
w.Flush()
return string(bytes.TrimSpace(buf.Bytes()))
}
func (gl *genericList) set(sep rune, b []byte) error {
if len(b) == 0 {
*gl = nil
return nil
}
r := csv.NewReader(bytes.NewReader(b))
r.Comma = sep
record, err := r.Read()
switch _err := err.(type) {
case nil:
*gl = record
case *csv.ParseError:
err = _err.Err // remove line numbers from the error message
}
return err
}
func (gl *genericList) scan(sep rune, s fmt.ScanState, ch rune) error {
token, err := s.Token(true, func(rune) bool { return true })
if err != nil {
return err
}
return gl.set(sep, bytes.TrimSpace(token))
}
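A sketch (editor's illustration) showing how the encoding/csv rules let a list entry contain the separator:
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	var l fs.CommaSepList
	// csv-style quoting lets an entry contain the separator
	if err := l.Set(`a,"b,c",d`); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", []string(l)) // ["a" "b,c" "d"]
}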

14
vendor/github.com/ncw/rclone/fs/deletemode.go generated vendored Executable file
View File

@@ -0,0 +1,14 @@
package fs
// DeleteMode describes the possible delete modes in the config
type DeleteMode byte
// DeleteMode constants
const (
DeleteModeOff DeleteMode = iota
DeleteModeBefore
DeleteModeDuring
DeleteModeAfter
DeleteModeOnly
DeleteModeDefault = DeleteModeAfter
)

97
vendor/github.com/ncw/rclone/fs/dir.go generated vendored Executable file
View File

@@ -0,0 +1,97 @@
package fs
import "time"
// Dir describes an unspecialized directory for directory/container/bucket lists
type Dir struct {
remote string // name of the directory
modTime time.Time // modification or creation time - IsZero for unknown
size int64 // size of directory and contents or -1 if unknown
items int64 // number of objects or -1 for unknown
id string // optional ID
}
// NewDir creates an unspecialized Directory object
func NewDir(remote string, modTime time.Time) *Dir {
return &Dir{
remote: remote,
modTime: modTime,
size: -1,
items: -1,
}
}
// NewDirCopy creates an unspecialized copy of the Directory object passed in
func NewDirCopy(d Directory) *Dir {
return &Dir{
remote: d.Remote(),
modTime: d.ModTime(),
size: d.Size(),
items: d.Items(),
}
}
// String returns the name
func (d *Dir) String() string {
return d.remote
}
// Remote returns the remote path
func (d *Dir) Remote() string {
return d.remote
}
// SetRemote sets the remote
func (d *Dir) SetRemote(remote string) *Dir {
d.remote = remote
return d
}
// ID gets the optional ID
func (d *Dir) ID() string {
return d.id
}
// SetID sets the optional ID
func (d *Dir) SetID(id string) *Dir {
d.id = id
return d
}
// ModTime returns the modification date of the directory
// It should return a best guess if one isn't available
func (d *Dir) ModTime() time.Time {
if !d.modTime.IsZero() {
return d.modTime
}
return time.Now()
}
// Size returns the size of the directory and contents or -1 if unknown
func (d *Dir) Size() int64 {
return d.size
}
// SetSize sets the size of the directory
func (d *Dir) SetSize(size int64) *Dir {
d.size = size
return d
}
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
func (d *Dir) Items() int64 {
return d.items
}
// SetItems sets the number of items in the directory
func (d *Dir) SetItems(items int64) *Dir {
d.items = items
return d
}
// Check interfaces
var (
_ DirEntry = (*Dir)(nil)
_ Directory = (*Dir)(nil)
)
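A small sketch (editor's illustration): the setters above return *Dir, so construction can be chained:
package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
)

func main() {
	d := fs.NewDir("photos/2018", time.Now()).SetID("abc123").SetItems(42)
	fmt.Println(d.Remote(), d.ID(), d.Items()) // photos/2018 abc123 42
}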

81
vendor/github.com/ncw/rclone/fs/direntries.go generated vendored Executable file
View File

@@ -0,0 +1,81 @@
package fs
import "fmt"
// DirEntries is a slice of Object or *Dir
type DirEntries []DirEntry
// Len is part of sort.Interface.
func (ds DirEntries) Len() int {
return len(ds)
}
// Swap is part of sort.Interface.
func (ds DirEntries) Swap(i, j int) {
ds[i], ds[j] = ds[j], ds[i]
}
// Less is part of sort.Interface.
func (ds DirEntries) Less(i, j int) bool {
return ds[i].Remote() < ds[j].Remote()
}
// ForObject runs the function supplied on every object in the entries
func (ds DirEntries) ForObject(fn func(o Object)) {
for _, entry := range ds {
o, ok := entry.(Object)
if ok {
fn(o)
}
}
}
// ForObjectError runs the function supplied on every object in the entries
func (ds DirEntries) ForObjectError(fn func(o Object) error) error {
for _, entry := range ds {
o, ok := entry.(Object)
if ok {
err := fn(o)
if err != nil {
return err
}
}
}
return nil
}
// ForDir runs the function supplied on every Directory in the entries
func (ds DirEntries) ForDir(fn func(dir Directory)) {
for _, entry := range ds {
dir, ok := entry.(Directory)
if ok {
fn(dir)
}
}
}
// ForDirError runs the function supplied on every Directory in the entries
func (ds DirEntries) ForDirError(fn func(dir Directory) error) error {
for _, entry := range ds {
dir, ok := entry.(Directory)
if ok {
err := fn(dir)
if err != nil {
return err
}
}
}
return nil
}
// DirEntryType returns a string description of the DirEntry, either
// "object", "directory" or "unknown type XXX"
func DirEntryType(d DirEntry) string {
switch d.(type) {
case Object:
return "object"
case Directory:
return "directory"
}
return fmt.Sprintf("unknown type %T", d)
}
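A minimal in-package sketch (editor's illustration) of the ForObject helper above, summing object sizes while skipping directories:
// totalSize sums the sizes of the objects in a listing, skipping
// directories, using the ForObject helper above.
func totalSize(entries DirEntries) (total int64) {
	entries.ForObject(func(o Object) {
		total += o.Size()
	})
	return total
}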

14
vendor/github.com/ncw/rclone/fs/driveletter/driveletter.go generated vendored Executable file
View File

@@ -0,0 +1,14 @@
// Package driveletter returns whether a name is a valid drive letter
// +build !windows
package driveletter
// IsDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non windows platforms we don't have drive letters so we always
// return false
func IsDriveLetter(name string) bool {
return false
}

View File

@@ -0,0 +1,13 @@
// +build windows
package driveletter
// IsDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func IsDriveLetter(name string) bool {
if len(name) != 1 {
return false
}
c := name[0]
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

93
vendor/github.com/ncw/rclone/fs/dump.go generated vendored Executable file
View File

@@ -0,0 +1,93 @@
package fs
import (
"fmt"
"strings"
"github.com/pkg/errors"
)
// DumpFlags describes the Dump options in force
type DumpFlags int
// DumpFlags definitions
const (
DumpHeaders DumpFlags = 1 << iota
DumpBodies
DumpRequests
DumpResponses
DumpAuth
DumpFilters
DumpGoRoutines
DumpOpenFiles
)
var dumpFlags = []struct {
flag DumpFlags
name string
}{
{DumpHeaders, "headers"},
{DumpBodies, "bodies"},
{DumpRequests, "requests"},
{DumpResponses, "responses"},
{DumpAuth, "auth"},
{DumpFilters, "filters"},
{DumpGoRoutines, "goroutines"},
{DumpOpenFiles, "openfiles"},
}
// DumpFlagsList is a list of dump flags used in the help
var DumpFlagsList string
func init() {
// calculate the dump flags list
var out []string
for _, info := range dumpFlags {
out = append(out, info.name)
}
DumpFlagsList = strings.Join(out, ",")
}
// String turns a DumpFlags into a string
func (f DumpFlags) String() string {
var out []string
for _, info := range dumpFlags {
if f&info.flag != 0 {
out = append(out, info.name)
f &^= info.flag
}
}
if f != 0 {
out = append(out, fmt.Sprintf("Unknown-0x%X", int(f)))
}
return strings.Join(out, ",")
}
// Set a DumpFlags as a comma separated list of flags
func (f *DumpFlags) Set(s string) error {
var flags DumpFlags
parts := strings.Split(s, ",")
for _, part := range parts {
found := false
part = strings.ToLower(strings.TrimSpace(part))
if part == "" {
continue
}
for _, info := range dumpFlags {
if part == info.name {
found = true
flags |= info.flag
}
}
if !found {
return errors.Errorf("Unknown dump flag %q", part)
}
}
*f = flags
return nil
}
// Type of the value
func (f *DumpFlags) Type() string {
return "string"
}
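A sketch (editor's illustration) of the Set/String round trip and flag testing for DumpFlags:
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
)

func main() {
	var f fs.DumpFlags
	if err := f.Set("headers,auth"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f)                  // headers,auth
	fmt.Println(f&fs.DumpAuth != 0) // true
}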

498
vendor/github.com/ncw/rclone/fs/filter/filter.go generated vendored Executable file
View File

@@ -0,0 +1,498 @@
// Package filter controls the filtering of files
package filter
import (
"bufio"
"fmt"
"log"
"os"
"path"
"regexp"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Active is the globally active filter
var Active = mustNewFilter(nil)
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// rules is a slice of rules
type rules struct {
rules []rule
existing map[string]struct{}
}
// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
return // rule already exists
}
rs.rules = append(rs.rules, newRule)
rs.existing[newRuleString] = struct{}{}
}
// clear clears all the rules
func (rs *rules) clear() {
rs.rules = nil
rs.existing = nil
}
// len returns the number of rules
func (rs *rules) len() int {
return len(rs.rules)
}
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
// Opt configures the filter
type Opt struct {
DeleteExcluded bool
FilterRule []string
FilterFrom []string
ExcludeRule []string
ExcludeFrom []string
ExcludeFile string
IncludeRule []string
IncludeFrom []string
FilesFrom []string
MinAge fs.Duration
MaxAge fs.Duration
MinSize fs.SizeSuffix
MaxSize fs.SizeSuffix
}
// DefaultOpt is the default config for the filter
var DefaultOpt = Opt{
MinAge: fs.DurationOff,
MaxAge: fs.DurationOff,
MinSize: fs.SizeSuffix(-1),
MaxSize: fs.SizeSuffix(-1),
}
// Filter describes any filtering in operation
type Filter struct {
Opt Opt
ModTimeFrom time.Time
ModTimeTo time.Time
fileRules rules
dirRules rules
files FilesMap // files if filesFrom
dirs FilesMap // dirs from filesFrom
}
// NewFilter parses the command line options and creates a Filter
// object. If opt is nil, then DefaultOpt will be used
func NewFilter(opt *Opt) (f *Filter, err error) {
f = &Filter{}
// Make a copy of the options
if opt != nil {
f.Opt = *opt
} else {
f.Opt = DefaultOpt
}
// Filter flags
if f.Opt.MinAge.IsSet() {
f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge))
fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo)
}
if f.Opt.MaxAge.IsSet() {
f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge))
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
log.Fatal("filter: --min-age can't be larger than --max-age")
}
fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
}
addImplicitExclude := false
foundExcludeRule := false
for _, rule := range f.Opt.IncludeRule {
err = f.Add(true, rule)
if err != nil {
return nil, err
}
addImplicitExclude = true
}
for _, rule := range f.Opt.IncludeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
addImplicitExclude = true
}
for _, rule := range f.Opt.ExcludeRule {
err = f.Add(false, rule)
if err != nil {
return nil, err
}
foundExcludeRule = true
}
for _, rule := range f.Opt.ExcludeFrom {
err := forEachLine(rule, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
foundExcludeRule = true
}
if addImplicitExclude && foundExcludeRule {
fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
}
for _, rule := range f.Opt.FilterRule {
err = f.AddRule(rule)
if err != nil {
return nil, err
}
}
for _, rule := range f.Opt.FilterFrom {
err := forEachLine(rule, f.AddRule)
if err != nil {
return nil, err
}
}
for _, rule := range f.Opt.FilesFrom {
f.initAddFile() // init to show --files-from set even if no files within
err := forEachLine(rule, func(line string) error {
return f.AddFile(line)
})
if err != nil {
return nil, err
}
}
if addImplicitExclude {
err = f.Add(false, "/**")
if err != nil {
return nil, err
}
}
if fs.Config.Dump&fs.DumpFilters != 0 {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
fmt.Println("--- end filters ---")
}
return f, nil
}
func mustNewFilter(opt *Opt) *Filter {
f, err := NewFilter(opt)
if err != nil {
panic(err)
}
return f
}
// addDirGlobs adds directory globs from the file glob passed in
func (f *Filter) addDirGlobs(Include bool, glob string) error {
for _, dirGlob := range globToDirGlobs(glob) {
// Don't add "/" as we always include the root
if dirGlob == "/" {
continue
}
dirRe, err := globToRegexp(dirGlob)
if err != nil {
return err
}
f.dirRules.add(Include, dirRe)
}
return nil
}
// Add adds a filter rule with include or exclude status indicated
func (f *Filter) Add(Include bool, glob string) error {
isDirRule := strings.HasSuffix(glob, "/")
isFileRule := !isDirRule
if strings.Contains(glob, "**") {
isDirRule, isFileRule = true, true
}
re, err := globToRegexp(glob)
if err != nil {
return err
}
if isFileRule {
f.fileRules.add(Include, re)
// If include rule work out what directories are needed to scan
// if exclude rule, we can't rule anything out
// Unless it is `*` which matches everything
// NB ** and /** are DirRules
if Include || glob == "*" {
err = f.addDirGlobs(Include, glob)
if err != nil {
return err
}
}
}
if isDirRule {
f.dirRules.add(Include, re)
}
return nil
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
switch {
case rule == "!":
f.Clear()
return nil
case strings.HasPrefix(rule, "- "):
return f.Add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
return errors.Errorf("malformed rule %q", rule)
}
// initAddFile creates f.files and f.dirs
func (f *Filter) initAddFile() {
if f.files == nil {
f.files = make(FilesMap)
f.dirs = make(FilesMap)
}
}
// AddFile adds a single file to the files from list
func (f *Filter) AddFile(file string) error {
f.initAddFile()
file = strings.Trim(file, "/")
f.files[file] = struct{}{}
// Put all the parent directories into f.dirs
for {
file = path.Dir(file)
if file == "." {
break
}
if _, found := f.dirs[file]; found {
break
}
f.dirs[file] = struct{}{}
}
return nil
}
// Files returns all the files from the `--files-from` list
//
// It may be nil if the list is empty
func (f *Filter) Files() FilesMap {
return f.files
}
// Clear clears all the filter rules
func (f *Filter) Clear() {
f.fileRules.clear()
f.dirRules.clear()
}
// InActive returns true if no filters are active
func (f *Filter) InActive() bool {
return (f.files == nil &&
f.ModTimeFrom.IsZero() &&
f.ModTimeTo.IsZero() &&
f.Opt.MinSize < 0 &&
f.Opt.MaxSize < 0 &&
f.fileRules.len() == 0 &&
f.dirRules.len() == 0 &&
len(f.Opt.ExcludeFile) == 0)
}
// includeRemote returns whether this remote passes the filter rules.
func (f *Filter) includeRemote(remote string) bool {
for _, rule := range f.fileRules.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// ListContainsExcludeFile checks if exclude file is present in the list.
func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {
if len(f.Opt.ExcludeFile) == 0 {
return false
}
for _, entry := range entries {
obj, ok := entry.(fs.Object)
if ok {
basename := path.Base(obj.Remote())
if basename == f.Opt.ExcludeFile {
return true
}
}
}
return false
}
// IncludeDirectory returns a function which checks whether this
// directory should be included in the sync or not.
func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
return func(remote string) (bool, error) {
remote = strings.Trim(remote, "/")
// first check if we need to remove directory based on
// the exclude file
excl, err := f.DirContainsExcludeFile(fs, remote)
if err != nil {
return false, err
}
if excl {
return false, nil
}
// filesFrom takes precedence
if f.files != nil {
_, include := f.dirs[remote]
return include, nil
}
remote += "/"
for _, rule := range f.dirRules.rules {
if rule.Match(remote) {
return rule.Include, nil
}
}
return true, nil
}
}
// DirContainsExcludeFile checks if exclude file is present in a
// directory. If fs is nil, it works properly if ExcludeFile is an
// empty string (for testing).
func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
if len(f.Opt.ExcludeFile) > 0 {
exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
if err != nil {
return false, err
}
if exists {
return true, nil
}
}
return false, nil
}
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
// filesFrom takes precedence
if f.files != nil {
_, include := f.files[remote]
return include
}
if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) {
return false
}
if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) {
return false
}
if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) {
return false
}
if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
return false
}
return f.includeRemote(remote)
}
// IncludeObject returns whether this object should be included into
// the sync or not. This is a convenience function to avoid calling
// o.ModTime(), which is an expensive operation.
func (f *Filter) IncludeObject(o fs.Object) bool {
var modTime time.Time
if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
modTime = o.ModTime()
} else {
modTime = time.Unix(0, 0)
}
return f.Include(o.Remote(), o.Size(), modTime)
}
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func forEachLine(path string, fn func(string) error) (err error) {
in, err := os.Open(path)
if err != nil {
return err
}
defer fs.CheckClose(in, &err)
scanner := bufio.NewScanner(in)
for scanner.Scan() {
line := scanner.Text()
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
rules := []string{}
if !f.ModTimeFrom.IsZero() {
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String()))
}
if !f.ModTimeTo.IsZero() {
rules = append(rules, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String()))
}
rules = append(rules, "--- File filter rules ---")
for _, rule := range f.fileRules.rules {
rules = append(rules, rule.String())
}
rules = append(rules, "--- Directory filter rules ---")
for _, dirRule := range f.dirRules.rules {
rules = append(rules, dirRule.String())
}
return strings.Join(rules, "\n")
}
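A sketch (editor's illustration) of building a Filter from include rules; as documented in NewFilter above, using includes adds an implicit "- /**" at the end:
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/ncw/rclone/fs/filter"
)

func main() {
	opt := filter.DefaultOpt
	opt.IncludeRule = []string{"*.jpg"}
	f, err := filter.NewFilter(&opt)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	fmt.Println(f.Include("dir/photo.jpg", 123, now)) // true
	fmt.Println(f.Include("dir/notes.txt", 123, now)) // false - implicit /** exclude
}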

166
vendor/github.com/ncw/rclone/fs/filter/glob.go generated vendored Executable file
View File

@@ -0,0 +1,166 @@
// rsync style glob parser
package filter
import (
"bytes"
"regexp"
"strings"
"github.com/pkg/errors"
)
// globToRegexp converts an rsync style glob to a regexp
//
// documented in filtering.md
func globToRegexp(glob string) (*regexp.Regexp, error) {
var re bytes.Buffer
if strings.HasPrefix(glob, "/") {
glob = glob[1:]
_, _ = re.WriteRune('^')
} else {
_, _ = re.WriteString("(^|/)")
}
consecutiveStars := 0
insertStars := func() error {
if consecutiveStars > 0 {
switch consecutiveStars {
case 1:
_, _ = re.WriteString(`[^/]*`)
case 2:
_, _ = re.WriteString(`.*`)
default:
return errors.Errorf("too many stars in %q", glob)
}
}
consecutiveStars = 0
return nil
}
inBraces := false
inBrackets := 0
slashed := false
for _, c := range glob {
if slashed {
_, _ = re.WriteRune(c)
slashed = false
continue
}
if c != '*' {
err := insertStars()
if err != nil {
return nil, err
}
}
if inBrackets > 0 {
_, _ = re.WriteRune(c)
if c == '[' {
inBrackets++
}
if c == ']' {
inBrackets--
}
continue
}
switch c {
case '\\':
_, _ = re.WriteRune(c)
slashed = true
case '*':
consecutiveStars++
case '?':
_, _ = re.WriteString(`[^/]`)
case '[':
_, _ = re.WriteRune(c)
inBrackets++
case ']':
return nil, errors.Errorf("mismatched ']' in glob %q", glob)
case '{':
if inBraces {
return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob)
}
inBraces = true
_, _ = re.WriteRune('(')
case '}':
if !inBraces {
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune(')')
inBraces = false
case ',':
if inBraces {
_, _ = re.WriteRune('|')
} else {
_, _ = re.WriteRune(c)
}
case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
_, _ = re.WriteRune('\\')
_, _ = re.WriteRune(c)
default:
_, _ = re.WriteRune(c)
}
}
err := insertStars()
if err != nil {
return nil, err
}
if inBrackets > 0 {
return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob)
}
if inBraces {
return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob)
}
_, _ = re.WriteRune('$')
result, err := regexp.Compile(re.String())
if err != nil {
return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String())
}
return result, nil
}
var (
// Can't deal with / or ** in {}
tooHardRe = regexp.MustCompile(`{[^{}]*(\*\*|/)[^{}]*}`)
// Squash all /
squashSlash = regexp.MustCompile(`/{2,}`)
)
// globToDirGlobs takes a file glob and turns it into a series of
// directory globs. When matched with a directory (with a trailing /)
// this should answer the question as to whether this glob could be in
// this directory.
func globToDirGlobs(glob string) (out []string) {
if tooHardRe.MatchString(glob) {
// Can't figure this one out so return any directory might match
out = append(out, "/**")
return out
}
// Get rid of multiple /s
glob = squashSlash.ReplaceAllString(glob, "/")
// Split on / or **
// (** can contain /)
for {
i := strings.LastIndex(glob, "/")
j := strings.LastIndex(glob, "**")
what := ""
if j > i {
i = j
what = "**"
}
if i < 0 {
if len(out) == 0 {
out = append(out, "/**")
}
break
}
glob = glob[:i]
newGlob := glob + what + "/"
if len(out) == 0 || out[len(out)-1] != newGlob {
out = append(out, newGlob)
}
}
return out
}
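An in-package sketch (editor's illustration; globToRegexp and globToDirGlobs are unexported, so this would live in a _test.go file in this package) showing a couple of translations from the rules above:
// exampleGlobs demonstrates glob to regexp translation and the
// derived directory globs.
func exampleGlobs() {
	re, _ := globToRegexp("*.jpg") // compiles to (^|/)[^/]*\.jpg$
	fmt.Println(re.MatchString("dir/photo.jpg")) // true
	fmt.Println(re.MatchString("photo.jpeg"))    // false
	fmt.Println(globToDirGlobs("/a/*/b.txt"))    // [/a/*/ /a/ /]
}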

1063
vendor/github.com/ncw/rclone/fs/fs.go generated vendored Executable file

File diff suppressed because it is too large Load Diff

292
vendor/github.com/ncw/rclone/fs/fserrors/error.go generated vendored Executable file
View File

@@ -0,0 +1,292 @@
// Package fserrors provides errors and error handling
package fserrors
import (
"fmt"
"io"
"net/http"
"reflect"
"strings"
"github.com/pkg/errors"
)
// Retrier is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retrier interface {
error
Retry() bool
}
// retryError is a type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interface
var _ Retrier = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
return retryError(fmt.Sprintf(format, a...))
}
// wrappedRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedRetryError struct {
error
}
// Retry interface
func (err wrappedRetryError) Retry() bool {
return true
}
// Check interface
var _ Retrier = wrappedRetryError{error(nil)}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
if err == nil {
err = errors.New("needs retry")
}
return wrappedRetryError{err}
}
// IsRetryError returns true if err conforms to the Retry interface
// and calling the Retry method returns true.
func IsRetryError(err error) bool {
if err == nil {
return false
}
_, err = Cause(err)
if r, ok := err.(Retrier); ok {
return r.Retry()
}
return false
}
// Fataler is an optional interface for error as to whether the
// operation should cause the entire operation to finish immediately.
//
// This should be returned from Update or Put methods as required
type Fataler interface {
error
Fatal() bool
}
// wrappedFatalError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedFatalError struct {
error
}
// Fatal interface
func (err wrappedFatalError) Fatal() bool {
return true
}
// Check interface
var _ Fataler = wrappedFatalError{error(nil)}
// FatalError makes an error which indicates it is a fatal error and
// the sync should stop.
func FatalError(err error) error {
if err == nil {
err = errors.New("fatal error")
}
return wrappedFatalError{err}
}
// IsFatalError returns true if err conforms to the Fatal interface
// and calling the Fatal method returns true.
func IsFatalError(err error) bool {
if err == nil {
return false
}
_, err = Cause(err)
if r, ok := err.(Fataler); ok {
return r.Fatal()
}
return false
}
// NoRetrier is an optional interface for error as to whether the
// operation should not be retried at a high level.
//
// If only NoRetry errors are returned in a sync then the sync won't
// be retried.
//
// This should be returned from Update or Put methods as required
type NoRetrier interface {
error
NoRetry() bool
}
// wrappedNoRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedNoRetryError struct {
error
}
// NoRetry interface
func (err wrappedNoRetryError) NoRetry() bool {
return true
}
// Check interface
var _ NoRetrier = wrappedNoRetryError{error(nil)}
// NoRetryError makes an error which indicates the sync shouldn't be
// retried.
func NoRetryError(err error) error {
return wrappedNoRetryError{err}
}
// IsNoRetryError returns true if err conforms to the NoRetry
// interface and calling the NoRetry method returns true.
func IsNoRetryError(err error) bool {
if err == nil {
return false
}
_, err = Cause(err)
if r, ok := err.(NoRetrier); ok {
return r.NoRetry()
}
return false
}
// Cause is a souped up errors.Cause which can unwrap some standard
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
func Cause(cause error) (retriable bool, err error) {
err = cause
for prev := err; err != nil; prev = err {
// Check for net error Timeout()
if x, ok := err.(interface {
Timeout() bool
}); ok && x.Timeout() {
retriable = true
}
// Check for net error Temporary()
if x, ok := err.(interface {
Temporary() bool
}); ok && x.Temporary() {
retriable = true
}
// Unwrap 1 level if possible
err = errors.Cause(err)
if err == nil {
// errors.Cause can return nil which isn't
// desirable so pick the previous error in
// this case.
err = prev
}
if err == prev {
// Unpack any struct or *struct with a field
// of name Err which satisfies the error
// interface. This includes *url.Error,
// *net.OpError, *os.SyscallError and many
// others in the stdlib
errType := reflect.TypeOf(err)
errValue := reflect.ValueOf(err)
if errValue.IsValid() && errType.Kind() == reflect.Ptr {
errType = errType.Elem()
errValue = errValue.Elem()
}
if errValue.IsValid() && errType.Kind() == reflect.Struct {
if errField := errValue.FieldByName("Err"); errField.IsValid() {
errFieldValue := errField.Interface()
if newErr, ok := errFieldValue.(error); ok {
err = newErr
}
}
}
}
if err == prev {
break
}
}
return retriable, err
}
// retriableErrorStrings is a list of phrases which, when we find one
// in an error, we know it is a networking error which should be
// retried.
//
// This is incredibly ugly - if only errors.Cause worked for all
// errors and all errors were exported from the stdlib.
var retriableErrorStrings = []string{
"use of closed network connection", // internal/poll/fd.go
"unexpected EOF reading trailer", // net/http/transfer.go
"transport connection broken", // net/http/transport.go
"http: ContentLength=", // net/http/transfer.go
}
// Errors which indicate networking errors which should be retried
//
// These are added to in retriable_errors*.go
var retriableErrors = []error{
io.EOF,
io.ErrUnexpectedEOF,
}
// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() or if the error
// indicates a premature closing of the connection.
func ShouldRetry(err error) bool {
if err == nil {
return false
}
// Find root cause if available
retriable, err := Cause(err)
if retriable {
return true
}
// Check if it is a retriable error
for _, retriableErr := range retriableErrors {
if err == retriableErr {
return true
}
}
// Check error strings (yuch!) too
errString := err.Error()
for _, phrase := range retriableErrorStrings {
if strings.Contains(errString, phrase) {
return true
}
}
return false
}
// ShouldRetryHTTP returns a boolean as to whether this resp deserves to be retried.
// It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
for _, e := range retryErrorCodes {
if resp.StatusCode == e {
return true
}
}
return false
}
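A sketch (editor's illustration) of classifying errors with the helpers above:
package main

import (
	"fmt"
	"io"

	"github.com/ncw/rclone/fs/fserrors"
	"github.com/pkg/errors"
)

func main() {
	err := fserrors.RetryError(errors.New("flaky backend"))
	fmt.Println(fserrors.IsRetryError(err)) // true
	// io.ErrUnexpectedEOF is in the retriableErrors list above.
	fmt.Println(fserrors.ShouldRetry(io.ErrUnexpectedEOF)) // true
	fmt.Println(fserrors.ShouldRetry(errors.New("boom")))  // false
}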

21
vendor/github.com/ncw/rclone/fs/fserrors/retriable_errors.go generated vendored Executable file
View File

@@ -0,0 +1,21 @@
// +build !plan9
package fserrors
import (
"syscall"
)
func init() {
retriableErrors = append(retriableErrors,
syscall.EPIPE,
syscall.ETIMEDOUT,
syscall.ECONNREFUSED,
syscall.EHOSTDOWN,
syscall.EHOSTUNREACH,
syscall.ECONNABORTED,
syscall.EAGAIN,
syscall.EWOULDBLOCK,
syscall.ECONNRESET,
)
}

View File

@@ -0,0 +1,31 @@
// +build windows
package fserrors
import (
"syscall"
)
const (
WSAECONNABORTED syscall.Errno = 10053
WSAHOST_NOT_FOUND syscall.Errno = 11001
WSATRY_AGAIN syscall.Errno = 11002
WSAENETRESET syscall.Errno = 10052
WSAETIMEDOUT syscall.Errno = 10060
)
func init() {
// append some lower level errors since the standardized ones
// don't seem to happen
retriableErrors = append(retriableErrors,
syscall.WSAECONNRESET,
WSAECONNABORTED,
WSAHOST_NOT_FOUND,
WSATRY_AGAIN,
WSAENETRESET,
WSAETIMEDOUT,
syscall.ERROR_HANDLE_EOF,
syscall.ERROR_NETNAME_DELETED,
syscall.ERROR_BROKEN_PIPE,
)
}

312
vendor/github.com/ncw/rclone/fs/fshttp/http.go generated vendored Executable file
View File

@@ -0,0 +1,312 @@
// Package fshttp contains the common http parts of the config, Transport and Client
package fshttp
import (
"bytes"
"context"
"crypto/tls"
"net"
"net/http"
"net/http/httputil"
"reflect"
"sync"
"time"
"github.com/ncw/rclone/fs"
"golang.org/x/time/rate"
)
const (
separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)
var (
transport http.RoundTripper
noTransport sync.Once
tpsBucket *rate.Limiter // for limiting number of http transactions per second
)
// StartHTTPTokenBucket starts the token bucket if necessary
func StartHTTPTokenBucket() {
if fs.Config.TPSLimit > 0 {
tpsBurst := fs.Config.TPSLimitBurst
if tpsBurst < 1 {
tpsBurst = 1
}
tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
}
}
// A net.Conn that sets a deadline for every Read or Write operation
type timeoutConn struct {
net.Conn
timeout time.Duration
}
// create a timeoutConn using the timeout
func newTimeoutConn(conn net.Conn, timeout time.Duration) (c *timeoutConn, err error) {
c = &timeoutConn{
Conn: conn,
timeout: timeout,
}
err = c.nudgeDeadline()
return
}
// nudgeDeadline extends the deadline for an idle timeout by c.timeout if non-zero
func (c *timeoutConn) nudgeDeadline() (err error) {
if c.timeout == 0 {
return nil
}
when := time.Now().Add(c.timeout)
return c.Conn.SetDeadline(when)
}
// readOrWrite bytes doing idle timeouts
func (c *timeoutConn) readOrWrite(f func([]byte) (int, error), b []byte) (n int, err error) {
n, err = f(b)
// Don't nudge if no bytes or an error
if n == 0 || err != nil {
return
}
// Nudge the deadline on successful Read or Write
err = c.nudgeDeadline()
return
}
// Read bytes doing idle timeouts
func (c *timeoutConn) Read(b []byte) (n int, err error) {
return c.readOrWrite(c.Conn.Read, b)
}
// Write bytes doing idle timeouts
func (c *timeoutConn) Write(b []byte) (n int, err error) {
return c.readOrWrite(c.Conn.Write, b)
}
// setDefaults for a from b
//
// Copy the public members from b to a. We can't just use a struct
// copy as Transport contains a private mutex.
func setDefaults(a, b interface{}) {
pt := reflect.TypeOf(a)
t := pt.Elem()
va := reflect.ValueOf(a).Elem()
vb := reflect.ValueOf(b).Elem()
for i := 0; i < t.NumField(); i++ {
aField := va.Field(i)
// Set a from b if it is public
if aField.CanSet() {
bField := vb.Field(i)
aField.Set(bField)
}
}
}
// dial with context and timeouts
func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) {
dialer := NewDialer(ci)
c, err := dialer.DialContext(ctx, network, address)
if err != nil {
return c, err
}
return newTimeoutConn(c, ci.Timeout)
}
// NewTransport returns an http.RoundTripper with the correct timeouts
func NewTransport(ci *fs.ConfigInfo) http.RoundTripper {
noTransport.Do(func() {
// Start with a sensible set of defaults then override.
// This also means we get new stuff when it gets added to go
t := new(http.Transport)
setDefaults(t, http.DefaultTransport.(*http.Transport))
t.Proxy = http.ProxyFromEnvironment
t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
t.TLSHandshakeTimeout = ci.ConnectTimeout
t.ResponseHeaderTimeout = ci.Timeout
t.TLSClientConfig = &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify}
t.DisableCompression = ci.NoGzip
t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
return dialContextTimeout(ctx, network, addr, ci)
}
t.IdleConnTimeout = 60 * time.Second
t.ExpectContinueTimeout = ci.ConnectTimeout
// Wrap that http.Transport in our own transport
transport = newTransport(ci, t)
})
return transport
}
// NewClient returns an http.Client with the correct timeouts
func NewClient(ci *fs.ConfigInfo) *http.Client {
return &http.Client{
Transport: NewTransport(ci),
}
}
// Transport is our http Transport which wraps an http.Transport
// * Sets the User Agent
// * Does logging
type Transport struct {
*http.Transport
dump fs.DumpFlags
filterRequest func(req *http.Request)
userAgent string
}
// newTransport wraps the http.Transport passed in and logs all
// roundtrips including the body if logBody is set.
func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
return &Transport{
Transport: transport,
dump: ci.Dump,
userAgent: ci.UserAgent,
}
}
// SetRequestFilter sets a filter to be used on each request
func (t *Transport) SetRequestFilter(f func(req *http.Request)) {
t.filterRequest = f
}
// A mutex to protect this map
var checkedHostMu sync.RWMutex
// A map of servers we have checked for time
var checkedHost = make(map[string]struct{}, 1)
// Check the server time is the same as ours, once for each server
func checkServerTime(req *http.Request, resp *http.Response) {
host := req.URL.Host
if req.Host != "" {
host = req.Host
}
checkedHostMu.RLock()
_, ok := checkedHost[host]
checkedHostMu.RUnlock()
if ok {
return
}
dateString := resp.Header.Get("Date")
if dateString == "" {
return
}
date, err := http.ParseTime(dateString)
if err != nil {
fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
return
}
dt := time.Since(date)
const window = 5 * 60 * time.Second
if dt > window || dt < -window {
fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
}
checkedHostMu.Lock()
checkedHost[host] = struct{}{}
checkedHostMu.Unlock()
}
// cleanAuth gets rid of one authBuf header within the first 4k
func cleanAuth(buf, authBuf []byte) []byte {
// Find how much buffer to check
n := 4096
if len(buf) < n {
n = len(buf)
}
// See if there is an Authorization: header
i := bytes.Index(buf[:n], authBuf)
if i < 0 {
return buf
}
i += len(authBuf)
// Overwrite the next 4 chars with 'X'
for j := 0; i < len(buf) && j < 4; j++ {
if buf[i] == '\n' {
break
}
buf[i] = 'X'
i++
}
// Snip out to the next '\n'
j := bytes.IndexByte(buf[i:], '\n')
if j < 0 {
return buf[:i]
}
n = copy(buf[i:], buf[i+j:])
return buf[:i+n]
}
var authBufs = [][]byte{
[]byte("Authorization: "),
[]byte("X-Auth-Token: "),
}
// cleanAuths gets rid of all the possible Auth headers
func cleanAuths(buf []byte) []byte {
for _, authBuf := range authBufs {
buf = cleanAuth(buf, authBuf)
}
return buf
}
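
To make the redaction concrete, here is an editor's simplified standalone re-telling of cleanAuth (redactHeader is a hypothetical helper, not rclone's): the first four characters of the header value become 'X' and the rest of the line is snipped.

package main

import (
	"bytes"
	"fmt"
)

// redactHeader blanks the first four characters of the given header's
// value and snips the remainder of that line.
func redactHeader(dump []byte, header string) []byte {
	key := []byte(header + ": ")
	i := bytes.Index(dump, key)
	if i < 0 {
		return dump
	}
	i += len(key)
	for j := 0; j < 4 && i < len(dump) && dump[i] != '\n'; j++ {
		dump[i] = 'X'
		i++
	}
	if j := bytes.IndexByte(dump[i:], '\n'); j >= 0 {
		return append(dump[:i], dump[i+j:]...)
	}
	return dump[:i]
}

func main() {
	req := []byte("GET / HTTP/1.1\nAuthorization: Bearer secrettoken\nHost: example.com\n")
	fmt.Printf("%s", redactHeader(req, "Authorization"))
	// GET / HTTP/1.1
	// Authorization: XXXX
	// Host: example.com
}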
// RoundTrip implements the RoundTripper interface.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
// Get transactions per second token first if limiting
if tpsBucket != nil {
tbErr := tpsBucket.Wait(req.Context())
if tbErr != nil {
fs.Errorf(nil, "HTTP token bucket error: %v", err)
}
}
// Force user agent
req.Header.Set("User-Agent", t.userAgent)
// Filter the request if required
if t.filterRequest != nil {
t.filterRequest(req)
}
// Logf request
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
if t.dump&fs.DumpAuth == 0 {
buf = cleanAuths(buf)
}
fs.Debugf(nil, "%s", separatorReq)
fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
fs.Debugf(nil, "%s", string(buf))
fs.Debugf(nil, "%s", separatorReq)
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Logf response
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(nil, "%s", separatorResp)
fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
if err != nil {
fs.Debugf(nil, "Error: %v", err)
} else {
buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
fs.Debugf(nil, "%s", string(buf))
}
fs.Debugf(nil, "%s", separatorResp)
}
if err == nil {
checkServerTime(req, resp)
}
return resp, err
}
// NewDialer creates a net.Dialer structure with Timeout, Keepalive
// and LocalAddr set from rclone flags.
func NewDialer(ci *fs.ConfigInfo) *net.Dialer {
dialer := &net.Dialer{
Timeout: ci.ConnectTimeout,
KeepAlive: 30 * time.Second,
}
if ci.BindAddr != nil {
dialer.LocalAddr = &net.TCPAddr{IP: ci.BindAddr}
}
return dialer
}

50
vendor/github.com/ncw/rclone/fs/fspath/path.go generated vendored Executable file
View File

@@ -0,0 +1,50 @@
// Package fspath contains routines for fspath manipulation
package fspath
import (
"path"
"path/filepath"
"regexp"
"github.com/ncw/rclone/fs/driveletter"
)
// Matcher is a pattern to match an rclone URL
var Matcher = regexp.MustCompile(`^(:?[\w_ -]+):(.*)$`)
// Parse deconstructs a remote path into configName and fsPath
//
// If the path is a local path then configName will be returned as "".
//
// So "remote:path/to/dir" will return "remote", "path/to/dir"
// and "/path/to/local" will return ("", "/path/to/local")
//
// Note that this will turn \ into / in the fsPath on Windows
func Parse(path string) (configName, fsPath string) {
parts := Matcher.FindStringSubmatch(path)
configName, fsPath = "", path
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
configName, fsPath = parts[1], parts[2]
}
// change native directory separators to / if there are any
fsPath = filepath.ToSlash(fsPath)
return configName, fsPath
}
// Split splits a remote into a parent and a leaf
//
// if it returns leaf as an empty string then remote is a directory
//
// if it returns parent as an empty string then that means the current directory
//
// The returned values have the property that parent + leaf == remote
// (except under Windows where \ will be translated into /)
func Split(remote string) (parent string, leaf string) {
remoteName, remotePath := Parse(remote)
if remoteName != "" {
remoteName += ":"
}
// Construct new remote name without last segment
parent, leaf = path.Split(remotePath)
return remoteName + parent, leaf
}
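
A few sample decompositions (an editor's illustrative driver, assuming the vendored fspath package above):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/fspath"
)

func main() {
	fmt.Println(fspath.Parse("remote:path/to/dir")) // remote path/to/dir
	fmt.Println(fspath.Parse("/path/to/local"))     //  /path/to/local
	fmt.Println(fspath.Split("remote:path/to/dir")) // remote:path/to/ dir
	fmt.Println(fspath.Split("remote:file.txt"))    // remote: file.txt
}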

308
vendor/github.com/ncw/rclone/fs/hash/hash.go generated vendored Executable file
View File

@@ -0,0 +1,308 @@
package hash
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"fmt"
"hash"
"io"
"strings"
"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/ncw/rclone/backend/onedrive/quickxorhash"
"github.com/pkg/errors"
)
// Type indicates a standard hashing algorithm
type Type int
// ErrUnsupported should be returned by a filesystem
// if it is requested to deliver an unsupported hash type.
var ErrUnsupported = errors.New("hash type not supported")
const (
// MD5 indicates MD5 support
MD5 Type = 1 << iota
// SHA1 indicates SHA-1 support
SHA1
// Dropbox indicates Dropbox special hash
// https://www.dropbox.com/developers/reference/content-hash
Dropbox
// QuickXorHash indicates Microsoft onedrive hash
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
QuickXorHash
// None indicates no hashes are supported
None Type = 0
)
// Supported returns a set of all the supported hashes by
// HashStream and MultiHasher.
var Supported = NewHashSet(MD5, SHA1, Dropbox, QuickXorHash)
// Width is a map from hash Type to the width in characters of its hex digest
var Width = map[Type]int{
MD5: 32,
SHA1: 40,
Dropbox: 64,
QuickXorHash: 40,
}
// Stream will calculate hashes of all supported hash types.
func Stream(r io.Reader) (map[Type]string, error) {
return StreamTypes(r, Supported)
}
// StreamTypes will calculate hashes of the requested hash types.
func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
hashers, err := fromTypes(set)
if err != nil {
return nil, err
}
_, err = io.Copy(toMultiWriter(hashers), r)
if err != nil {
return nil, err
}
var ret = make(map[Type]string)
for k, v := range hashers {
ret[k] = hex.EncodeToString(v.Sum(nil))
}
return ret, nil
}
// String returns a string representation of the hash type.
// The function will panic if the hash type is unknown.
func (h Type) String() string {
switch h {
case None:
return "None"
case MD5:
return "MD5"
case SHA1:
return "SHA-1"
case Dropbox:
return "DropboxHash"
case QuickXorHash:
return "QuickXorHash"
default:
err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
panic(err)
}
}
// Set a Type from a flag
func (h *Type) Set(s string) error {
switch s {
case "None":
*h = None
case "MD5":
*h = MD5
case "SHA-1":
*h = SHA1
case "DropboxHash":
*h = Dropbox
case "QuickXorHash":
*h = QuickXorHash
default:
return errors.Errorf("Unknown hash type %q", s)
}
return nil
}
// Type of the value
func (h Type) Type() string {
return "string"
}
// fromTypes will return hashers for all the requested types.
// The types must be a subset of Supported,
// and this function must support all types.
func fromTypes(set Set) (map[Type]hash.Hash, error) {
if !set.SubsetOf(Supported) {
return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set))
}
var hashers = make(map[Type]hash.Hash)
types := set.Array()
for _, t := range types {
switch t {
case MD5:
hashers[t] = md5.New()
case SHA1:
hashers[t] = sha1.New()
case Dropbox:
hashers[t] = dbhash.New()
case QuickXorHash:
hashers[t] = quickxorhash.New()
default:
err := fmt.Sprintf("internal error: Unsupported hash type %v", t)
panic(err)
}
}
return hashers, nil
}
// toMultiWriter will return a set of hashers into a
// single multiwriter, where one write will update all
// the hashers.
func toMultiWriter(h map[Type]hash.Hash) io.Writer {
// Convert the map values to a slice
var w = make([]io.Writer, 0, len(h))
for _, v := range h {
w = append(w, v)
}
return io.MultiWriter(w...)
}
// A MultiHasher will construct various hashes on
// all incoming writes.
type MultiHasher struct {
w io.Writer
size int64
h map[Type]hash.Hash // Hashes
}
// NewMultiHasher will return a hash writer that will write all
// supported hash types.
func NewMultiHasher() *MultiHasher {
h, err := NewMultiHasherTypes(Supported)
if err != nil {
panic("internal error: could not create multihasher")
}
return h
}
// NewMultiHasherTypes will return a hash writer that will write
// the requested hash types.
func NewMultiHasherTypes(set Set) (*MultiHasher, error) {
hashers, err := fromTypes(set)
if err != nil {
return nil, err
}
m := MultiHasher{h: hashers, w: toMultiWriter(hashers)}
return &m, nil
}
func (m *MultiHasher) Write(p []byte) (n int, err error) {
n, err = m.w.Write(p)
m.size += int64(n)
return n, err
}
// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[Type]string {
dst := make(map[Type]string)
for k, v := range m.h {
dst[k] = hex.EncodeToString(v.Sum(nil))
}
return dst
}
// Size returns the number of bytes written
func (m *MultiHasher) Size() int64 {
return m.size
}
// A Set Indicates one or more hash types.
type Set int
// NewHashSet will create a new hash set with the hash types supplied
func NewHashSet(t ...Type) Set {
h := Set(None)
return h.Add(t...)
}
// Add one or more hash types to the set.
// Returns the modified hash set.
func (h *Set) Add(t ...Type) Set {
for _, v := range t {
*h |= Set(v)
}
return *h
}
// Contains returns true if the set contains the hash type passed in
func (h Set) Contains(t Type) bool {
return int(h)&int(t) != 0
}
// Overlap returns the overlapping hash types
func (h Set) Overlap(t Set) Set {
return Set(int(h) & int(t))
}
// SubsetOf will return true if all types of h
// are present in the set c
func (h Set) SubsetOf(c Set) bool {
return int(h)|int(c) == int(c)
}
// GetOne will return a hash type.
// Currently the first is returned, but it could be
// improved to return the strongest.
func (h Set) GetOne() Type {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
return Type(1 << i)
}
i++
v >>= 1
}
return Type(None)
}
// Array returns an array of all hash types in the set
func (h Set) Array() (ht []Type) {
v := int(h)
i := uint(0)
for v != 0 {
if v&1 != 0 {
ht = append(ht, Type(1<<i))
}
i++
v >>= 1
}
return ht
}
// Count returns the number of hash types in the set
func (h Set) Count() int {
if int(h) == 0 {
return 0
}
// credit: https://code.google.com/u/arnehormann/
x := uint64(h)
x -= (x >> 1) & 0x5555555555555555
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
x += x >> 4
x &= 0x0f0f0f0f0f0f0f0f
x *= 0x0101010101010101
return int(x >> 56)
}
// String returns a string representation of the hash set.
// The function will panic if it contains an unknown type.
func (h Set) String() string {
a := h.Array()
var r []string
for _, v := range a {
r = append(r, v.String())
}
return "[" + strings.Join(r, ", ") + "]"
}
// Equals checks to see if src == dst, but ignores empty strings
// and returns true if either is empty.
func Equals(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
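
A usage sketch (editor's illustration, assuming the vendored hash package above): a MultiHasher feeds every requested digest in a single pass over the data.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/ncw/rclone/fs/hash"
)

func main() {
	h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5, hash.SHA1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(h, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	sums := h.Sums()
	fmt.Println(sums[hash.MD5])  // 5d41402abc4b2a76b9719d911017c592
	fmt.Println(sums[hash.SHA1]) // aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
	fmt.Println(h.Size())        // 5
}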

102
vendor/github.com/ncw/rclone/fs/list/list.go generated vendored Executable file
View File

@@ -0,0 +1,102 @@
// Package list contains list functions
package list
import (
"sort"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/pkg/errors"
)
// DirSorted reads Object and *Dir into entries for the given Fs.
//
// dir is the start directory, "" for root
//
// If includeAll is specified all files will be added, otherwise only
// files and directories passing the filter will be added.
//
// Files will be returned in sorted order
func DirSorted(f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
// Get unfiltered entries from the fs
entries, err = f.List(dir)
if err != nil {
return nil, err
}
// This should happen only if an exclude file lives in the
// starting directory, otherwise DirSorted should not be
// called.
if !includeAll && filter.Active.ListContainsExcludeFile(entries) {
fs.Debugf(dir, "Excluded from sync (and deletion)")
return nil, nil
}
return filterAndSortDir(entries, includeAll, dir, filter.Active.IncludeObject, filter.Active.IncludeDirectory(f))
}
// filter (if required) and check the entries, then sort them
func filterAndSortDir(entries fs.DirEntries, includeAll bool, dir string,
IncludeObject func(o fs.Object) bool,
IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
prefix := ""
if dir != "" {
prefix = dir + "/"
}
for _, entry := range entries {
ok := true
// check includes and types
switch x := entry.(type) {
case fs.Object:
// Make sure we don't delete excluded files if not required
if !includeAll && !IncludeObject(x) {
ok = false
fs.Debugf(x, "Excluded from sync (and deletion)")
}
case fs.Directory:
if !includeAll {
include, err := IncludeDirectory(x.Remote())
if err != nil {
return nil, err
}
if !include {
ok = false
fs.Debugf(x, "Excluded from sync (and deletion)")
}
}
default:
return nil, errors.Errorf("unknown object type %T", entry)
}
// check remote name belongs in this directory
remote := entry.Remote()
switch {
case !ok:
// ignore
case !strings.HasPrefix(remote, prefix):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
case remote == prefix:
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
case strings.ContainsRune(remote[len(prefix):], '/'):
ok = false
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
default:
// ok
}
if ok {
newEntries = append(newEntries, entry)
}
}
entries = newEntries
// Sort the directory entries by Remote
//
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a
// consistent order, this will give the best user experience
// in syncing as it will use the first entry for the sync
// comparison.
sort.Stable(entries)
return entries, nil
}
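
filterAndSortDir relies on the classic in-place filter idiom, newEntries = entries[:0], which reuses the backing array instead of allocating a second slice. In isolation (editor's sketch):

package main

import "fmt"

func main() {
	xs := []int{1, 2, 3, 4, 5, 6}
	keep := xs[:0] // shares xs's backing array
	for _, x := range xs {
		if x%2 == 0 {
			keep = append(keep, x) // safe: the write index never passes the read index
		}
	}
	fmt.Println(keep) // [2 4 6]
}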

135
vendor/github.com/ncw/rclone/fs/log.go generated vendored Executable file
View File

@@ -0,0 +1,135 @@
package fs
import (
"fmt"
"log"
"github.com/pkg/errors"
)
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
type LogLevel byte
// Log levels. These are the syslog levels of which we only use a
// subset.
//
// LOG_EMERG system is unusable
// LOG_ALERT action must be taken immediately
// LOG_CRIT critical conditions
// LOG_ERR error conditions
// LOG_WARNING warning conditions
// LOG_NOTICE normal, but significant, condition
// LOG_INFO informational message
// LOG_DEBUG debug-level message
const (
LogLevelEmergency LogLevel = iota
LogLevelAlert
LogLevelCritical
LogLevelError // Error - can't be suppressed
LogLevelWarning
LogLevelNotice // Normal logging, -q suppresses
LogLevelInfo // Transfers, needs -v
LogLevelDebug // Debug level, needs -vv
)
var logLevelToString = []string{
LogLevelEmergency: "EMERGENCY",
LogLevelAlert: "ALERT",
LogLevelCritical: "CRITICAL",
LogLevelError: "ERROR",
LogLevelWarning: "WARNING",
LogLevelNotice: "NOTICE",
LogLevelInfo: "INFO",
LogLevelDebug: "DEBUG",
}
// String turns a LogLevel into a string
func (l LogLevel) String() string {
if l >= LogLevel(len(logLevelToString)) {
return fmt.Sprintf("LogLevel(%d)", l)
}
return logLevelToString[l]
}
// Set a LogLevel
func (l *LogLevel) Set(s string) error {
for n, name := range logLevelToString {
if s != "" && name == s {
*l = LogLevel(n)
return nil
}
}
return errors.Errorf("Unknown log level %q", s)
}
// Type of the value
func (l *LogLevel) Type() string {
return "string"
}
// LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) {
text = fmt.Sprintf("%-6s: %s", level, text)
log.Print(text)
}
// LogPrintf produces a log string from the arguments passed in
func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
out := fmt.Sprintf(text, args...)
if o != nil {
out = fmt.Sprintf("%v: %s", o, out)
}
LogPrint(level, out)
}
// LogLevelPrintf writes logs at the given level
func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= level {
LogPrintf(level, o, text, args...)
}
}
// Errorf writes error log output for this Object or Fs. It
// should always be seen by the user.
func Errorf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelError {
LogPrintf(LogLevelError, o, text, args...)
}
}
// Logf writes log output for this Object or Fs. This should be
// considered to be Info level logging. It is the default level. By
// default rclone should not log very much so only use this for
// important things the user should see. The user can filter these
// out with the -q flag.
func Logf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelNotice {
LogPrintf(LogLevelNotice, o, text, args...)
}
}
// Infof writes info on transfers for this Object or Fs. Use this
// level for logging transfers, deletions and things which should
// appear with the -v flag.
func Infof(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelInfo {
LogPrintf(LogLevelInfo, o, text, args...)
}
}
// Debugf writes debugging output for this Object or Fs. Use this for
// debug only. The user must specify -vv to see this.
func Debugf(o interface{}, text string, args ...interface{}) {
if Config.LogLevel >= LogLevelDebug {
LogPrintf(LogLevelDebug, o, text, args...)
}
}
// LogDirName returns an object for the logger, logging a root
// directory which would normally be "" as the Fs
func LogDirName(f Fs, dir string) interface{} {
if dir != "" {
return dir
}
return f
}
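
How the level gates compose (editor's sketch, assuming fs.Config as referenced by the code above):

package main

import "github.com/ncw/rclone/fs"

func main() {
	fs.Config.LogLevel = fs.LogLevelInfo
	fs.Logf(nil, "shown: Notice level, on by default")
	fs.Infof(nil, "shown: Info level is enabled (-v)")
	fs.Debugf(nil, "suppressed: Debug needs -vv")
}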

44
vendor/github.com/ncw/rclone/fs/mimetype.go generated vendored Executable file
View File

@@ -0,0 +1,44 @@
package fs
import (
"mime"
"path"
"strings"
)
// MimeTypeFromName returns a guess at the mime type from the name
func MimeTypeFromName(remote string) (mimeType string) {
mimeType = mime.TypeByExtension(path.Ext(remote))
if !strings.ContainsRune(mimeType, '/') {
mimeType = "application/octet-stream"
}
return mimeType
}
// MimeType returns the MimeType from the object, either by calling
// the MimeTyper interface or using MimeTypeFromName
func MimeType(o ObjectInfo) (mimeType string) {
// Read the MimeType from the optional interface if available
if do, ok := o.(MimeTyper); ok {
mimeType = do.MimeType()
// Debugf(o, "Read MimeType as %q", mimeType)
if mimeType != "" {
return mimeType
}
}
return MimeTypeFromName(o.Remote())
}
// MimeTypeDirEntry returns the MimeType of a DirEntry
//
// It returns "inode/directory" for directories, or uses
// MimeType(Object)
func MimeTypeDirEntry(item DirEntry) string {
switch x := item.(type) {
case Object:
return MimeType(x)
case Directory:
return "inode/directory"
}
return ""
}
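
For example (editor's sketch; the extension table mime.TypeByExtension consults is partly platform dependent):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	fmt.Println(fs.MimeTypeFromName("photo.jpg")) // image/jpeg
	fmt.Println(fs.MimeTypeFromName("README"))    // application/octet-stream (no extension)
}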

239
vendor/github.com/ncw/rclone/fs/object/object.go generated vendored Executable file
View File

@@ -0,0 +1,239 @@
// Package object defines some useful Objects
package object
import (
"bytes"
"errors"
"io"
"io/ioutil"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
// NewStaticObjectInfo returns a static ObjectInfo
// If hashes is nil and fs is not nil, the hash map will be replaced with
// empty hashes of the types supported by the fs.
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo {
info := &staticObjectInfo{
remote: remote,
modTime: modTime,
size: size,
storable: storable,
hashes: hashes,
fs: fs,
}
if fs != nil && hashes == nil {
set := fs.Hashes().Array()
info.hashes = make(map[hash.Type]string)
for _, ht := range set {
info.hashes[ht] = ""
}
}
return info
}
type staticObjectInfo struct {
remote string
modTime time.Time
size int64
storable bool
hashes map[hash.Type]string
fs fs.Info
}
func (i *staticObjectInfo) Fs() fs.Info { return i.fs }
func (i *staticObjectInfo) Remote() string { return i.remote }
func (i *staticObjectInfo) String() string { return i.remote }
func (i *staticObjectInfo) ModTime() time.Time { return i.modTime }
func (i *staticObjectInfo) Size() int64 { return i.size }
func (i *staticObjectInfo) Storable() bool { return i.storable }
func (i *staticObjectInfo) Hash(h hash.Type) (string, error) {
if len(i.hashes) == 0 {
return "", hash.ErrUnsupported
}
if hash, ok := i.hashes[h]; ok {
return hash, nil
}
return "", hash.ErrUnsupported
}
// MemoryFs is an in memory Fs; it only supports FsInfo and Put
var MemoryFs memoryFs
// memoryFs is an in memory fs
type memoryFs struct{}
// Name of the remote (as passed into NewFs)
func (memoryFs) Name() string { return "memory" }
// Root of the remote (as passed into NewFs)
func (memoryFs) Root() string { return "" }
// String returns a description of the FS
func (memoryFs) String() string { return "memory" }
// Precision of the ModTimes in this Fs
func (memoryFs) Precision() time.Duration { return time.Nanosecond }
// Returns the supported hash types of the filesystem
func (memoryFs) Hashes() hash.Set { return hash.Supported }
// Features returns the optional features of this Fs
func (memoryFs) Features() *fs.Features { return &fs.Features{} }
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (memoryFs) List(dir string) (entries fs.DirEntries, err error) {
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (memoryFs) NewObject(remote string) (fs.Object, error) {
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (memoryFs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := NewMemoryObject(src.Remote(), src.ModTime(), nil)
return o, o.Update(in, src, options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (memoryFs) Mkdir(dir string) error {
return errors.New("memoryFs: can't make directory")
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (memoryFs) Rmdir(dir string) error {
return fs.ErrorDirNotFound
}
var _ fs.Fs = MemoryFs
// MemoryObject is an in memory object
type MemoryObject struct {
remote string
modTime time.Time
content []byte
}
// NewMemoryObject returns an in memory Object with the modTime and content passed in
func NewMemoryObject(remote string, modTime time.Time, content []byte) *MemoryObject {
return &MemoryObject{
remote: remote,
modTime: modTime,
content: content,
}
}
// Content returns the underlying buffer
func (o *MemoryObject) Content() []byte {
return o.content
}
// Fs returns read only access to the Fs that this object is part of
func (o *MemoryObject) Fs() fs.Info {
return MemoryFs
}
// Remote returns the remote path
func (o *MemoryObject) Remote() string {
return o.remote
}
// String returns a description of the Object
func (o *MemoryObject) String() string {
return o.remote
}
// ModTime returns the modification date of the file
func (o *MemoryObject) ModTime() time.Time {
return o.modTime
}
// Size returns the size of the file
func (o *MemoryObject) Size() int64 {
return int64(len(o.content))
}
// Storable says whether this object can be stored
func (o *MemoryObject) Storable() bool {
return true
}
// Hash returns the requested hash of the contents
func (o *MemoryObject) Hash(h hash.Type) (string, error) {
hash, err := hash.NewMultiHasherTypes(hash.Set(h))
if err != nil {
return "", err
}
_, err = hash.Write(o.content)
if err != nil {
return "", err
}
return hash.Sums()[h], nil
}
// SetModTime sets the metadata on the object to set the modification date
func (o *MemoryObject) SetModTime(modTime time.Time) error {
o.modTime = modTime
return nil
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *MemoryObject) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
content := o.content
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
content = o.content[x.Start:x.End]
case *fs.SeekOption:
content = o.content[x.Offset:]
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
return ioutil.NopCloser(bytes.NewBuffer(content)), nil
}
// Update in to the object with the modTime given of the given size
//
// This re-uses the internal buffer if at all possible.
func (o *MemoryObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size == 0 {
o.content = nil
} else if size < 0 || int64(cap(o.content)) < size {
o.content, err = ioutil.ReadAll(in)
} else {
o.content = o.content[:size]
_, err = io.ReadFull(in, o.content)
}
o.modTime = src.ModTime()
return err
}
// Remove this object
func (o *MemoryObject) Remove() error {
return errors.New("memoryObject.Remove not supported")
}
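
An editor's usage sketch for MemoryObject, exercising the Open option handling above. Note that Open slices content[x.Start:x.End], so End behaves exclusively here, unlike the inclusive HTTP semantics documented for RangeOption.

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/object"
)

func main() {
	o := object.NewMemoryObject("hello.txt", time.Now(), []byte("hello world"))
	rc, err := o.Open(&fs.RangeOption{Start: 0, End: 5})
	if err != nil {
		panic(err)
	}
	defer func() { _ = rc.Close() }()
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // hello
}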

262
vendor/github.com/ncw/rclone/fs/options.go generated vendored Executable file
View File

@@ -0,0 +1,262 @@
// Define the options for Open
package fs
import (
"fmt"
"net/http"
"strconv"
"strings"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// OpenOption is an interface describing options for Open
type OpenOption interface {
fmt.Stringer
// Header returns the option as an HTTP header
Header() (key string, value string)
// Mandatory returns whether this option can be ignored or not
Mandatory() bool
}
// RangeOption defines an HTTP Range option with start and end. If
// either start or end are < 0 then they will be omitted.
//
// End may be bigger than the Size of the object in which case it will
// be capped to the size of the object.
//
// Note that the End is inclusive, so to fetch 100 bytes you would use
// RangeOption{Start: 0, End: 99}
//
// If Start is specified but End is not then it will fetch from Start
// to the end of the file.
//
// If End is specified, but Start is not then it will fetch the last
// End bytes.
//
// Examples:
//
// RangeOption{Start: 0, End: 99} - fetch the first 100 bytes
// RangeOption{Start: 100, End: 199} - fetch the second 100 bytes
// RangeOption{Start: 100} - fetch bytes from offset 100 to the end
// RangeOption{End: 100} - fetch the last 100 bytes
//
// A RangeOption implements a single byte-range-spec from
// https://tools.ietf.org/html/rfc7233#section-2.1
type RangeOption struct {
Start int64
End int64
}
// Header formats the option as an http header
func (o *RangeOption) Header() (key string, value string) {
key = "Range"
value = "bytes="
if o.Start >= 0 {
value += strconv.FormatInt(o.Start, 10)
}
value += "-"
if o.End >= 0 {
value += strconv.FormatInt(o.End, 10)
}
return key, value
}
// ParseRangeOption parses a RangeOption from a Range: header.
// It only accepts single ranges.
func ParseRangeOption(s string) (po *RangeOption, err error) {
const preamble = "bytes="
if !strings.HasPrefix(s, preamble) {
return nil, errors.New("Range: header invalid: doesn't start with " + preamble)
}
s = s[len(preamble):]
if strings.IndexRune(s, ',') >= 0 {
return nil, errors.New("Range: header invalid: contains multiple ranges which isn't supported")
}
dash := strings.IndexRune(s, '-')
if dash < 0 {
return nil, errors.New("Range: header invalid: contains no '-'")
}
start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:])
o := RangeOption{Start: -1, End: -1}
if start != "" {
o.Start, err = strconv.ParseInt(start, 10, 64)
if err != nil || o.Start < 0 {
return nil, errors.New("Range: header invalid: bad start")
}
}
if end != "" {
o.End, err = strconv.ParseInt(end, 10, 64)
if err != nil || o.End < 0 {
return nil, errors.New("Range: header invalid: bad end")
}
}
return &o, nil
}
// String formats the option into human readable form
func (o *RangeOption) String() string {
return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *RangeOption) Mandatory() bool {
return true
}
// Decode interprets the RangeOption into an offset and a limit
//
// The offset is the start of the stream and the limit is how many
// bytes should be read from it. If the limit is -1 then the stream
// should be read to the end.
func (o *RangeOption) Decode(size int64) (offset, limit int64) {
if o.Start >= 0 {
offset = o.Start
if o.End >= 0 {
limit = o.End - o.Start + 1
} else {
limit = -1
}
} else {
if o.End >= 0 {
offset = size - o.End
} else {
offset = 0
}
limit = -1
}
return offset, limit
}
// FixRangeOption looks through the slice of options and adjusts any
// RangeOptions found that request a fetch from the end into an
// absolute fetch using the size passed in and makes sure the range does
// not exceed filesize. Some remotes (eg Onedrive, Box) don't support
// range requests which index from the end.
func FixRangeOption(options []OpenOption, size int64) {
for i := range options {
option := options[i]
if x, ok := option.(*RangeOption); ok {
// If start is < 0 then fetch from the end
if x.Start < 0 {
x = &RangeOption{Start: size - x.End, End: -1}
options[i] = x
}
if x.End > size {
x = &RangeOption{Start: x.Start, End: size}
options[i] = x
}
}
}
}
// SeekOption defines an HTTP Range option with start only.
type SeekOption struct {
Offset int64
}
// Header formats the option as an http header
func (o *SeekOption) Header() (key string, value string) {
key = "Range"
value = fmt.Sprintf("bytes=%d-", o.Offset)
return key, value
}
// String formats the option into human readable form
func (o *SeekOption) String() string {
return fmt.Sprintf("SeekOption(%d)", o.Offset)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *SeekOption) Mandatory() bool {
return true
}
// HTTPOption defines a general purpose HTTP option
type HTTPOption struct {
Key string
Value string
}
// Header formats the option as an http header
func (o *HTTPOption) Header() (key string, value string) {
return o.Key, o.Value
}
// String formats the option into human readable form
func (o *HTTPOption) String() string {
return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *HTTPOption) Mandatory() bool {
return false
}
// HashesOption defines an option used to tell the local fs to limit
// the number of hashes it calculates.
type HashesOption struct {
Hashes hash.Set
}
// Header formats the option as an http header
func (o *HashesOption) Header() (key string, value string) {
return "", ""
}
// String formats the option into human readable form
func (o *HashesOption) String() string {
return fmt.Sprintf("HashesOption(%v)", o.Hashes)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *HashesOption) Mandatory() bool {
return false
}
// OpenOptionAddHeaders adds each header found in options to the
// headers map provided the key was non empty.
func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) {
for _, option := range options {
key, value := option.Header()
if key != "" && value != "" {
headers[key] = value
}
}
}
// OpenOptionHeaders returns a map of the headers found in options
// provided the key was non empty.
//
// It returns a nil map if options was empty
func OpenOptionHeaders(options []OpenOption) (headers map[string]string) {
if len(options) == 0 {
return nil
}
headers = make(map[string]string, len(options))
OpenOptionAddHeaders(options, headers)
return headers
}
// OpenOptionAddHTTPHeaders sets each header found in options on the
// http.Header map provided the key was non empty.
func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) {
for _, option := range options {
key, value := option.Header()
if key != "" && value != "" {
headers.Set(key, value)
}
}
}
// check interface
var (
_ OpenOption = (*RangeOption)(nil)
_ OpenOption = (*SeekOption)(nil)
_ OpenOption = (*HTTPOption)(nil)
)
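
Putting ParseRangeOption, Decode and FixRangeOption together (editor's sketch): a suffix range such as "bytes=-100" decodes to an offset from the end of the file, and FixRangeOption rewrites it into an absolute range for remotes that can't index from the end.

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	o, err := fs.ParseRangeOption("bytes=-100") // the last 100 bytes
	if err != nil {
		panic(err)
	}
	offset, limit := o.Decode(1000) // of a 1000 byte file
	fmt.Println(offset, limit)      // 900 -1

	opts := []fs.OpenOption{o}
	fs.FixRangeOption(opts, 1000)
	fmt.Println(opts[0]) // RangeOption(900,-1)
}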

103
vendor/github.com/ncw/rclone/fs/parseduration.go generated vendored Executable file
View File

@@ -0,0 +1,103 @@
package fs
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// Duration is a time.Duration with some more parsing options
type Duration time.Duration
// DurationOff is the default value for flags which can be turned off
const DurationOff = Duration((1 << 63) - 1)
// Turn Duration into a string
func (d Duration) String() string {
if d == DurationOff {
return "off"
}
for i := len(ageSuffixes) - 2; i >= 0; i-- {
ageSuffix := &ageSuffixes[i]
if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) {
timeUnits := float64(d) / float64(ageSuffix.Multiplier)
return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix
}
}
return time.Duration(d).String()
}
// IsSet returns true if the duration is != DurationOff
func (d Duration) IsSet() bool {
return d != DurationOff
}
// We use time conventions
var ageSuffixes = []struct {
Suffix string
Multiplier time.Duration
}{
{Suffix: "d", Multiplier: time.Hour * 24},
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
{Suffix: "y", Multiplier: time.Hour * 24 * 365},
// Default to second
{Suffix: "", Multiplier: time.Second},
}
// ParseDuration parses a duration string. Accepts ms|s|m|h|d|w|M|y suffixes. Defaults to seconds if no suffix is provided
func ParseDuration(age string) (time.Duration, error) {
var period float64
if age == "off" {
return time.Duration(DurationOff), nil
}
// Attempt to parse as a time.Duration first
d, err := time.ParseDuration(age)
if err == nil {
return d, nil
}
for _, ageSuffix := range ageSuffixes {
if strings.HasSuffix(age, ageSuffix.Suffix) {
numberString := age[:len(age)-len(ageSuffix.Suffix)]
var err error
period, err = strconv.ParseFloat(numberString, 64)
if err != nil {
return time.Duration(0), err
}
period *= float64(ageSuffix.Multiplier)
break
}
}
return time.Duration(period), nil
}
// Set a Duration
func (d *Duration) Set(s string) error {
duration, err := ParseDuration(s)
if err != nil {
return err
}
*d = Duration(duration)
return nil
}
// Type of the value
func (d Duration) Type() string {
return "duration"
}
// Scan implements the fmt.Scanner interface
func (d *Duration) Scan(s fmt.ScanState, ch rune) error {
token, err := s.Token(true, nil)
if err != nil {
return err
}
return d.Set(string(token))
}
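
Some sample parses (editor's sketch; the printed values are ordinary time.Duration strings):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	for _, s := range []string{"90s", "1.5d", "2w", "off"} {
		d, err := fs.ParseDuration(s)
		fmt.Println(s, d, err)
	}
	// 90s 1m30s <nil>
	// 1.5d 36h0m0s <nil>
	// 2w 336h0m0s <nil>
	// off 2562047h47m16.854775807s <nil>
}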

131
vendor/github.com/ncw/rclone/fs/rc/internal.go generated vendored Executable file
View File

@@ -0,0 +1,131 @@
// Define the internal rc functions
package rc
import (
"os"
"runtime"
"github.com/pkg/errors"
)
func init() {
Add(Call{
Path: "rc/noop",
Fn: rcNoop,
Title: "Echo the input to the output parameters",
Help: `
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
})
Add(Call{
Path: "rc/error",
Fn: rcError,
Title: "This returns an error",
Help: `
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
})
Add(Call{
Path: "rc/list",
Fn: rcList,
Title: "List all the registered remote control commands",
Help: `
This lists all the registered remote control commands as a JSON map in
the commands response.`,
})
Add(Call{
Path: "core/pid",
Fn: rcPid,
Title: "Return PID of current process",
Help: `
This returns PID of current process.
Useful for stopping rclone process.`,
})
Add(Call{
Path: "core/memstats",
Fn: rcMemStats,
Title: "Returns the memory statistics",
Help: `
This returns the memory statistics of the running program. What the values mean
is explained in the go docs: https://golang.org/pkg/runtime/#MemStats
The most interesting values for most people are:
* HeapAlloc: This is the amount of memory rclone is actually using
* HeapSys: This is the amount of memory rclone has obtained from the OS
* Sys: this is the total amount of memory requested from the OS
* It is virtual memory so may include unused memory
`,
})
Add(Call{
Path: "core/gc",
Fn: rcGc,
Title: "Runs a garbage collection.",
Help: `
This tells the go runtime to do a garbage collection run. It isn't
necessary to call this normally, but it can be useful for debugging
memory problems.
`,
})
}
// Echo the input to the output parameters
func rcNoop(in Params) (out Params, err error) {
return in, nil
}
// Return an error regardless
func rcError(in Params) (out Params, err error) {
return nil, errors.Errorf("arbitrary error on input %+v", in)
}
// List the registered commands
func rcList(in Params) (out Params, err error) {
out = make(Params)
out["commands"] = registry.list()
return out, nil
}
// Return PID of current process
func rcPid(in Params) (out Params, err error) {
out = make(Params)
out["pid"] = os.Getpid()
return out, nil
}
// Return the memory statistics
func rcMemStats(in Params) (out Params, err error) {
out = make(Params)
var m runtime.MemStats
runtime.ReadMemStats(&m)
out["Alloc"] = m.Alloc
out["TotalAlloc"] = m.TotalAlloc
out["Sys"] = m.Sys
out["Mallocs"] = m.Mallocs
out["Frees"] = m.Frees
out["HeapAlloc"] = m.HeapAlloc
out["HeapSys"] = m.HeapSys
out["HeapIdle"] = m.HeapIdle
out["HeapInuse"] = m.HeapInuse
out["HeapReleased"] = m.HeapReleased
out["HeapObjects"] = m.HeapObjects
out["StackInuse"] = m.StackInuse
out["StackSys"] = m.StackSys
out["MSpanInuse"] = m.MSpanInuse
out["MSpanSys"] = m.MSpanSys
out["MCacheInuse"] = m.MCacheInuse
out["MCacheSys"] = m.MCacheSys
out["BuckHashSys"] = m.BuckHashSys
out["GCSys"] = m.GCSys
out["OtherSys"] = m.OtherSys
return out, nil
}
// Do a garbage collection run
func rcGc(in Params) (out Params, err error) {
out = make(Params)
runtime.GC()
return out, nil
}

146
vendor/github.com/ncw/rclone/fs/rc/rc.go generated vendored Executable file
View File

@@ -0,0 +1,146 @@
// Package rc implements a remote control server and registry for rclone
//
// To register your internal calls, call rc.Add(path, function). Your
// function should take and return a Params. It can also return an
// error. Use rc.NewError to wrap an existing error along with an
// http response type if another response other than 500 internal
// error is required on error.
package rc
import (
"encoding/json"
"io"
"net/http"
_ "net/http/pprof" // install the pprof http handlers
"strings"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Options contains options for the remote control server
type Options struct {
HTTPOptions httplib.Options
Enabled bool
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
HTTPOptions: httplib.DefaultOpt,
Enabled: false,
}
func init() {
DefaultOpt.HTTPOptions.ListenAddr = "localhost:5572"
}
// Start the remote control server if configured
func Start(opt *Options) {
if opt.Enabled {
s := newServer(opt)
go s.serve()
}
}
// server contains everything to run the server
type server struct {
srv *httplib.Server
}
func newServer(opt *Options) *server {
// Serve on the DefaultServeMux so that global registrations (eg the pprof handlers) appear
mux := http.DefaultServeMux
s := &server{
srv: httplib.NewServer(mux, &opt.HTTPOptions),
}
mux.HandleFunc("/", s.handler)
return s
}
// serve runs the http server - doesn't return
func (s *server) serve() {
err := s.srv.Serve()
if err != nil {
fs.Errorf(nil, "Opening listener: %v", err)
}
fs.Logf(nil, "Serving remote control on %s", s.srv.URL())
s.srv.Wait()
}
// WriteJSON writes JSON in out to w
func WriteJSON(w io.Writer, out Params) error {
enc := json.NewEncoder(w)
enc.SetIndent("", "\t")
return enc.Encode(out)
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
path := strings.Trim(r.URL.Path, "/")
in := make(Params)
writeError := func(err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
w.WriteHeader(status)
err = WriteJSON(w, Params{
"error": err.Error(),
"input": in,
})
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
if r.Method != "POST" {
writeError(errors.Errorf("method %q not allowed - POST required", r.Method), http.StatusMethodNotAllowed)
return
}
// Find the call
call := registry.get(path)
if call == nil {
writeError(errors.Errorf("couldn't find method %q", path), http.StatusMethodNotAllowed)
return
}
// Parse the POST and URL parameters into r.Form
err := r.ParseForm()
if err != nil {
writeError(errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
return
}
// Read the POST and URL parameters into in
for k, vs := range r.Form {
if len(vs) > 0 {
in[k] = vs[len(vs)-1]
}
}
fs.Debugf(nil, "form = %+v", r.Form)
// Parse a JSON blob from the input
if r.Header.Get("Content-Type") == "application/json" {
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
writeError(errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
return
}
}
fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
out, err := call.Fn(in)
if err != nil {
writeError(errors.Wrap(err, "remote control command failed"), http.StatusInternalServerError)
return
}
fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
err = WriteJSON(w, out)
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
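
An editor's sketch of driving this API from a client, assuming rclone is running with the remote control enabled on the default localhost:5572 listener configured above: POST a JSON body and rc/noop echoes it back.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"echo":"hello"}`)
	resp, err := http.Post("http://localhost:5572/rc/noop", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s", out) // the input parameters, echoed back as JSON
}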

79
vendor/github.com/ncw/rclone/fs/rc/registry.go generated vendored Executable file
View File

@@ -0,0 +1,79 @@
// Define the registry
package rc
import (
"sort"
"strings"
"sync"
"github.com/ncw/rclone/fs"
)
// Params is the input and output type for the Func
type Params map[string]interface{}
// Func defines a type for a remote control function
type Func func(in Params) (out Params, err error)
// Call defines info about a remote control function and is used in
// the Add function to create new entry points.
type Call struct {
Path string // path to activate this RC
Fn Func `json:"-"` // function to call
Title string // help for the function
Help string // multi-line markdown formatted help
}
// Registry holds the list of all the registered remote control functions
type Registry struct {
mu sync.RWMutex
call map[string]*Call
}
// NewRegistry makes a new registry for remote control functions
func NewRegistry() *Registry {
return &Registry{
call: make(map[string]*Call),
}
}
// Add a call to the registry
func (r *Registry) add(call Call) {
r.mu.Lock()
defer r.mu.Unlock()
call.Path = strings.Trim(call.Path, "/")
call.Help = strings.TrimSpace(call.Help)
fs.Debugf(nil, "Adding path %q to remote control registry", call.Path)
r.call[call.Path] = &call
}
// get a Call from a path or nil
func (r *Registry) get(path string) *Call {
r.mu.RLock()
defer r.mu.RUnlock()
return r.call[path]
}
// get a list of all calls in alphabetical order
func (r *Registry) list() (out []*Call) {
r.mu.RLock()
defer r.mu.RUnlock()
var keys []string
for key := range r.call {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
out = append(out, r.call[key])
}
return out
}
// The global registry
var registry = NewRegistry()
// Add a call to the global registry
func Add(call Call) {
registry.add(call)
}
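
Registering a new entry point is then a single call (editor's sketch; "custom/echo" is a made-up path):

package main

import "github.com/ncw/rclone/fs/rc"

func main() {
	rc.Add(rc.Call{
		Path:  "custom/echo",
		Title: "Echo back the input",
		Help:  "Returns its input parameters unchanged.",
		Fn: func(in rc.Params) (rc.Params, error) {
			return in, nil
		},
	})
	// The command now appears in rc/list and is callable over HTTP.
}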

132
vendor/github.com/ncw/rclone/fs/sizesuffix.go generated vendored Executable file
View File

@@ -0,0 +1,132 @@
package fs
// SizeSuffix is parsed by flag with k/M/G suffixes
import (
"fmt"
"math"
"strconv"
"strings"
"github.com/pkg/errors"
)
// SizeSuffix is an int64 with a friendly way of printing and setting
type SizeSuffix int64
// Common multipliers for SizeSuffix
const (
Byte SizeSuffix = 1 << (iota * 10)
KibiByte
MebiByte
GibiByte
TebiByte
PebiByte
ExbiByte
)
// Turn SizeSuffix into a string and a suffix
func (x SizeSuffix) string() (string, string) {
scaled := float64(0)
suffix := ""
switch {
case x < 0:
return "off", ""
case x == 0:
return "0", ""
case x < 1<<10:
scaled = float64(x)
suffix = ""
case x < 1<<20:
scaled = float64(x) / (1 << 10)
suffix = "k"
case x < 1<<30:
scaled = float64(x) / (1 << 20)
suffix = "M"
case x < 1<<40:
scaled = float64(x) / (1 << 30)
suffix = "G"
case x < 1<<50:
scaled = float64(x) / (1 << 40)
suffix = "T"
default:
scaled = float64(x) / (1 << 50)
suffix = "P"
}
if math.Floor(scaled) == scaled {
return fmt.Sprintf("%.0f", scaled), suffix
}
return fmt.Sprintf("%.3f", scaled), suffix
}
// String turns SizeSuffix into a string
func (x SizeSuffix) String() string {
val, suffix := x.string()
return val + suffix
}
// Unit turns SizeSuffix into a string with a unit
func (x SizeSuffix) Unit(unit string) string {
val, suffix := x.string()
if val == "off" {
return val
}
return val + " " + suffix + unit
}
// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
if len(s) == 0 {
return errors.New("empty string")
}
if strings.ToLower(s) == "off" {
*x = -1
return nil
}
suffix := s[len(s)-1]
suffixLen := 1
var multiplier float64
switch suffix {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
suffixLen = 0
multiplier = 1 << 10
case 'b', 'B':
multiplier = 1
case 'k', 'K':
multiplier = 1 << 10
case 'm', 'M':
multiplier = 1 << 20
case 'g', 'G':
multiplier = 1 << 30
case 't', 'T':
multiplier = 1 << 40
case 'p', 'P':
multiplier = 1 << 50
default:
return errors.Errorf("bad suffix %q", suffix)
}
s = s[:len(s)-suffixLen]
value, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
if value < 0 {
return errors.Errorf("size can't be negative %q", s)
}
value *= multiplier
*x = SizeSuffix(value)
return nil
}
// Type of the value
func (x *SizeSuffix) Type() string {
return "int64"
}
// Scan implements the fmt.Scanner interface
func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
token, err := s.Token(true, nil)
if err != nil {
return err
}
return x.Set(string(token))
}
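
A few round trips through Set and String (editor's sketch). Note from the Set switch above that a bare number defaults to kibibytes:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	var s fs.SizeSuffix
	for _, in := range []string{"1.5M", "100", "8G", "off"} {
		if err := s.Set(in); err != nil {
			panic(err)
		}
		fmt.Println(in, "=>", int64(s), s)
	}
	// 1.5M => 1572864 1.500M
	// 100 => 102400 100k
	// 8G => 8589934592 8G
	// off => -1 off
}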

4
vendor/github.com/ncw/rclone/fs/version.go generated vendored Executable file
View File

@@ -0,0 +1,4 @@
package fs
// Version of rclone
var Version = "v1.43.1"

7
vendor/github.com/ncw/rclone/fs/versioncheck.go generated vendored Executable file
View File

@@ -0,0 +1,7 @@
//+build !go1.7
package fs
// Upgrade to Go version 1.7 to compile rclone - latest stable go
// compiler recommended.
func init() { Go_version_1_7_required_for_compilation() }

552
vendor/github.com/ncw/rclone/fs/walk/walk.go generated vendored Executable file
View File

@@ -0,0 +1,552 @@
// Package walk walks directories
package walk
import (
"bytes"
"fmt"
"path"
"sort"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/list"
"github.com/pkg/errors"
)
// ErrorSkipDir is used as a return value from Walk to indicate that the
// directory named in the call is to be skipped. It is not returned as
// an error by any function.
var ErrorSkipDir = errors.New("skip this directory")
// ErrorCantListR is returned by WalkR if the underlying Fs isn't
// capable of doing a recursive listing.
var ErrorCantListR = errors.New("recursive directory listing not available")
// Func is the type of the function called for directory
// visited by Walk. The path argument contains remote path to the directory.
//
// If there was a problem walking to directory named by path, the
// incoming error will describe the problem and the function can
// decide how to handle that error (and Walk will not descend into
// that directory). If an error is returned, processing stops. The
// sole exception is when the function returns the special value
// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
// directory's contents entirely.
type Func func(path string, entries fs.DirEntries, err error) error
// Walk lists the directory.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// It calls fn for each tranche of DirEntries read.
//
// Note that fn will not be called concurrently whereas the directory
// listing will proceed concurrently.
//
// Parent directories are always listed before their children
//
// This is implemented by WalkR if Config.UseListR is true
// and f supports it and level > 1, or WalkN otherwise.
//
// NB (f, path) to be replaced by fs.Dir at some point
func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
return walkListR(f, path, includeAll, maxLevel, fn)
}
return walkListDirSorted(f, path, includeAll, maxLevel, fn)
}
// walkListDirSorted lists the directory.
//
// It implements Walk using non recursive directory listing.
func walkListDirSorted(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
return walk(f, path, includeAll, maxLevel, fn, list.DirSorted)
}
// walkListR lists the directory.
//
// It implements Walk using recursive directory listing if
// available, or returns ErrorCantListR if not.
func walkListR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
listR := f.Features().ListR
if listR == nil {
return ErrorCantListR
}
return walkR(f, path, includeAll, maxLevel, fn, listR)
}
type listDirFunc func(fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
func walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
var (
wg sync.WaitGroup // sync closing of go routines
traversing sync.WaitGroup // running directory traversals
doClose sync.Once // close the channel once
mu sync.Mutex // stop fn being called concurrently
)
// listJob describe a directory listing that needs to be done
type listJob struct {
remote string
depth int
}
in := make(chan listJob, fs.Config.Checkers)
errs := make(chan error, 1)
quit := make(chan struct{})
closeQuit := func() {
doClose.Do(func() {
close(quit)
go func() {
for range in {
traversing.Done()
}
}()
})
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case job, ok := <-in:
if !ok {
return
}
entries, err := listDir(f, includeAll, job.remote)
var jobs []listJob
if err == nil && job.depth != 0 {
entries.ForDir(func(dir fs.Directory) {
// Recurse for the directory
jobs = append(jobs, listJob{
remote: dir.Remote(),
depth: job.depth - 1,
})
})
}
mu.Lock()
err = fn(job.remote, entries, err)
mu.Unlock()
// NB once we have passed entries to fn we mustn't touch it again
if err != nil && err != ErrorSkipDir {
traversing.Done()
fs.CountError(err)
fs.Errorf(job.remote, "error listing: %v", err)
closeQuit()
// Send error to error channel if space
select {
case errs <- err:
default:
}
continue
}
if err == nil && len(jobs) > 0 {
traversing.Add(len(jobs))
go func() {
// Now we have traversed this directory, send these
// jobs off for traversal in the background
for _, newJob := range jobs {
in <- newJob
}
}()
}
traversing.Done()
case <-quit:
return
}
}
}()
}
// Start the process
traversing.Add(1)
in <- listJob{
remote: path,
depth: maxLevel - 1,
}
traversing.Wait()
close(in)
wg.Wait()
close(errs)
// return the first error returned or nil
return <-errs
}
// DirTree is a map of directories to entries
type DirTree map[string]fs.DirEntries
// parentDir finds the parent directory of path
func parentDir(entryPath string) string {
dirPath := path.Dir(entryPath)
if dirPath == "." {
dirPath = ""
}
return dirPath
}
// add an entry to the tree
func (dt DirTree) add(entry fs.DirEntry) {
dirPath := parentDir(entry.Remote())
dt[dirPath] = append(dt[dirPath], entry)
}
// add a directory entry to the tree
func (dt DirTree) addDir(entry fs.DirEntry) {
dt.add(entry)
// create the directory itself if it doesn't exist already
dirPath := entry.Remote()
if _, ok := dt[dirPath]; !ok {
dt[dirPath] = nil
}
}
// Find returns the DirEntry for filePath or nil if not found
func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) {
parentPath = parentDir(filePath)
for _, entry := range dt[parentPath] {
if entry.Remote() == filePath {
return parentPath, entry
}
}
return parentPath, nil
}
// check that dirPath has a *Dir in its parent
func (dt DirTree) checkParent(root, dirPath string) {
if dirPath == root {
return
}
parentPath, entry := dt.Find(dirPath)
if entry != nil {
return
}
dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now()))
dt.checkParent(root, parentPath)
}
// check every directory in the tree has *Dir in its parent
func (dt DirTree) checkParents(root string) {
for dirPath := range dt {
dt.checkParent(root, dirPath)
}
}
// Sort sorts all the Entries
func (dt DirTree) Sort() {
for _, entries := range dt {
sort.Stable(entries)
}
}
// Dirs returns the directories in sorted order
func (dt DirTree) Dirs() (dirNames []string) {
for dirPath := range dt {
dirNames = append(dirNames, dirPath)
}
sort.Strings(dirNames)
return dirNames
}
// Prune remove directories from a directory tree. dirNames contains
// all directories to remove as keys, with true as values. dirNames
// will be modified in the function.
func (dt DirTree) Prune(dirNames map[string]bool) error {
// We use map[string]bool to avoid recursion (and potential
// stack exhaustion).
// First we need delete directories from their parents.
for dName, remove := range dirNames {
if !remove {
// Currently all values should be
// true, therefore this should not
// happen. But this makes function
// more predictable.
fs.Infof(dName, "Directory in the map for prune, but the value is false")
continue
}
if dName == "" {
// if dName is root, do nothing (no parent exist)
continue
}
parent := parentDir(dName)
// It may happen that dt does not have a dName key,
// since directory was excluded based on a filter. In
// such case the loop will be skipped.
// The loop is labelled so that the break below exits the
// range loop rather than just the enclosing switch.
parentLoop:
for i, entry := range dt[parent] {
switch x := entry.(type) {
case fs.Directory:
if x.Remote() == dName {
// the slice is not sorted yet
// to delete item
// a) replace it with the last one
dt[parent][i] = dt[parent][len(dt[parent])-1]
// b) remove last
dt[parent] = dt[parent][:len(dt[parent])-1]
// we modify the slice within the loop, so stop
// iterating immediately (a bare break would only
// exit the switch)
break parentLoop
}
case fs.Object:
// do nothing
default:
return errors.Errorf("unknown object type %T", entry)
}
}
}
for len(dirNames) > 0 {
// According to golang specs, if new keys were added
// during range iteration, they may be skipped.
for dName, remove := range dirNames {
if !remove {
fs.Infof(dName, "Directory in the map for prune, but the value is false")
continue
}
// First, add all subdirectories to dirNames.
// It may happen that dt[dName] does not exist.
// If so, the loop will be skipped.
for _, entry := range dt[dName] {
switch x := entry.(type) {
case fs.Directory:
excludeDir := x.Remote()
dirNames[excludeDir] = true
case fs.Object:
// do nothing
default:
return errors.Errorf("unknown object type %T", entry)
}
}
// Then remove current directory from DirTree
delete(dt, dName)
// and from dirNames
delete(dirNames, dName)
}
}
return nil
}
// String emits a simple representation of the DirTree
func (dt DirTree) String() string {
out := new(bytes.Buffer)
for _, dir := range dt.Dirs() {
_, _ = fmt.Fprintf(out, "%s/\n", dir)
for _, entry := range dt[dir] {
flag := ""
if _, ok := entry.(fs.Directory); ok {
flag = "/"
}
_, _ = fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag)
}
}
return out.String()
}
func walkRDirTree(f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (DirTree, error) {
dirs := make(DirTree)
// Entries can come in arbitrary order. We use toPrune to keep
// all directories to exclude later.
toPrune := make(map[string]bool)
includeDirectory := filter.Active.IncludeDirectory(f)
var mu sync.Mutex
err := listR(startPath, func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
for _, entry := range entries {
slashes := strings.Count(entry.Remote(), "/")
switch x := entry.(type) {
case fs.Object:
// Make sure we don't delete excluded files if not required
if includeAll || filter.Active.IncludeObject(x) {
if maxLevel < 0 || slashes <= maxLevel-1 {
dirs.add(x)
} else {
// Make sure we include any parent directories of excluded objects
dirPath := x.Remote()
for ; slashes > maxLevel-1; slashes-- {
dirPath = parentDir(dirPath)
}
dirs.checkParent(startPath, dirPath)
}
} else {
fs.Debugf(x, "Excluded from sync (and deletion)")
}
// Check if we need to prune a directory later.
if !includeAll && len(filter.Active.Opt.ExcludeFile) > 0 {
basename := path.Base(x.Remote())
if basename == filter.Active.Opt.ExcludeFile {
excludeDir := parentDir(x.Remote())
toPrune[excludeDir] = true
fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file")
}
}
case fs.Directory:
inc, err := includeDirectory(x.Remote())
if err != nil {
return err
}
if includeAll || inc {
if maxLevel < 0 || slashes <= maxLevel-1 {
if slashes == maxLevel-1 {
// Just add the object if at maxLevel
dirs.add(x)
} else {
dirs.addDir(x)
}
}
} else {
fs.Debugf(x, "Excluded from sync (and deletion)")
}
default:
return errors.Errorf("unknown object type %T", entry)
}
}
return nil
})
if err != nil {
return nil, err
}
dirs.checkParents(startPath)
if len(dirs) == 0 {
dirs[startPath] = nil
}
err = dirs.Prune(toPrune)
if err != nil {
return nil, err
}
dirs.Sort()
return dirs, nil
}
// Create a DirTree using List
func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (DirTree, error) {
dirs := make(DirTree)
fn := func(dirPath string, entries fs.DirEntries, err error) error {
if err == nil {
dirs[dirPath] = entries
}
return err
}
err := walk(f, path, includeAll, maxLevel, fn, listDir)
if err != nil {
return nil, err
}
return dirs, nil
}
// NewDirTree returns a DirTree filled with the directory listing
// using the parameters supplied.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// This is implemented by walkRDirTree if fs.Config.UseListR is true,
// f supports ListR and maxLevel is < 0 or > 1; walkNDirTree otherwise.
//
// NB (f, path) to be replaced by fs.Dir at some point
func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
return walkRDirTree(f, path, includeAll, maxLevel, ListR)
}
return walkNDirTree(f, path, includeAll, maxLevel, list.DirSorted)
}
func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error {
dirs, err := walkRDirTree(f, path, includeAll, maxLevel, listR)
if err != nil {
return err
}
skipping := false
skipPrefix := ""
emptyDir := fs.DirEntries{}
for _, dirPath := range dirs.Dirs() {
if skipping {
// Skip over directories as required
if strings.HasPrefix(dirPath, skipPrefix) {
continue
}
skipping = false
}
entries := dirs[dirPath]
if entries == nil {
entries = emptyDir
}
err = fn(dirPath, entries, nil)
if err == ErrorSkipDir {
skipping = true
skipPrefix = dirPath
if skipPrefix != "" {
skipPrefix += "/"
}
} else if err != nil {
return err
}
}
return nil
}
// GetAll runs Walk getting all the results
func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
objs = append(objs, x)
case fs.Directory:
dirs = append(dirs, x)
}
}
return nil
})
return
}
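// Usage sketch (illustrative addition, not part of the original
// source): collect every object and directory under a remote,
// assuming this package is imported as walk and f is an initialised
// fs.Fs.
//
//	objs, dirs, err := walk.GetAll(f, "", false, -1)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("%d objects in %d directories\n", len(objs), len(dirs))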
// ListRHelper is used in the implementation of ListR to accumulate DirEntries
type ListRHelper struct {
callback fs.ListRCallback
entries fs.DirEntries
}
// NewListRHelper should be called from ListR with the callback passed in
func NewListRHelper(callback fs.ListRCallback) *ListRHelper {
return &ListRHelper{
callback: callback,
}
}
// send sends the stored entries to the callback if there are >= max
// entries.
func (lh *ListRHelper) send(max int) (err error) {
if len(lh.entries) >= max {
err = lh.callback(lh.entries)
lh.entries = lh.entries[:0]
}
return err
}
// Add an entry to the stored entries and send them if there are more
// than a certain amount
func (lh *ListRHelper) Add(entry fs.DirEntry) error {
if entry == nil {
return nil
}
lh.entries = append(lh.entries, entry)
return lh.send(100)
}
// Flush the stored entries (if any) sending them to the callback
func (lh *ListRHelper) Flush() error {
return lh.send(1)
}
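// Usage sketch (illustrative addition, not part of the original
// source): a backend's ListR can batch entries through ListRHelper
// before invoking the callback. Here listAll is a hypothetical
// enumeration helper.
//
//	func (f *Fs) ListR(dir string, callback fs.ListRCallback) error {
//		helper := walk.NewListRHelper(callback)
//		err := f.listAll(dir, func(entry fs.DirEntry) error {
//			return helper.Add(entry)
//		})
//		if err != nil {
//			return err
//		}
//		return helper.Flush()
//	}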

313
vendor/github.com/ncw/rclone/lib/dircache/dircache.go generated vendored Executable file

@@ -0,0 +1,313 @@
// Package dircache provides a simple cache for caching directory to path lookups
package dircache
// Methods prefixed with _ do not take the lock themselves
import (
"log"
"strings"
"sync"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
cacheMu sync.RWMutex
cache map[string]string
invCache map[string]string
mu sync.Mutex
fs DirCacher // Interface to find and make stuff
trueRootID string // ID of the absolute root
root string // the path we are working on
rootID string // ID of the root directory
rootParentID string // ID of the root's parent directory
foundRoot bool // Whether we have found the root or not
}
// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
CreateDir(pathID, leaf string) (newID string, err error)
}
// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
d := &DirCache{
trueRootID: trueRootID,
root: root,
fs: fs,
}
d.Flush()
d.ResetRoot()
return d
}
// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
dc.cacheMu.RLock()
id, ok = dc.cache[path]
dc.cacheMu.RUnlock()
return
}
// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
dc.cacheMu.RLock()
path, ok = dc.invCache[id]
dc.cacheMu.RUnlock()
return
}
// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
dc.cacheMu.Lock()
dc.cache[path] = id
dc.invCache[id] = path
dc.cacheMu.Unlock()
}
// Flush the map of all data
func (dc *DirCache) Flush() {
dc.cacheMu.Lock()
dc.cache = make(map[string]string)
dc.invCache = make(map[string]string)
dc.cacheMu.Unlock()
}
// FlushDir flushes the map of all data starting with dir
//
// If dir is empty then this is equivalent to calling ResetRoot
func (dc *DirCache) FlushDir(dir string) {
if dir == "" {
dc.ResetRoot()
return
}
dc.cacheMu.Lock()
// Delete the root dir
ID, ok := dc.cache[dir]
if ok {
delete(dc.cache, dir)
delete(dc.invCache, ID)
}
// And any sub directories
dir += "/"
for key, ID := range dc.cache {
if strings.HasPrefix(key, dir) {
delete(dc.cache, key)
delete(dc.invCache, ID)
}
}
dc.cacheMu.Unlock()
}
// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
lastSlash := strings.LastIndex(path, "/")
if lastSlash >= 0 {
directory = path[:lastSlash]
leaf = path[lastSlash+1:]
} else {
directory = ""
leaf = path
}
return
}
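// Examples (illustrative addition, not part of the original source):
//
//	SplitPath("a/b/c") // directory "a/b", leaf "c"
//	SplitPath("c")     // directory "",    leaf "c"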
// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
// Look in the cache for the path, if found return the pathID
// If not found strip the leaf off the path and recurse
// Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
return dc._findDir(path, create)
}
// Look for the path in the cache (or as the root) - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
// fmt.Println("Finding",path,"create",create,"cache",cache)
// If it is the root, then return it
if path == "" {
// fmt.Println("Root")
return dc.rootID
}
// If it is in the cache then return it
pathID, ok := dc.Get(path)
if ok {
// fmt.Println("Cache hit on", path)
return pathID
}
return ""
}
// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
pathID = dc._findDirInCache(path)
if pathID != "" {
return pathID, nil
}
// Split the path into directory, leaf
directory, leaf := SplitPath(path)
// Recurse and find pathID for parent directory
parentPathID, err := dc._findDir(directory, create)
if err != nil {
return "", err
}
// Find the leaf in parentPathID
pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
if err != nil {
return "", err
}
// If not found create the directory if required or return an error
if !found {
if create {
pathID, err = dc.fs.CreateDir(parentPathID, leaf)
if err != nil {
return "", errors.Wrap(err, "failed to make directory")
}
} else {
return "", fs.ErrorDirNotFound
}
}
// Store the leaf directory in the cache
dc.Put(path, pathID)
// fmt.Println("Dir", path, "is", pathID)
return pathID, nil
}
// FindPath finds the leaf and directoryID from a path
//
// Do not call FindPath with the root directory - it will return an error
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
if path == "" {
err = errors.New("internal error: can't call FindPath with root directory")
return
}
dc.mu.Lock()
defer dc.mu.Unlock()
directory, leaf := SplitPath(path)
directoryID, err = dc._findDir(directory, create)
return
}
// FindRoot finds the root directory if not already found
//
// It resets the cache, re-rooting it at the root directory
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
dc.mu.Lock()
defer dc.mu.Unlock()
if dc.foundRoot {
return nil
}
rootID, err := dc._findDir(dc.root, create)
if err != nil {
return err
}
dc.foundRoot = true
dc.rootID = rootID
// Find the parent of the root while we still have the root
// directory tree cached
rootParentPath, _ := SplitPath(dc.root)
dc.rootParentID, _ = dc.Get(rootParentPath)
// Reset the tree based on dc.root
dc.Flush()
// Put the root directory in
dc.Put("", dc.rootID)
return nil
}
// FindRootAndPath finds the root first if not found then finds leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindRootAndPath(path string, create bool) (leaf, directoryID string, err error) {
err = dc.FindRoot(create)
if err != nil {
return
}
return dc.FindPath(path, create)
}
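// Usage sketch (illustrative addition, not part of the original
// source): a backend typically creates one DirCache and resolves
// paths through it. Here myFs is an assumed DirCacher implementation.
//
//	dc := dircache.New("path/to/root", "0", myFs)
//	leaf, dirID, err := dc.FindRootAndPath("docs/report.txt", true)
//	if err != nil {
//		return err
//	}
//	// dirID identifies "path/to/root/docs" and leaf is "report.txt"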
// FoundRoot returns whether the root directory has been found yet
//
// Call this from FindLeaf or CreateDir only
func (dc *DirCache) FoundRoot() bool {
return dc.foundRoot
}
// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
log.Fatalf("Internal Error: RootID() called before FindRoot")
}
return dc.rootID
}
// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
return "", errors.New("internal error: RootParentID() called before FindRoot")
}
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory")
}
return dc.rootParentID, nil
}
// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
dc.mu.Lock()
defer dc.mu.Unlock()
dc.foundRoot = false
dc.Flush()
// Put the true root in
dc.rootID = dc.trueRootID
// Put the root directory in
dc.Put("", dc.rootID)
}

545
vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil.go generated vendored Executable file

@@ -0,0 +1,545 @@
package oauthutil
import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"html/template"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/fshttp"
"github.com/pkg/errors"
"github.com/skratchdot/open-golang/open"
"golang.org/x/oauth2"
)
const (
// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
// code should be returned in the title bar of the browser, with the page text
// prompting the user to copy the code and paste it in the application.
TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"
// bindPort is the port that we bind the local webserver to
bindPort = "53682"
// bindAddress is binding for local webserver when active
bindAddress = "127.0.0.1:" + bindPort
// RedirectURL is redirect to local webserver when active
RedirectURL = "http://" + bindAddress + "/"
// RedirectPublicURL is redirect to local webserver when active with public name
RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
// RedirectLocalhostURL is redirect to local webserver when active with localhost
RedirectLocalhostURL = "http://localhost:" + bindPort + "/"
// AuthResponse is a template to handle the redirect URL for oauth requests
AuthResponse = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</title>
</head>
<body>
<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
<hr>
<pre style="width: 750px; white-space: pre-wrap;">
{{ if eq .OK false }}
Error: {{ .AuthError.Name }}<br>
{{ if .AuthError.Description }}Description: {{ .AuthError.Description }}<br>{{ end }}
{{ if .AuthError.Code }}Code: {{ .AuthError.Code }}<br>{{ end }}
{{ if .AuthError.HelpURL }}Look here for help: <a href="{{ .AuthError.HelpURL }}">{{ .AuthError.HelpURL }}</a><br>{{ end }}
{{ else }}
{{ if .Code }}
Please copy this code into rclone:
{{ .Code }}
{{ else }}
All done. Please go back to rclone.
{{ end }}
{{ end }}
</pre>
</body>
</html>
`
)
// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
AccessToken string
RefreshToken string
Expiry time.Time
}
// GetToken returns the token saved in the config file under
// section name.
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
tokenString, ok := m.Get(config.ConfigToken)
if !ok || tokenString == "" {
return nil, errors.New("empty token found - please run rclone config again")
}
token := new(oauth2.Token)
err := json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
// if it has data then return it
if token.AccessToken != "" {
return token, nil
}
// otherwise try parsing as oldToken
oldtoken := new(oldToken)
err = json.Unmarshal([]byte(tokenString), oldtoken)
if err != nil {
return nil, err
}
// Fill in result into new token
token.AccessToken = oldtoken.AccessToken
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = PutToken(name, m, token, false)
if err != nil {
return nil, err
}
return token, nil
}
// PutToken stores the token in the config file
//
// This saves the config file if it changes
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old, ok := m.Get(config.ConfigToken)
if !ok || tokenString != old {
err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
if newSection && err != nil {
fs.Debugf(name, "Added new token to config, still needs to be saved")
} else if err != nil {
fs.Errorf(nil, "Failed to save new token in config file: %v", err)
} else {
fs.Debugf(name, "Saved new token in config file")
}
}
return nil
}
// TokenSource stores updated tokens in the config file
type TokenSource struct {
mu sync.Mutex
name string
m configmap.Mapper
tokenSource oauth2.TokenSource
token *oauth2.Token
config *oauth2.Config
ctx context.Context
expiryTimer *time.Timer // signals whenever the token expires
}
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *TokenSource) Token() (*oauth2.Token, error) {
ts.mu.Lock()
defer ts.mu.Unlock()
// Make a new token source if required
if ts.tokenSource == nil {
ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
}
token, err := ts.tokenSource.Token()
if err != nil {
return nil, err
}
changed := *token != *ts.token
ts.token = token
if changed {
// Bump on the expiry timer if it is set
if ts.expiryTimer != nil {
ts.expiryTimer.Reset(ts.timeToExpiry())
}
err = PutToken(ts.name, ts.m, token, false)
if err != nil {
return nil, err
}
}
return token, nil
}
// Invalidate invalidates the token
func (ts *TokenSource) Invalidate() {
ts.mu.Lock()
ts.token.AccessToken = ""
ts.mu.Unlock()
}
// timeToExpiry returns how long until the token expires
//
// Call with the lock held
func (ts *TokenSource) timeToExpiry() time.Duration {
t := ts.token
if t == nil {
return 0
}
if t.Expiry.IsZero() {
return 3E9 * time.Second // ~95 years
}
return t.Expiry.Sub(time.Now())
}
// OnExpiry returns a channel which has the time written to it when
// the token expires. Note that there is only one channel so if
// attaching multiple goroutines it will only signal to one of them.
func (ts *TokenSource) OnExpiry() <-chan time.Time {
ts.mu.Lock()
defer ts.mu.Unlock()
if ts.expiryTimer == nil {
ts.expiryTimer = time.NewTimer(ts.timeToExpiry())
}
return ts.expiryTimer.C
}
// Check interface satisfied
var _ oauth2.TokenSource = (*TokenSource)(nil)
// Context returns a context with our HTTP Client baked in for oauth2
func Context(client *http.Client) context.Context {
return context.WithValue(context.Background(), oauth2.HTTPClient, client)
}
// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank.
// If any value is overridden, true is returned.
// The origConfig is copied.
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
newConfig = new(oauth2.Config)
*newConfig = *origConfig
changed = false
ClientID, ok := m.Get(config.ConfigClientID)
if ok && ClientID != "" {
newConfig.ClientID = ClientID
changed = true
}
ClientSecret, ok := m.Get(config.ConfigClientSecret)
if ok && ClientSecret != "" {
newConfig.ClientSecret = ClientSecret
changed = true
}
AuthURL, ok := m.Get(config.ConfigAuthURL)
if ok && AuthURL != "" {
newConfig.Endpoint.AuthURL = AuthURL
changed = true
}
TokenURL, ok := m.Get(config.ConfigTokenURL)
if ok && TokenURL != "" {
newConfig.Endpoint.TokenURL = TokenURL
changed = true
}
return newConfig, changed
}
// NewClientWithBaseClient gets a token from the config file and
// configures a Client with it. It returns the client and a
// TokenSource which Invalidate may need to be called on. It uses the
// httpClient passed in as the base client.
func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
config, _ = overrideCredentials(name, m, config)
token, err := GetToken(name, m)
if err != nil {
return nil, nil, err
}
// Set our own http client in the context
ctx := Context(baseClient)
// Wrap the TokenSource in our TokenSource which saves changed
// tokens in the config file
ts := &TokenSource{
name: name,
m: m,
token: token,
config: config,
ctx: ctx,
}
return oauth2.NewClient(ctx, ts), ts, nil
}
// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config))
}
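// Usage sketch (illustrative addition, not part of the original
// source): a backend constructor can obtain an authenticated
// *http.Client like this, where oauthCfg is an assumed *oauth2.Config
// for the service.
//
//	client, ts, err := oauthutil.NewClient(name, m, oauthCfg)
//	if err != nil {
//		return nil, errors.Wrap(err, "failed to configure client")
//	}
//	// keep ts if Invalidate may need to be called later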
// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, m, nil, config, true, opts)
}
// ConfigNoOffline does the same as Config but does not pass the
// "access_type=offline" parameter.
func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, m, nil, config, false, opts)
}
// ConfigErrorCheck does the same as Config, but allows the backend to pass an error handling function
// This function gets called with the request made to rclone as a parameter if no code was found
func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, m, errorHandler, config, true, opts)
}
func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
auto, ok := m.Get(config.ConfigAutomatic)
automatic := ok && auto != ""
// See if already have a token
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return nil
}
}
// Detect whether we should use internal web server
useWebServer := false
switch oauthConfig.RedirectURL {
case RedirectURL, RedirectPublicURL, RedirectLocalhostURL:
if changed {
fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL)
}
useWebServer = true
if automatic {
break
}
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine\n")
auto := config.Confirm()
if !auto {
fmt.Printf("For this to work, you will need rclone available on a machine that has a web browser available.\n")
fmt.Printf("Execute the following on your machine:\n")
if changed {
fmt.Printf("\trclone authorize %q %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
} else {
fmt.Printf("\trclone authorize %q\n", id)
}
fmt.Println("Then paste the result below:")
code := ""
for code == "" {
fmt.Printf("result> ")
code = strings.TrimSpace(config.ReadLine())
}
token := &oauth2.Token{}
err := json.Unmarshal([]byte(code), token)
if err != nil {
return err
}
return PutToken(name, m, token, false)
}
case TitleBarRedirectURL:
useWebServer = automatic
if !automatic {
fmt.Printf("Use auto config?\n")
fmt.Printf(" * Say Y if not sure\n")
fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
useWebServer = config.Confirm()
}
if useWebServer {
// copy the config and set to use the internal webserver
configCopy := *oauthConfig
oauthConfig = &configCopy
oauthConfig.RedirectURL = RedirectURL
}
}
// Make random state
stateBytes := make([]byte, 16)
_, err := rand.Read(stateBytes)
if err != nil {
return err
}
state := fmt.Sprintf("%x", stateBytes)
if offline {
opts = append(opts, oauth2.AccessTypeOffline)
}
authURL := oauthConfig.AuthCodeURL(state, opts...)
// Prepare webserver
server := authServer{
state: state,
bindAddress: bindAddress,
authURL: authURL,
errorHandler: errorHandler,
}
if useWebServer {
server.code = make(chan string, 1)
server.err = make(chan error, 1)
go server.Start()
defer server.Stop()
authURL = "http://" + bindAddress + "/auth"
}
// Generate a URL for the user to visit for authorization.
_ = open.Start(authURL)
fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
fmt.Printf("Log in and authorize rclone for access\n")
var authCode string
if useWebServer {
// Read the code, and exchange it for a token.
fmt.Printf("Waiting for code...\n")
authCode = <-server.code
authError := <-server.err
if authCode != "" {
fmt.Printf("Got code\n")
} else {
if authError != nil {
return authError
}
return errors.New("failed to get code")
}
} else {
// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode = config.ReadLine()
}
token, err := oauthConfig.Exchange(oauth2.NoContext, authCode)
if err != nil {
return errors.Wrap(err, "failed to get token")
}
// Print code if we do automatic retrieval
if automatic {
result, err := json.Marshal(token)
if err != nil {
return errors.Wrap(err, "failed to marshal token")
}
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
}
return PutToken(name, m, token, true)
}
// Local web server for collecting auth
type authServer struct {
state string
listener net.Listener
bindAddress string
code chan string
err chan error
authURL string
server *http.Server
errorHandler func(*http.Request) AuthError
}
// AuthError gets returned by the backend's errorHandler function
type AuthError struct {
Name string
Description string
Code string
HelpURL string
}
// AuthResponseData can fill the AuthResponse template
type AuthResponseData struct {
OK bool // Failure or Success?
Code string // code to paste into rclone config
AuthError
}
// Start runs an internal web server to receive config details
func (s *authServer) Start() {
fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
s.server = &http.Server{
Addr: s.bindAddress,
Handler: mux,
}
s.server.SetKeepAlivesEnabled(false)
mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
http.Error(w, "", 404)
return
})
mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect)
return
})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "text/html")
fs.Debugf(nil, "Received request on auth server")
code := req.FormValue("code")
var err error
var t = template.Must(template.New("authResponse").Parse(AuthResponse))
resp := AuthResponseData{AuthError: AuthError{}}
if code != "" {
state := req.FormValue("state")
if state != s.state {
fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
resp.OK = false
resp.AuthError = AuthError{
Name: "Auth State doesn't match",
}
} else {
fs.Debugf(nil, "Successfully got code")
resp.OK = true
if s.code == nil {
resp.Code = code
}
}
} else {
fs.Debugf(nil, "No code found on request")
var authError AuthError
if s.errorHandler == nil {
authError = AuthError{
Name: "Auth Error",
Description: "No code found returned by remote server.",
}
} else {
authError = s.errorHandler(req)
}
err = fmt.Errorf("Error: %s\nCode: %s\nDescription: %s\nHelp: %s",
authError.Name, authError.Code, authError.Description, authError.HelpURL)
resp.OK = false
resp.AuthError = authError
w.WriteHeader(500)
}
if err := t.Execute(w, resp); err != nil {
fs.Debugf(nil, "Could not execute template for web response.")
}
if s.code != nil {
s.code <- code
s.err <- err
}
})
var err error
s.listener, err = net.Listen("tcp", s.bindAddress)
if err != nil {
log.Fatalf("Failed to start auth webserver: %v", err)
}
err = s.server.Serve(s.listener)
fs.Debugf(nil, "Closed auth server with error: %v", err)
}

19
vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_new.go generated vendored Executable file

@@ -0,0 +1,19 @@
// oauthutil parts go1.8+
//+build go1.8
package oauthutil
import "github.com/ncw/rclone/fs"
func (s *authServer) Stop() {
fs.Debugf(nil, "Closing auth server")
if s.code != nil {
close(s.code)
s.code = nil
}
_ = s.listener.Close()
// close the server
_ = s.server.Close()
}

16
vendor/github.com/ncw/rclone/lib/oauthutil/oauthutil_old.go generated vendored Executable file

@@ -0,0 +1,16 @@
// oauthutil parts for pre go1.8
//+build !go1.8
package oauthutil
import "github.com/ncw/rclone/fs"
func (s *authServer) Stop() {
fs.Debugf(nil, "Closing auth server")
if s.code != nil {
close(s.code)
s.code = nil
}
_ = s.listener.Close()
}

69
vendor/github.com/ncw/rclone/lib/oauthutil/renew.go generated vendored Executable file

@@ -0,0 +1,69 @@
package oauthutil
import (
"sync/atomic"
"github.com/ncw/rclone/fs"
)
// Renew allows tokens to be renewed on expiry if uploads are in progress.
type Renew struct {
name string // name to use in logs
ts *TokenSource // token source that needs renewing
uploads int32 // number of uploads in progress - atomic access required
run func() error // a transaction to run to renew the token on
}
// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to run a transaction to do this.
//
// It will only renew the token if the number of uploads > 0
func NewRenew(name string, ts *TokenSource, run func() error) *Renew {
r := &Renew{
name: name,
ts: ts,
run: run,
}
go r.renewOnExpiry()
return r
}
// renewOnExpiry renews the token whenever it expires. Useful when there
// are lots of uploads in progress and the token doesn't get renewed.
// Amazon seem to cancel your uploads if you don't renew your token
// for 2hrs.
func (r *Renew) renewOnExpiry() {
expiry := r.ts.OnExpiry()
for {
<-expiry
uploads := atomic.LoadInt32(&r.uploads)
if uploads != 0 {
fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
// Do a transaction
err := r.run()
if err == nil {
fs.Debugf(r.name, "Token refresh successful")
} else {
fs.Errorf(r.name, "Token refresh failed: %v", err)
}
} else {
fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
}
}
}
// Start should be called before starting an upload
func (r *Renew) Start() {
atomic.AddInt32(&r.uploads, 1)
}
// Stop should be called after finishing an upload
func (r *Renew) Stop() {
atomic.AddInt32(&r.uploads, -1)
}
// Invalidate invalidates the token source
func (r *Renew) Invalidate() {
r.ts.Invalidate()
}
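// Usage sketch (illustrative addition, not part of the original
// source): bracket uploads with Start/Stop so the token keeps being
// refreshed while they run. pingRemote and doUpload are hypothetical.
//
//	renew := oauthutil.NewRenew("mybackend", ts, func() error {
//		return pingRemote() // any cheap authenticated transaction
//	})
//	renew.Start()
//	defer renew.Stop()
//	return doUpload()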

368
vendor/github.com/ncw/rclone/lib/pacer/pacer.go generated vendored Executable file

@@ -0,0 +1,368 @@
// Package pacer makes pacing and retrying API calls easy
package pacer
import (
"math/rand"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
)
// Pacer state
type Pacer struct {
mu sync.Mutex // Protecting read/writes
minSleep time.Duration // minimum sleep time
maxSleep time.Duration // maximum sleep time
decayConstant uint // decay constant
attackConstant uint // attack constant
pacer chan struct{} // To pace the operations
sleepTime time.Duration // Time to sleep for each transaction
retries int // Max number of retries
maxConnections int // Maximum number of concurrent connections
connTokens chan struct{} // Connection tokens
calculatePace func(bool) // switchable pacing algorithm - call with mu held
consecutiveRetries int // number of consecutive retries
}
// Type is for selecting different pacing algorithms
type Type int
const (
// DefaultPacer is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors then
// sleeptime decays according to the decay constant as set
// with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or
// above that set with SetMaxSleep.
DefaultPacer = Type(iota)
// AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to 0..2**retries seconds.
//
// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
AmazonCloudDrivePacer
// GoogleDrivePacer is a specialised pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with
// randomization. Normally operations are paced at the
// interval set with SetMinSleep. On errors the sleep timer
// is set to (2 ^ n) + random_number_milliseconds seconds
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
GoogleDrivePacer
)
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)
// New returns a Pacer with sensible defaults
func New() *Pacer {
p := &Pacer{
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
attackConstant: 1,
retries: fs.Config.LowLevelRetries,
pacer: make(chan struct{}, 1),
}
p.sleepTime = p.minSleep
p.SetPacer(DefaultPacer)
p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)
// Put the first pacing token in
p.pacer <- struct{}{}
return p
}
// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.sleepTime = t
return p
}
// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
p.mu.Lock()
defer p.mu.Unlock()
return p.sleepTime
}
// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.minSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxSleep = t
p.sleepTime = p.minSleep
return p
}
// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.maxConnections = n
if n <= 0 {
p.connTokens = nil
} else {
p.connTokens = make(chan struct{}, n)
for i := 0; i < n; i++ {
p.connTokens <- struct{}{}
}
}
return p
}
// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential. 1 halves the sleep time, 0 goes straight to the minimum
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.decayConstant = decay
return p
}
// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// bigger for slower attack, exponential. 1 doubles the sleep time, 0 goes straight to the maximum
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.attackConstant = attack
return p
}
// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
p.retries = retries
return p
}
// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
p.mu.Lock()
defer p.mu.Unlock()
switch t {
case AmazonCloudDrivePacer:
p.calculatePace = p.acdPacer
case GoogleDrivePacer:
p.calculatePace = p.drivePacer
default:
p.calculatePace = p.defaultPacer
}
return p
}
// Start a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
// pacer starts with a token in the channel and whenever we
// take one out we put another one in sleepTime later. We
// could do this with a Ticker more accurately, but then we'd
// have to work out how not to run it when it wasn't needed
<-p.pacer
if p.maxConnections > 0 {
<-p.connTokens
}
p.mu.Lock()
// Restart the timer
go func(t time.Duration) {
// fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
p.pacer <- struct{}{}
}(p.sleepTime)
p.mu.Unlock()
}
// defaultPacer implements a truncated exponential up and down
// pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
oldSleepTime := p.sleepTime
if retry {
if p.attackConstant == 0 {
p.sleepTime = p.maxSleep
} else {
p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
}
if p.sleepTime > p.maxSleep {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
if p.sleepTime != oldSleepTime {
fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 9 {
consecutiveRetries = 9
}
// consecutiveRetries starts at 1 so
// maxSleep is 2**(consecutiveRetries-1) seconds
maxSleep := time.Second << uint(consecutiveRetries-1)
// actual sleep is random from 0..maxSleep
p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
consecutiveRetries := p.consecutiveRetries
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 5 {
consecutiveRetries = 5
}
// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
if p.maxConnections > 0 {
p.connTokens <- struct{}{}
}
p.mu.Lock()
if retry {
p.consecutiveRetries++
} else {
p.consecutiveRetries = 0
}
p.calculatePace(retry)
p.mu.Unlock()
}
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 1; i <= retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
if !retry {
break
}
fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
}
if retry {
err = fserrors.RetryError(err)
}
return err
}
// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
p.mu.Lock()
retries := p.retries
p.mu.Unlock()
return p.call(fn, retries)
}
// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
return p.call(fn, 1)
}
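// Usage sketch (illustrative addition, not part of the original
// source): pace and retry an API call. apiCall and shouldRetry are
// hypothetical stand-ins for backend-specific code.
//
//	p := pacer.New().SetMinSleep(20 * time.Millisecond)
//	err := p.Call(func() (bool, error) {
//		resp, err := apiCall()
//		return shouldRetry(resp, err), err
//	})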

31
vendor/github.com/ncw/rclone/lib/pacer/tokens.go generated vendored Executable file

@@ -0,0 +1,31 @@
// Tokens for controlling concurrency
package pacer
// TokenDispenser is for controlling concurrency
type TokenDispenser struct {
tokens chan struct{}
}
// NewTokenDispenser makes a pool of n tokens
func NewTokenDispenser(n int) *TokenDispenser {
td := &TokenDispenser{
tokens: make(chan struct{}, n),
}
// Fill up the upload tokens
for i := 0; i < n; i++ {
td.tokens <- struct{}{}
}
return td
}
// Get gets a token from the pool - don't forget to return it with Put
func (td *TokenDispenser) Get() {
<-td.tokens
return
}
// Put returns a token
func (td *TokenDispenser) Put() {
td.tokens <- struct{}{}
}
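// Usage sketch (illustrative addition, not part of the original
// source): allow at most 4 concurrent uploads, where upload is a
// hypothetical worker function.
//
//	td := pacer.NewTokenDispenser(4)
//	for i := 0; i < 10; i++ {
//		go func() {
//			td.Get()
//			defer td.Put()
//			upload()
//		}()
//	}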

28
vendor/github.com/ncw/rclone/lib/readers/counting_reader.go generated vendored Executable file

@@ -0,0 +1,28 @@
package readers
import "io"
// NewCountingReader returns a CountingReader, which will read from the given
// reader while keeping track of how many bytes were read.
func NewCountingReader(in io.Reader) *CountingReader {
return &CountingReader{in: in}
}
// CountingReader holds a reader and a read count of how many bytes were read
// so far.
type CountingReader struct {
in io.Reader
read uint64
}
// Read reads from the underlying reader.
func (cr *CountingReader) Read(b []byte) (int, error) {
n, err := cr.in.Read(b)
cr.read += uint64(n)
return n, err
}
// BytesRead returns how many bytes were read from the underlying reader so far.
func (cr *CountingReader) BytesRead() uint64 {
return cr.read
}
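// Usage sketch (illustrative addition, not part of the original
// source): count how many bytes an io.Copy consumed from src.
//
//	cr := readers.NewCountingReader(src)
//	if _, err := io.Copy(dst, cr); err != nil {
//		return err
//	}
//	fmt.Printf("copied %d bytes\n", cr.BytesRead())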

22
vendor/github.com/ncw/rclone/lib/readers/limited.go generated vendored Executable file

@@ -0,0 +1,22 @@
package readers
import "io"
// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
type LimitedReadCloser struct {
*io.LimitedReader
io.Closer
}
// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
// limit it to reading limit bytes. If limit < 0 then it does not
// wrap rc, it just returns it.
func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
if limit < 0 {
return rc
}
return &LimitedReadCloser{
LimitedReader: &io.LimitedReader{R: rc, N: limit},
Closer: rc,
}
}

18
vendor/github.com/ncw/rclone/lib/readers/readfill.go generated vendored Executable file

@@ -0,0 +1,18 @@
package readers
import "io"
// ReadFill reads as much data from r into buf as it can
//
// It reads until the buffer is full or r.Read returns an error.
//
// This is like io.ReadFull but for when you just want as much data
// as possible, not an exact size of block.
func ReadFill(r io.Reader, buf []byte) (n int, err error) {
var nn int
for n < len(buf) && err == nil {
nn, err = r.Read(buf[n:])
n += nn
}
return n, err
}
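// Usage sketch (illustrative addition, not part of the original
// source): unlike io.ReadFull, a short read at EOF is not an error in
// itself - n is the number of bytes read and err reports why reading
// stopped.
//
//	buf := make([]byte, 4096)
//	n, err := readers.ReadFill(r, buf)
//	process(buf[:n]) // process is hypothetical
//	if err != nil && err != io.EOF {
//		return err
//	}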

96
vendor/github.com/ncw/rclone/lib/readers/repeatable.go generated vendored Executable file

@@ -0,0 +1,96 @@
package readers
import (
"io"
"github.com/pkg/errors"
)
// A RepeatableReader implements io.ReadSeeker. It allows seeking back
// and forth within cached data, but will only read data from the
// internal Reader as necessary, and will play nicely with the Account
// and io.LimitedReader to reflect the current speed.
type RepeatableReader struct {
in io.Reader // Input reader
i int64 // current reading index
b []byte // internal cache buffer
}
var _ io.ReadSeeker = (*RepeatableReader)(nil)
// Seek implements the io.Seeker interface.
// If the seek position is past the cached buffer length the function
// returns the maximum offset that can be used and a
// "fs.RepeatableReader.Seek: offset is unavailable" error
func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
var abs int64
cacheLen := int64(len(r.b))
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs = r.i + offset
case io.SeekEnd:
abs = cacheLen + offset
default:
return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
}
if abs < 0 {
return 0, errors.New("fs.RepeatableReader.Seek: negative position")
}
if abs > cacheLen {
return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
}
r.i = abs
return abs, nil
}
// Read reads data from the original Reader into b.
// Data is either served from the underlying Reader or from the cache
// if it was already read.
func (r *RepeatableReader) Read(b []byte) (n int, err error) {
cacheLen := int64(len(r.b))
if r.i == cacheLen {
n, err = r.in.Read(b)
if n > 0 {
r.b = append(r.b, b[:n]...)
}
} else {
n = copy(b, r.b[r.i:])
}
r.i += int64(n)
return n, err
}
// NewRepeatableReader creates a new repeatable reader from Reader r
func NewRepeatableReader(r io.Reader) *RepeatableReader {
return &RepeatableReader{in: r}
}
// NewRepeatableReaderSized creates a new repeatable reader from Reader r
// with an initial buffer of size.
func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
return &RepeatableReader{
in: r,
b: make([]byte, 0, size),
}
}
// NewRepeatableLimitReader creates a new repeatable reader from Reader r
// with an initial buffer of size, wrapped in an io.LimitReader to read
// only size bytes.
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
}
// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
// using the buffer passed in.
func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
return &RepeatableReader{
in: r,
b: buf[:0],
}
}
// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
// Reader r and buf, wrapped in an io.LimitReader to read only size bytes.
func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
return NewRepeatableReaderBuffer(io.LimitReader(r, int64(size)), buf)
}
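// Usage sketch (illustrative addition, not part of the original
// source): read some data, seek back and read it again from the
// cache, where src is any io.Reader.
//
//	rr := readers.NewRepeatableReader(src)
//	first := make([]byte, 16)
//	_, _ = io.ReadFull(rr, first)
//	_, _ = rr.Seek(0, io.SeekStart) // rewind into cached data
//	again := make([]byte, 16)
//	_, _ = io.ReadFull(rr, again) // served from the cache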

421
vendor/github.com/ncw/rclone/lib/rest/rest.go generated vendored Executable file

@@ -0,0 +1,421 @@
// Package rest implements a simple REST wrapper
//
// All methods are safe for concurrent calling.
package rest
import (
"bytes"
"encoding/json"
"encoding/xml"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"sync"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Client contains the info to sustain the API
type Client struct {
mu sync.RWMutex
c *http.Client
rootURL string
errorHandler func(resp *http.Response) error
headers map[string]string
signer SignerFn
}
// NewClient takes an oauth http.Client and makes a new api instance
func NewClient(c *http.Client) *Client {
api := &Client{
c: c,
errorHandler: defaultErrorHandler,
headers: make(map[string]string),
}
return api
}
// ReadBody reads resp.Body into result, closing the body
func ReadBody(resp *http.Response) (result []byte, err error) {
defer fs.CheckClose(resp.Body, &err)
return ioutil.ReadAll(resp.Body)
}
// defaultErrorHandler doesn't attempt to parse the http body, just
// returns it in the error message
func defaultErrorHandler(resp *http.Response) (err error) {
body, err := ReadBody(resp)
if err != nil {
return errors.Wrap(err, "error reading error out of body")
}
return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
// SetErrorHandler sets the handler to decode an error response when
// the HTTP status code is not 2xx. The handler should close resp.Body.
func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.errorHandler = fn
return api
}
// SetRoot sets the default RootURL. You can override this on a per
// call basis using the RootURL field in Opts.
func (api *Client) SetRoot(RootURL string) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.rootURL = RootURL
return api
}
// SetHeader sets a header for all requests
func (api *Client) SetHeader(key, value string) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.headers[key] = value
return api
}
// RemoveHeader unsets a header for all requests
func (api *Client) RemoveHeader(key string) *Client {
api.mu.Lock()
defer api.mu.Unlock()
delete(api.headers, key)
return api
}
// SignerFn is used to sign an outgoing request
type SignerFn func(*http.Request) error
// SetSigner sets a signer for all requests
func (api *Client) SetSigner(signer SignerFn) *Client {
api.mu.Lock()
defer api.mu.Unlock()
api.signer = signer
return api
}
// SetUserPass creates an Authorization header for all requests with
// the UserName and Password passed in
func (api *Client) SetUserPass(UserName, Password string) *Client {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.SetBasicAuth(UserName, Password)
api.SetHeader("Authorization", req.Header.Get("Authorization"))
return api
}
// SetCookie creates a Cookie header for all requests with the supplied
// cookies passed in.
// All cookies have to be supplied at once; all cookies will be
// overwritten on a new call to the method.
func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
req, _ := http.NewRequest("GET", "http://example.com", nil)
for _, ck := range cks {
req.AddCookie(ck)
}
api.SetHeader("Cookie", req.Header.Get("Cookie"))
return api
}
// Opts contains parameters for Call, CallJSON etc
type Opts struct {
Method string // GET, POST etc
Path string // relative to RootURL
RootURL string // override RootURL passed into SetRoot()
Body io.Reader
NoResponse bool // set to close Body
ContentType string
ContentLength *int64
ContentRange string
ExtraHeaders map[string]string
UserName string // username for Basic Auth
Password string // password for Basic Auth
Options []fs.OpenOption
IgnoreStatus bool // if set then we don't check error status or parse error body
MultipartParams url.Values // if set do multipart form upload with attached file
MultipartMetadataName string // ..this is used for the name of the metadata form part if set
MultipartContentName string // ..name of the parameter which is the attached file
MultipartFileName string // ..name of the file for the attached file
Parameters url.Values // any parameters for the final URL
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
Close bool // set to close the connection after this transaction
NoRedirect bool // if this is set then the client won't follow redirects
}
// Copy creates a copy of the options
func (o *Opts) Copy() *Opts {
newOpts := *o
return &newOpts
}
// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
defer fs.CheckClose(resp.Body, &err)
decoder := json.NewDecoder(resp.Body)
return decoder.Decode(result)
}
// DecodeXML decodes resp.Body into result
func DecodeXML(resp *http.Response, result interface{}) (err error) {
defer fs.CheckClose(resp.Body, &err)
decoder := xml.NewDecoder(resp.Body)
return decoder.Decode(result)
}
// ClientWithHeaderReset makes a new http client which resets the
// headers passed in on redirect
//
// FIXME This is now unnecessary with go1.8
func ClientWithHeaderReset(c *http.Client, headers map[string]string) *http.Client {
if len(headers) == 0 {
return c
}
clientCopy := *c
clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
// Reset the headers in the new request
for k, v := range headers {
if v != "" {
req.Header.Set(k, v)
}
}
return nil
}
return &clientCopy
}
// ClientWithNoRedirects makes a new http client which won't follow redirects
func ClientWithNoRedirects(c *http.Client) *http.Client {
clientCopy := *c
clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
return &clientCopy
}
// Call makes the call and returns the http.Response
//
// if err != nil then resp.Body will need to be closed
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
api.mu.RLock()
defer api.mu.RUnlock()
if opts == nil {
return nil, errors.New("call() called with nil opts")
}
url := api.rootURL
if opts.RootURL != "" {
url = opts.RootURL
}
if url == "" {
return nil, errors.New("RootURL not set")
}
url += opts.Path
if opts.Parameters != nil && len(opts.Parameters) > 0 {
url += "?" + opts.Parameters.Encode()
}
req, err := http.NewRequest(opts.Method, url, opts.Body)
if err != nil {
return
}
headers := make(map[string]string)
// Set default headers
for k, v := range api.headers {
headers[k] = v
}
if opts.ContentType != "" {
headers["Content-Type"] = opts.ContentType
}
if opts.ContentLength != nil {
req.ContentLength = *opts.ContentLength
}
if opts.ContentRange != "" {
headers["Content-Range"] = opts.ContentRange
}
if len(opts.TransferEncoding) != 0 {
req.TransferEncoding = opts.TransferEncoding
}
if opts.Close {
req.Close = true
}
// Set any extra headers
if opts.ExtraHeaders != nil {
for k, v := range opts.ExtraHeaders {
headers[k] = v
}
}
// add any options to the headers
fs.OpenOptionAddHeaders(opts.Options, headers)
// Now set the headers
for k, v := range headers {
if v != "" {
req.Header.Add(k, v)
}
}
if opts.UserName != "" || opts.Password != "" {
req.SetBasicAuth(opts.UserName, opts.Password)
}
var c *http.Client
if opts.NoRedirect {
c = ClientWithNoRedirects(api.c)
} else {
c = ClientWithHeaderReset(api.c, headers)
}
if api.signer != nil {
err = api.signer(req)
if err != nil {
return nil, errors.Wrap(err, "signer failed")
}
}
api.mu.RUnlock()
resp, err = c.Do(req)
api.mu.RLock()
if err != nil {
return nil, err
}
if !opts.IgnoreStatus {
if resp.StatusCode < 200 || resp.StatusCode > 299 {
err = api.errorHandler(resp)
if err.Error() == "" {
// replace empty errors with something
err = errors.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
}
return resp, err
}
}
if opts.NoResponse {
return resp, resp.Body.Close()
}
return resp, nil
}
// MultipartUpload creates an io.Reader which produces an encoded
// multipart form upload from the params and the reader passed in
//
// in - the body of the file
// params - the form parameters
// fileName - is the name of the attached file
// contentName - the name of the parameter for the file
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, error) {
bodyReader, bodyWriter := io.Pipe()
writer := multipart.NewWriter(bodyWriter)
contentType := writer.FormDataContentType()
// Pump the data in the background
go func() {
var err error
for key, vals := range params {
for _, val := range vals {
err = writer.WriteField(key, val)
if err != nil {
_ = bodyWriter.CloseWithError(errors.Wrap(err, "create metadata part"))
return
}
}
}
part, err := writer.CreateFormFile(contentName, fileName)
if err != nil {
_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to create form file"))
return
}
_, err = io.Copy(part, in)
if err != nil {
_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to copy data"))
return
}
err = writer.Close()
if err != nil {
_ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to close form"))
return
}
_ = bodyWriter.Close()
}()
return bodyReader, contentType, nil
}
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request
//
// If (opts.MultipartParams or opts.MultipartContentName) and
// opts.Body are set then CallJSON will do a multipart upload with a
// file attached. opts.MultipartContentName is the name of the
// parameter and opts.MultipartFileName is the name of the file. If
// MultipartContentName is set, and request != nil is supplied, then
// the request will be marshalled into JSON and added to the form with
// parameter name MultipartMetadataName.
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
return api.callCodec(opts, request, response, json.Marshal, DecodeJSON, "application/json")
}
// CallXML runs Call and decodes the body as a XML object into response (if not nil)
//
// If request is not nil then it will be XML encoded as the body of the request
//
// See CallJSON for a description of MultipartParams and related opts
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallXML(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
return api.callCodec(opts, request, response, xml.Marshal, DecodeXML, "application/xml")
}
type marshalFn func(v interface{}) ([]byte, error)
type decodeFn func(resp *http.Response, result interface{}) (err error)
func (api *Client) callCodec(opts *Opts, request interface{}, response interface{}, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) {
var requestBody []byte
// Marshal the request if given
if request != nil {
requestBody, err = marshal(request)
if err != nil {
return nil, err
}
// Set the body up as a marshalled object if no body passed in
if opts.Body == nil {
opts = opts.Copy()
opts.ContentType = contentType
opts.Body = bytes.NewBuffer(requestBody)
}
}
isMultipart := (opts.MultipartParams != nil || opts.MultipartMetadataName != "") && opts.Body != nil
if isMultipart {
params := opts.MultipartParams
if params == nil {
params = url.Values{}
}
if opts.MultipartMetadataName != "" {
params.Add(opts.MultipartMetadataName, string(requestBody))
}
opts = opts.Copy()
opts.Body, opts.ContentType, err = MultipartUpload(opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
if err != nil {
return nil, err
}
}
resp, err = api.Call(opts)
if err != nil {
return resp, err
}
if response == nil || opts.NoResponse {
return resp, nil
}
err = decode(resp, response)
return resp, err
}

27
vendor/github.com/ncw/rclone/lib/rest/url.go generated vendored Executable file
View File

@@ -0,0 +1,27 @@
package rest
import (
"net/url"
"github.com/pkg/errors"
)
// URLJoin joins a URL and a path returning a new URL
//
// path should be URL escaped
func URLJoin(base *url.URL, path string) (*url.URL, error) {
rel, err := url.Parse(path)
if err != nil {
return nil, errors.Wrapf(err, "Error parsing %q as URL", path)
}
return base.ResolveReference(rel), nil
}
// URLPathEscape escapes the string in using URL path escaping rules
//
// This mimics url.PathEscape, which is only available from Go 1.8
func URLPathEscape(in string) string {
var u url.URL
u.Path = in
return u.String()
}
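A short sketch combining the two helpers: escape a path first, then resolve it against a base URL (the values are illustrative):
```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/ncw/rclone/lib/rest"
)

func main() {
	base, err := url.Parse("https://example.com/files/")
	if err != nil {
		log.Fatal(err)
	}
	// Escape the path before joining so "a b" becomes "a%20b".
	u, err := rest.URLJoin(base, rest.URLPathEscape("dir/a b.txt"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // https://example.com/files/dir/a%20b.txt
}
```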

20
vendor/github.com/ncw/swift/COPYING generated vendored Executable file
View File

@@ -0,0 +1,20 @@
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

158
vendor/github.com/ncw/swift/README.md generated vendored Executable file
View File

@@ -0,0 +1,158 @@
Swift
=====
This package provides an easy-to-use library for interfacing with
Swift / OpenStack Object Storage / Rackspace Cloud Files from the Go
language
See here for package docs
http://godoc.org/github.com/ncw/swift
[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)
Install
-------
Use go to install the library
go get github.com/ncw/swift
Usage
-----
See here for full package docs
- http://godoc.org/github.com/ncw/swift
Here is a short example from the docs
```go
import "github.com/ncw/swift"
// Create a connection
c := swift.Connection{
UserName: "user",
ApiKey: "key",
AuthUrl: "auth_url",
Domain: "domain", // Name of the domain (v3 auth only)
Tenant: "tenant", // Name of the tenant (v2 auth only)
}
// Authenticate
err := c.Authenticate()
if err != nil {
panic(err)
}
// List all the containers
containers, err := c.ContainerNames(nil)
fmt.Println(containers)
// etc...
```
Additions
---------
The `rs` sub project contains a wrapper for the Rackspace specific CDN Management interface.
Testing
-------
To run the tests you can either use an embedded fake Swift server
or use a real OpenStack Swift server or a Rackspace Cloud Files account.
When using a real Swift server, you need to set these environment variables
before running the tests
export SWIFT_API_USER='user'
export SWIFT_API_KEY='key'
export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0'
And optionally these if using v2 authentication
export SWIFT_TENANT='TenantName'
export SWIFT_TENANT_ID='TenantId'
And optionally these if using v3 authentication
export SWIFT_TENANT='TenantName'
export SWIFT_TENANT_ID='TenantId'
export SWIFT_API_DOMAIN_ID='domain id'
export SWIFT_API_DOMAIN='domain name'
And optionally these if using v3 trust
export SWIFT_TRUST_ID='TrustId'
And optionally this if you want to skip server certificate validation
export SWIFT_AUTH_INSECURE=1
And optionally this to configure the connect channel timeout, in seconds
export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60
And optionally this to configure the data channel timeout, in seconds
export SWIFT_DATA_CHANNEL_TIMEOUT=60
Then run the tests with `go test`
License
-------
This is free software under the terms of MIT license (check COPYING file
included in this package).
Contact and support
-------------------
The project website is at:
- https://github.com/ncw/swift
There you can file bug reports, ask for help or contribute patches.
Authors
-------
- Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
- Brian "bojo" Jones <mojobojo@gmail.com>
- Janika Liiv <janika@toggl.com>
- Yamamoto, Hirotaka <ymmt2005@gmail.com>
- Stephen <yo@groks.org>
- platformpurple <stephen@platformpurple.com>
- Paul Querna <pquerna@apache.org>
- Livio Soares <liviobs@gmail.com>
- thesyncim <thesyncim@gmail.com>
- lsowen <lsowen@s1network.com> <logan@s1network.com>
- Sylvain Baubeau <sbaubeau@redhat.com>
- Chris Kastorff <encryptio@gmail.com>
- Dai HaoJun <haojun.dai@hp.com>
- Hua Wang <wanghua.humble@gmail.com>
- Fabian Ruff <fabian@progra.de> <fabian.ruff@sap.com>
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
- Petr Kotek <petr.kotek@bigcommerce.com>
- Stefan Majewsky <stefan.majewsky@sap.com> <majewsky@gmx.net>
- Cezar Sa Espinola <cezarsa@gmail.com>
- Sam Gunaratne <samgzeit@gmail.com>
- Richard Scothern <richard.scothern@gmail.com>
- Michel Couillard <!--<couillard.michel@voxlog.ca>--> <michel.couillard@gmail.com>
- Christopher Waldon <ckwaldon@us.ibm.com>
- dennis <dai.haojun@gmail.com>
- hag <hannes.georg@xing.com>
- Alexander Neumann <alexander@bumpern.de>
- eclipseo <30413512+eclipseo@users.noreply.github.com>
- Yuri Per <yuri@acronis.com>
- Falk Reimann <falk.reimann@sap.com>
- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
- Bruno Michel <bmichel@menfin.info>
- Charles Hsu <charles0126@gmail.com>
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
- kayrus <kay.diam@gmail.com>
- CodeLingo Bot <bot@codelingo.io>
- Jérémy Clerc <jeremy.clerc@tagpay.fr>
- 4xicom <37339705+4xicom@users.noreply.github.com>

335
vendor/github.com/ncw/swift/auth.go generated vendored Executable file
View File

@@ -0,0 +1,335 @@
package swift
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"strings"
"time"
)
// Authenticator defines the operations needed to authenticate with swift
//
// This encapsulates the different authentication schemes in use
type Authenticator interface {
// Request creates an http.Request for the auth - return nil if not needed
Request(*Connection) (*http.Request, error)
// Response parses the http.Response
Response(resp *http.Response) error
// The public storage URL - set Internal to true to read
// internal/service net URL
StorageUrl(Internal bool) string
// The access token
Token() string
// The CDN url if available
CdnUrl() string
}
// Expireser is an optional interface to read the expiration time of the token
type Expireser interface {
Expires() time.Time
}
type CustomEndpointAuthenticator interface {
StorageUrlForEndpoint(endpointType EndpointType) string
}
type EndpointType string
const (
// Use public URL as storage URL
EndpointTypePublic = EndpointType("public")
// Use internal URL as storage URL
EndpointTypeInternal = EndpointType("internal")
// Use admin URL as storage URL
EndpointTypeAdmin = EndpointType("admin")
)
// newAuth - create a new Authenticator from the AuthUrl
//
// A hint for AuthVersion can be provided
func newAuth(c *Connection) (Authenticator, error) {
AuthVersion := c.AuthVersion
if AuthVersion == 0 {
if strings.Contains(c.AuthUrl, "v3") {
AuthVersion = 3
} else if strings.Contains(c.AuthUrl, "v2") {
AuthVersion = 2
} else if strings.Contains(c.AuthUrl, "v1") {
AuthVersion = 1
} else {
return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly")
}
}
switch AuthVersion {
case 1:
return &v1Auth{}, nil
case 2:
return &v2Auth{
// Guess as to whether we are using an API key or a
// password - it will try both eventually so this is
// just an optimization.
useApiKey: len(c.ApiKey) >= 32,
}, nil
case 3:
return &v3Auth{}, nil
}
return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion)
}
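Since the guess only looks for "v1", "v2" or "v3" in the AuthUrl, an endpoint that does not embed the version needs AuthVersion set explicitly. A minimal sketch (the endpoint and credentials are placeholders):
```go
package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{
		UserName:    "user",
		ApiKey:      "key",
		AuthUrl:     "https://auth.example.com/tokens", // no "v1"/"v2"/"v3" in the URL
		AuthVersion: 2,                                 // so state the version explicitly
	}
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
}
```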
// ------------------------------------------------------------
// v1 auth
type v1Auth struct {
Headers http.Header // V1 auth: the authentication headers so extensions can access them
}
// v1 Authentication - make request
func (auth *v1Auth) Request(c *Connection) (*http.Request, error) {
req, err := http.NewRequest("GET", c.AuthUrl, nil)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.UserAgent)
req.Header.Set("X-Auth-Key", c.ApiKey)
req.Header.Set("X-Auth-User", c.UserName)
return req, nil
}
// v1 Authentication - read response
func (auth *v1Auth) Response(resp *http.Response) error {
auth.Headers = resp.Header
return nil
}
// v1 Authentication - read storage url
func (auth *v1Auth) StorageUrl(Internal bool) string {
storageUrl := auth.Headers.Get("X-Storage-Url")
if Internal {
newUrl, err := url.Parse(storageUrl)
if err != nil {
return storageUrl
}
newUrl.Host = "snet-" + newUrl.Host
storageUrl = newUrl.String()
}
return storageUrl
}
// v1 Authentication - read auth token
func (auth *v1Auth) Token() string {
return auth.Headers.Get("X-Auth-Token")
}
// v1 Authentication - read cdn url
func (auth *v1Auth) CdnUrl() string {
return auth.Headers.Get("X-CDN-Management-Url")
}
// ------------------------------------------------------------
// v2 Authentication
type v2Auth struct {
Auth *v2AuthResponse
Region string
useApiKey bool // if set will use API key not Password
useApiKeyOk bool // if set won't change useApiKey any more
notFirst bool // set after first run
}
// v2 Authentication - make request
func (auth *v2Auth) Request(c *Connection) (*http.Request, error) {
auth.Region = c.Region
// Toggle useApiKey if not first run and not OK yet
if auth.notFirst && !auth.useApiKeyOk {
auth.useApiKey = !auth.useApiKey
}
auth.notFirst = true
// Create a V2 auth request for the body of the connection
var v2i interface{}
if !auth.useApiKey {
// Normal swift authentication
v2 := v2AuthRequest{}
v2.Auth.PasswordCredentials.UserName = c.UserName
v2.Auth.PasswordCredentials.Password = c.ApiKey
v2.Auth.Tenant = c.Tenant
v2.Auth.TenantId = c.TenantId
v2i = v2
} else {
// Rackspace special with API Key
v2 := v2AuthRequestRackspace{}
v2.Auth.ApiKeyCredentials.UserName = c.UserName
v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey
v2.Auth.Tenant = c.Tenant
v2.Auth.TenantId = c.TenantId
v2i = v2
}
body, err := json.Marshal(v2i)
if err != nil {
return nil, err
}
url := c.AuthUrl
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += "tokens"
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", c.UserAgent)
return req, nil
}
// v2 Authentication - read response
func (auth *v2Auth) Response(resp *http.Response) error {
auth.Auth = new(v2AuthResponse)
err := readJson(resp, auth.Auth)
// If successfully read Auth then no need to toggle useApiKey any more
if err == nil {
auth.useApiKeyOk = true
}
return err
}
// Finds the Endpoint Url of "type" from the v2AuthResponse using the
// Region if set or defaulting to the first one if not
//
// Returns "" if not found
func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string {
for _, catalog := range auth.Auth.Access.ServiceCatalog {
if catalog.Type == Type {
for _, endpoint := range catalog.Endpoints {
if auth.Region == "" || (auth.Region == endpoint.Region) {
switch endpointType {
case EndpointTypeInternal:
return endpoint.InternalUrl
case EndpointTypePublic:
return endpoint.PublicUrl
case EndpointTypeAdmin:
return endpoint.AdminUrl
default:
return ""
}
}
}
}
}
return ""
}
// v2 Authentication - read storage url
//
// If Internal is true then it reads the private (internal / service
// net) URL.
func (auth *v2Auth) StorageUrl(Internal bool) string {
endpointType := EndpointTypePublic
if Internal {
endpointType = EndpointTypeInternal
}
return auth.StorageUrlForEndpoint(endpointType)
}
// v2 Authentication - read storage url
//
// Use the indicated endpointType to choose a URL.
func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
return auth.endpointUrl("object-store", endpointType)
}
// v2 Authentication - read auth token
func (auth *v2Auth) Token() string {
return auth.Auth.Access.Token.Id
}
// v2 Authentication - read expires
func (auth *v2Auth) Expires() time.Time {
t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
if err != nil {
return time.Time{} // return Zero if not parsed
}
return t
}
// v2 Authentication - read cdn url
func (auth *v2Auth) CdnUrl() string {
return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
}
// ------------------------------------------------------------
// V2 Authentication request
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequest struct {
Auth struct {
PasswordCredentials struct {
UserName string `json:"username"`
Password string `json:"password"`
} `json:"passwordCredentials"`
Tenant string `json:"tenantName,omitempty"`
TenantId string `json:"tenantId,omitempty"`
} `json:"auth"`
}
// V2 Authentication request - Rackspace variant
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthRequestRackspace struct {
Auth struct {
ApiKeyCredentials struct {
UserName string `json:"username"`
ApiKey string `json:"apiKey"`
} `json:"RAX-KSKEY:apiKeyCredentials"`
Tenant string `json:"tenantName,omitempty"`
TenantId string `json:"tenantId,omitempty"`
} `json:"auth"`
}
// V2 Authentication reply
//
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
type v2AuthResponse struct {
Access struct {
ServiceCatalog []struct {
Endpoints []struct {
InternalUrl string
PublicUrl string
AdminUrl string
Region string
TenantId string
}
Name string
Type string
}
Token struct {
Expires string
Id string
Tenant struct {
Id string
Name string
}
}
User struct {
DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
Id string
Name string
Roles []struct {
Description string
Id string
Name string
TenantId string
}
}
}
}

300
vendor/github.com/ncw/swift/auth_v3.go generated vendored Executable file
View File

@@ -0,0 +1,300 @@
package swift
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
)
const (
v3AuthMethodToken = "token"
v3AuthMethodPassword = "password"
v3AuthMethodApplicationCredential = "application_credential"
v3CatalogTypeObjectStore = "object-store"
)
// V3 Authentication request
// http://docs.openstack.org/developer/keystone/api_curl_examples.html
// http://developer.openstack.org/api-ref-identity-v3.html
type v3AuthRequest struct {
Auth struct {
Identity struct {
Methods []string `json:"methods"`
Password *v3AuthPassword `json:"password,omitempty"`
Token *v3AuthToken `json:"token,omitempty"`
ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
} `json:"identity"`
Scope *v3Scope `json:"scope,omitempty"`
} `json:"auth"`
}
type v3Scope struct {
Project *v3Project `json:"project,omitempty"`
Domain *v3Domain `json:"domain,omitempty"`
Trust *v3Trust `json:"OS-TRUST:trust,omitempty"`
}
type v3Domain struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
}
type v3Project struct {
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
Domain *v3Domain `json:"domain,omitempty"`
}
type v3Trust struct {
Id string `json:"id"`
}
type v3User struct {
Domain *v3Domain `json:"domain,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Password string `json:"password,omitempty"`
}
type v3AuthToken struct {
Id string `json:"id"`
}
type v3AuthPassword struct {
User v3User `json:"user"`
}
type v3AuthApplicationCredential struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Secret string `json:"secret,omitempty"`
User *v3User `json:"user,omitempty"`
}
// V3 Authentication response
type v3AuthResponse struct {
Token struct {
ExpiresAt string `json:"expires_at"`
IssuedAt string `json:"issued_at"`
Methods []string
Roles []struct {
Id, Name string
Links struct {
Self string
}
}
Project struct {
Domain struct {
Id, Name string
}
Id, Name string
}
Catalog []struct {
Id, Name, Type string
Endpoints []struct {
Id, Region_Id, Url, Region string
Interface EndpointType
}
}
User struct {
Id, Name string
Domain struct {
Id, Name string
Links struct {
Self string
}
}
}
Audit_Ids []string
}
}
type v3Auth struct {
Region string
Auth *v3AuthResponse
Headers http.Header
}
func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
auth.Region = c.Region
var v3i interface{}
v3 := v3AuthRequest{}
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
var user *v3User
if c.ApplicationCredentialId != "" {
c.ApplicationCredentialName = ""
user = &v3User{}
}
if user == nil && c.UserId != "" {
// UserID could be used without the domain information
user = &v3User{
Id: c.UserId,
}
}
if user == nil && c.UserName == "" {
// Make sure that UserName or UserId is provided
return nil, fmt.Errorf("UserID or Name should be provided")
}
if user == nil && c.DomainId != "" {
user = &v3User{
Name: c.UserName,
Domain: &v3Domain{
Id: c.DomainId,
},
}
}
if user == nil && c.Domain != "" {
user = &v3User{
Name: c.UserName,
Domain: &v3Domain{
Name: c.Domain,
},
}
}
// Make sure that DomainId or Domain is provided along with the UserName
if user == nil {
return nil, fmt.Errorf("DomainID or Domain should be provided")
}
v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
Id: c.ApplicationCredentialId,
Name: c.ApplicationCredentialName,
Secret: c.ApplicationCredentialSecret,
User: user,
}
} else if c.UserName == "" && c.UserId == "" {
v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
} else {
v3.Auth.Identity.Methods = []string{v3AuthMethodPassword}
v3.Auth.Identity.Password = &v3AuthPassword{
User: v3User{
Name: c.UserName,
Id: c.UserId,
Password: c.ApiKey,
},
}
var domain *v3Domain
if c.Domain != "" {
domain = &v3Domain{Name: c.Domain}
} else if c.DomainId != "" {
domain = &v3Domain{Id: c.DomainId}
}
v3.Auth.Identity.Password.User.Domain = domain
}
if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
if c.TrustId != "" {
v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
} else if c.TenantId != "" || c.Tenant != "" {
v3.Auth.Scope = &v3Scope{Project: &v3Project{}}
if c.TenantId != "" {
v3.Auth.Scope.Project.Id = c.TenantId
} else if c.Tenant != "" {
v3.Auth.Scope.Project.Name = c.Tenant
switch {
case c.TenantDomain != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain}
case c.TenantDomainId != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId}
case c.Domain != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain}
case c.DomainId != "":
v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId}
default:
v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"}
}
}
}
}
v3i = v3
body, err := json.Marshal(v3i)
if err != nil {
return nil, err
}
url := c.AuthUrl
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += "auth/tokens"
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("User-Agent", c.UserAgent)
return req, nil
}
func (auth *v3Auth) Response(resp *http.Response) error {
auth.Auth = &v3AuthResponse{}
auth.Headers = resp.Header
err := readJson(resp, auth.Auth)
return err
}
func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string {
for _, catalog := range auth.Auth.Token.Catalog {
if catalog.Type == Type {
for _, endpoint := range catalog.Endpoints {
if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) {
return endpoint.Url
}
}
}
}
return ""
}
func (auth *v3Auth) StorageUrl(Internal bool) string {
endpointType := EndpointTypePublic
if Internal {
endpointType = EndpointTypeInternal
}
return auth.StorageUrlForEndpoint(endpointType)
}
func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string {
return auth.endpointUrl("object-store", endpointType)
}
func (auth *v3Auth) Token() string {
return auth.Headers.Get("X-Subject-Token")
}
func (auth *v3Auth) Expires() time.Time {
t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt)
if err != nil {
return time.Time{} // return Zero if not parsed
}
return t
}
func (auth *v3Auth) CdnUrl() string {
return ""
}
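A hedged sketch of v3 application-credential authentication; the Connection fields are the ones consumed by the Request method above, and the endpoint and credential values are placeholders:
```go
package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{
		AuthUrl:                     "https://keystone.example.com/v3",
		AuthVersion:                 3,
		ApplicationCredentialId:     "credential-id",
		ApplicationCredentialSecret: "credential-secret",
	}
	// Sends an application_credential auth request to /v3/auth/tokens.
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
}
```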

28
vendor/github.com/ncw/swift/compatibility_1_0.go generated vendored Executable file
View File

@@ -0,0 +1,28 @@
// Go 1.0 compatibility functions
// +build !go1.1
package swift
import (
"log"
"net/http"
"time"
)
// Cancel the request - doesn't work under < go 1.1
func cancelRequest(transport http.RoundTripper, req *http.Request) {
log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1")
}
// Reset a timer - Doesn't work properly < go 1.1
//
// This is quite hard to do properly under go < 1.1 so we do a crude
// approximation and hope that everyone upgrades to go 1.1 quickly
func resetTimer(t *time.Timer, d time.Duration) {
t.Stop()
// Very likely this doesn't actually work if we are already
// selecting on t.C. However we've stopped the original timer
// so won't break transfers but may not time them out :-(
*t = *time.NewTimer(d)
}

24
vendor/github.com/ncw/swift/compatibility_1_1.go generated vendored Executable file
View File

@@ -0,0 +1,24 @@
// Go 1.1 and later compatibility functions
//
// +build go1.1
package swift
import (
"net/http"
"time"
)
// Cancel the request
func cancelRequest(transport http.RoundTripper, req *http.Request) {
if tr, ok := transport.(interface {
CancelRequest(*http.Request)
}); ok {
tr.CancelRequest(req)
}
}
// Reset a timer
func resetTimer(t *time.Timer, d time.Duration) {
t.Reset(d)
}

23
vendor/github.com/ncw/swift/compatibility_1_6.go generated vendored Executable file
View File

@@ -0,0 +1,23 @@
// +build go1.6
package swift
import (
"net/http"
"time"
)
const IS_AT_LEAST_GO_16 = true
func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
tr.ExpectContinueTimeout = t
}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
if req.Body != nil {
req.Header.Add("Expect", "100-continue")
}
if !hasContentLength {
req.TransferEncoding = []string{"chunked"}
}
}

13
vendor/github.com/ncw/swift/compatibility_not_1_6.go generated vendored Executable file
View File

@@ -0,0 +1,13 @@
// +build !go1.6
package swift
import (
"net/http"
"time"
)
const IS_AT_LEAST_GO_16 = false
func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}

149
vendor/github.com/ncw/swift/dlo.go generated vendored Executable file
View File

@@ -0,0 +1,149 @@
package swift
import (
"os"
"strings"
)
// DynamicLargeObjectCreateFile represents an open dynamic large object
type DynamicLargeObjectCreateFile struct {
largeObjectCreateFile
}
// DynamicLargeObjectCreateFile creates a dynamic large object
// returning an object which satisfies io.Writer, io.Seeker, io.Closer
// and io.ReaderFrom. The flags are as passed to the
// largeObjectCreate method.
func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
lo, err := c.largeObjectCreate(opts)
if err != nil {
return nil, err
}
return withBuffer(opts, &DynamicLargeObjectCreateFile{
largeObjectCreateFile: *lo,
}), nil
}
// DynamicLargeObjectCreate creates or truncates an existing dynamic
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling DynamicLargeObjectCreateFile
func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
opts.Flags = os.O_TRUNC | os.O_CREATE
return c.DynamicLargeObjectCreateFile(opts)
}
// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
return c.LargeObjectDelete(container, path)
}
// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
info, headers, err := c.Object(srcContainer, srcObjectName)
if err != nil {
return err
}
segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType, sanitizeLargeObjectMoveHeaders(headers)); err != nil {
return err
}
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
return err
}
return nil
}
func sanitizeLargeObjectMoveHeaders(headers Headers) Headers {
sanitizedHeaders := make(map[string]string, len(headers))
for k, v := range headers {
if strings.HasPrefix(k, "X-") { // Some of the fields do not affect the request, e.g. X-Timestamp, X-Trans-Id, X-Openstack-Request-Id; OpenStack will generate new ones anyway.
sanitizedHeaders[k] = v
}
}
return sanitizedHeaders
}
// createDLOManifest creates a dynamic large object manifest
func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string, headers Headers) error {
if headers == nil {
headers = make(Headers)
}
headers["X-Object-Manifest"] = prefix
manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
if err != nil {
return err
}
if err := manifest.Close(); err != nil {
return err
}
return nil
}
// Close satisfies the io.Closer interface
func (file *DynamicLargeObjectCreateFile) Close() error {
return file.Flush()
}
func (file *DynamicLargeObjectCreateFile) Flush() error {
err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType, file.headers)
if err != nil {
return err
}
return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}
func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
//a simple container listing works 99.9% of the time
segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
if err != nil {
return nil, err
}
hasObjectName := make(map[string]struct{})
for _, segment := range segments {
hasObjectName[segment.Name] = struct{}{}
}
//The container listing might be outdated (i.e. not contain all existing
//segment objects yet) because of temporary inconsistency (Swift is only
//eventually consistent!). Check its completeness.
segmentNumber := 0
for {
segmentNumber++
segmentName := getSegment(segmentPath, segmentNumber)
if _, seen := hasObjectName[segmentName]; seen {
continue
}
//This segment is missing in the container listing. Use a more reliable
//request to check its existence. (HEAD requests on segments are
//guaranteed to return the correct metadata, except for the pathological
//case of an outage of large parts of the Swift cluster or its network,
//since every segment is only written once.)
segment, _, err := c.Object(segmentContainer, segmentName)
switch err {
case nil:
//found new segment -> add it in the correct position and keep
//going, more might be missing
if segmentNumber <= len(segments) {
segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
segments[segmentNumber-1] = segment
} else {
segments = append(segments, segment)
}
continue
case ObjectNotFound:
//This segment is missing. Since we upload segments sequentially,
//there won't be any more segments after it.
return segments, nil
default:
return nil, err //unexpected error
}
}
}

19
vendor/github.com/ncw/swift/doc.go generated vendored Executable file
View File

@@ -0,0 +1,19 @@
/*
Package swift provides an easy to use interface to Swift / Openstack Object Storage / Rackspace Cloud Files
Standard Usage
Most of the work is done through the Container*() and Object*() methods.
All methods are safe to use concurrently in multiple go routines.
Object Versioning
As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.
Rackspace Sub Module
This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.
*/
package swift
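A small sketch of the versioning workflow described above, assuming the VersionContainerCreate helper defined in swift.go and an already-authenticated Connection (the container names are illustrative):
```go
package example

import "github.com/ncw/swift"

// EnableVersioning sets up the container pair described above: objects
// overwritten in "current" are preserved in "versions". The helper is
// assumed to create both containers and enable versioning; c must
// already be authenticated.
func EnableVersioning(c *swift.Connection) error {
	return c.VersionContainerCreate("current", "versions")
}
```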

1
vendor/github.com/ncw/swift/go.mod generated vendored Executable file
View File

@@ -0,0 +1 @@
module github.com/ncw/swift

448
vendor/github.com/ncw/swift/largeobjects.go generated vendored Executable file
View File

@@ -0,0 +1,448 @@
package swift
import (
"bufio"
"bytes"
"crypto/rand"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
gopath "path"
"strconv"
"strings"
"time"
)
// NotLargeObject is returned if an operation is performed on an object which isn't large.
var NotLargeObject = errors.New("Not a large object")
// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded
var readAfterWriteTimeout = 15 * time.Second
// readAfterWriteWait defines the time to sleep between two retries
var readAfterWriteWait = 200 * time.Millisecond
// largeObjectCreateFile represents an open static or dynamic large object
type largeObjectCreateFile struct {
conn *Connection
container string
objectName string
currentLength int64
filePos int64
chunkSize int64
segmentContainer string
prefix string
contentType string
checkHash bool
segments []Object
headers Headers
minChunkSize int64
}
func swiftSegmentPath(path string) (string, error) {
checksum := sha1.New()
random := make([]byte, 32)
if _, err := rand.Read(random); err != nil {
return "", err
}
path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...)))
return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil
}
func getSegment(segmentPath string, partNumber int) string {
return fmt.Sprintf("%s/%016d", segmentPath, partNumber)
}
func parseFullPath(manifest string) (container string, prefix string) {
components := strings.SplitN(manifest, "/", 2)
container = components[0]
if len(components) > 1 {
prefix = components[1]
}
return container, prefix
}
func (headers Headers) IsLargeObjectDLO() bool {
_, isDLO := headers["X-Object-Manifest"]
return isDLO
}
func (headers Headers) IsLargeObjectSLO() bool {
_, isSLO := headers["X-Static-Large-Object"]
return isSLO
}
func (headers Headers) IsLargeObject() bool {
return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO()
}
func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) {
if manifest, isDLO := headers["X-Object-Manifest"]; isDLO {
segmentContainer, segmentPath := parseFullPath(manifest)
segments, err := c.getAllDLOSegments(segmentContainer, segmentPath)
return segmentContainer, segments, err
}
if headers.IsLargeObjectSLO() {
return c.getAllSLOSegments(container, path)
}
return "", nil, NotLargeObject
}
// LargeObjectOpts describes how a large object should be created
type LargeObjectOpts struct {
Container string // Name of container to place object
ObjectName string // Name of object
Flags int // Creation flags
CheckHash bool // If set Check the hash
Hash string // If set use this hash to check
ContentType string // Content-Type of the object
Headers Headers // Additional headers to upload the object with
ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set
MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info
SegmentContainer string // Name of the container to place segments
SegmentPrefix string // Prefix to use for the segments
NoBuffer bool // Prevents using a bufio.Writer to write segments
}
type LargeObjectFile interface {
io.Writer
io.Seeker
io.Closer
Size() int64
Flush() error
}
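A hedged sketch of streaming data into a dynamic large object via DynamicLargeObjectCreate from dlo.go above; the container names are illustrative and the caller supplies the data source:
```go
package example

import (
	"io"

	"github.com/ncw/swift"
)

// WriteLargeObject uploads src as a dynamic large object in 10 MiB
// segments; c must already be authenticated.
func WriteLargeObject(c *swift.Connection, src io.Reader) error {
	f, err := c.DynamicLargeObjectCreate(&swift.LargeObjectOpts{
		Container:        "videos",
		ObjectName:       "movie.mp4",
		ContentType:      "video/mp4",
		ChunkSize:        10 * 1024 * 1024,
		SegmentContainer: "videos_segments",
	})
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, src); err != nil {
		_ = f.Close()
		return err
	}
	// Close flushes buffered data and writes the DLO manifest.
	return f.Close()
}
```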
// largeObjectCreate creates a large object at opts.Container, opts.ObjectName.
//
// opts.Flags can have the following bits set
// os.O_TRUNC - remove the contents of the large object if it exists
// os.O_APPEND - write at the end of the large object
func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
var (
segmentPath string
segmentContainer string
segments []Object
currentLength int64
err error
)
if opts.SegmentPrefix != "" {
segmentPath = opts.SegmentPrefix
} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
return nil, err
}
if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
if opts.Flags&os.O_TRUNC != 0 {
c.LargeObjectDelete(opts.Container, opts.ObjectName)
} else {
currentLength = info.Bytes
if headers.IsLargeObject() {
segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
if err != nil {
return nil, err
}
if len(segments) > 0 {
segmentPath = gopath.Dir(segments[0].Name)
}
} else {
if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
return nil, err
}
segments = append(segments, info)
}
}
} else if err != ObjectNotFound {
return nil, err
}
// segmentContainer is not empty when the manifest already existed
if segmentContainer == "" {
if opts.SegmentContainer != "" {
segmentContainer = opts.SegmentContainer
} else {
segmentContainer = opts.Container + "_segments"
}
}
file := &largeObjectCreateFile{
conn: c,
checkHash: opts.CheckHash,
container: opts.Container,
objectName: opts.ObjectName,
chunkSize: opts.ChunkSize,
minChunkSize: opts.MinChunkSize,
headers: opts.Headers,
segmentContainer: segmentContainer,
prefix: segmentPath,
segments: segments,
currentLength: currentLength,
}
if file.chunkSize == 0 {
file.chunkSize = 10 * 1024 * 1024
}
if file.minChunkSize > file.chunkSize {
file.chunkSize = file.minChunkSize
}
if opts.Flags&os.O_APPEND != 0 {
file.filePos = currentLength
}
return file, nil
}
// LargeObjectDelete deletes the large object named by container, path
func (c *Connection) LargeObjectDelete(container string, objectName string) error {
_, headers, err := c.Object(container, objectName)
if err != nil {
return err
}
var objects [][]string
if headers.IsLargeObject() {
segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
if err != nil {
return err
}
for _, obj := range segments {
objects = append(objects, []string{segmentContainer, obj.Name})
}
}
objects = append(objects, []string{container, objectName})
info, err := c.cachedQueryInfo()
if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
filenames := make([]string, len(objects))
for i, obj := range objects {
filenames[i] = obj[0] + "/" + obj[1]
}
_, err = c.doBulkDelete(filenames)
// Don't fail on ObjectNotFound because eventual consistency
// makes this situation normal.
if err != nil && err != Forbidden && err != ObjectNotFound {
return err
}
} else {
for _, obj := range objects {
if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
return err
}
}
}
return nil
}
// LargeObjectGetSegments returns all the segments that compose an object
// If the object is a Dynamic Large Object (DLO), it just returns the objects
// that have the prefix as indicated by the manifest.
// If the object is a Static Large Object (SLO), it retrieves the JSON content
// of the manifest and return all the segments of it.
func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) {
_, headers, err := c.Object(container, path)
if err != nil {
return "", nil, err
}
return c.getAllSegments(container, path, headers)
}
// Seek sets the offset for the next write operation
func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) {
switch whence {
case 0:
file.filePos = offset
case 1:
file.filePos += offset
case 2:
file.filePos = file.currentLength + offset
default:
return -1, fmt.Errorf("invalid value for whence")
}
if file.filePos < 0 {
return -1, fmt.Errorf("negative offset")
}
return file.filePos, nil
}
func (file *largeObjectCreateFile) Size() int64 {
return file.currentLength
}
func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) {
endTimer := time.NewTimer(readAfterWriteTimeout)
defer endTimer.Stop()
waitingTime := readAfterWriteWait
for {
var headers Headers
var sz int64
if headers, sz, err = fn(); err == nil {
if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz {
return
}
} else {
return
}
waitTimer := time.NewTimer(waitingTime)
select {
case <-endTimer.C:
waitTimer.Stop()
err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz)
return
case <-waitTimer.C:
waitingTime *= 2
}
}
}
func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) {
err = withLORetry(expectedSize, func() (Headers, int64, error) {
var info Object
var headers Headers
info, headers, err = c.objectBase(container, objectName)
if err != nil {
return headers, 0, err
}
return headers, info.Bytes, nil
})
return
}
// Write satisfies the io.Writer interface
func (file *largeObjectCreateFile) Write(buf []byte) (int, error) {
var sz int64
var relativeFilePos int
writeSegmentIdx := 0
for i, obj := range file.segments {
if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) {
relativeFilePos = int(file.filePos - sz)
break
}
writeSegmentIdx++
sz += obj.Bytes
}
sizeToWrite := len(buf)
for offset := 0; offset < sizeToWrite; {
newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos)
if err != nil {
return 0, err
}
if writeSegmentIdx < len(file.segments) {
file.segments[writeSegmentIdx] = *newSegment
} else {
file.segments = append(file.segments, *newSegment)
}
offset += n
writeSegmentIdx++
relativeFilePos = 0
}
file.filePos += int64(sizeToWrite)
file.currentLength = 0
for _, obj := range file.segments {
file.currentLength += obj.Bytes
}
return sizeToWrite, nil
}
func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) {
var (
readers []io.Reader
existingSegment *Object
segmentSize int
)
segmentName := getSegment(file.prefix, writeSegmentIdx+1)
sizeToRead := int(file.chunkSize)
if writeSegmentIdx < len(file.segments) {
existingSegment = &file.segments[writeSegmentIdx]
if writeSegmentIdx != len(file.segments)-1 {
sizeToRead = int(existingSegment.Bytes)
}
if relativeFilePos > 0 {
headers := make(Headers)
headers["Range"] = "bytes=0-" + strconv.FormatInt(int64(relativeFilePos-1), 10)
existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
if err != nil {
return nil, 0, err
}
defer existingSegmentReader.Close()
sizeToRead -= relativeFilePos
segmentSize += relativeFilePos
readers = []io.Reader{existingSegmentReader}
}
}
if sizeToRead > len(buf) {
sizeToRead = len(buf)
}
segmentSize += sizeToRead
readers = append(readers, bytes.NewReader(buf[:sizeToRead]))
if existingSegment != nil && segmentSize < int(existingSegment.Bytes) {
headers := make(Headers)
headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-"
tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers)
if err != nil {
return nil, 0, err
}
defer tailSegmentReader.Close()
segmentSize = int(existingSegment.Bytes)
readers = append(readers, tailSegmentReader)
}
segmentReader := io.MultiReader(readers...)
headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil)
if err != nil {
return nil, 0, err
}
return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil
}
func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile {
if !opts.NoBuffer {
return &bufferedLargeObjectFile{
LargeObjectFile: lo,
bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)),
}
}
return lo
}
type bufferedLargeObjectFile struct {
LargeObjectFile
bw *bufio.Writer
}
func (blo *bufferedLargeObjectFile) Close() error {
err := blo.bw.Flush()
if err != nil {
return err
}
return blo.LargeObjectFile.Close()
}
func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) {
return blo.bw.Write(p)
}
func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) {
err := blo.bw.Flush()
if err != nil {
return 0, err
}
return blo.LargeObjectFile.Seek(offset, whence)
}
func (blo *bufferedLargeObjectFile) Size() int64 {
return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered())
}
func (blo *bufferedLargeObjectFile) Flush() error {
err := blo.bw.Flush()
if err != nil {
return err
}
return blo.LargeObjectFile.Flush()
}

174
vendor/github.com/ncw/swift/meta.go generated vendored Executable file
View File

@@ -0,0 +1,174 @@
// Metadata manipulation in and out of Headers
package swift
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
)
// Metadata stores account, container or object metadata.
type Metadata map[string]string
// Metadata gets the Metadata starting with the metaPrefix out of the Headers.
//
// The keys in the Metadata will be converted to lower case
func (h Headers) Metadata(metaPrefix string) Metadata {
m := Metadata{}
metaPrefix = http.CanonicalHeaderKey(metaPrefix)
for key, value := range h {
if strings.HasPrefix(key, metaPrefix) {
metaKey := strings.ToLower(key[len(metaPrefix):])
m[metaKey] = value
}
}
return m
}
// AccountMetadata converts Headers from account to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) AccountMetadata() Metadata {
return h.Metadata("X-Account-Meta-")
}
// ContainerMetadata converts Headers from container to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ContainerMetadata() Metadata {
return h.Metadata("X-Container-Meta-")
}
// ObjectMetadata converts Headers from object to a Metadata.
//
// The keys in the Metadata will be converted to lower case.
func (h Headers) ObjectMetadata() Metadata {
return h.Metadata("X-Object-Meta-")
}
// Headers convert the Metadata starting with the metaPrefix into a
// Headers.
//
// The keys in the Metadata will be converted from lower case to http
// Canonical (see http.CanonicalHeaderKey).
func (m Metadata) Headers(metaPrefix string) Headers {
h := Headers{}
for key, value := range m {
key = http.CanonicalHeaderKey(metaPrefix + key)
h[key] = value
}
return h
}
// AccountHeaders converts the Metadata for the account.
func (m Metadata) AccountHeaders() Headers {
return m.Headers("X-Account-Meta-")
}
// ContainerHeaders converts the Metadata for the container.
func (m Metadata) ContainerHeaders() Headers {
return m.Headers("X-Container-Meta-")
}
// ObjectHeaders converts the Metadata for the object.
func (m Metadata) ObjectHeaders() Headers {
return m.Headers("X-Object-Meta-")
}
// Turns a number of ns into a floating point string in seconds
//
// Trims trailing zeros and is guaranteed to be perfectly accurate
func nsToFloatString(ns int64) string {
if ns < 0 {
return "-" + nsToFloatString(-ns)
}
result := fmt.Sprintf("%010d", ns)
split := len(result) - 9
result, decimals := result[:split], result[split:]
decimals = strings.TrimRight(decimals, "0")
if decimals != "" {
result += "."
result += decimals
}
return result
}
// Turns a floating point string in seconds into a ns integer
//
// Guaranteed to be perfectly accurate
func floatStringToNs(s string) (int64, error) {
const zeros = "000000000"
if point := strings.IndexRune(s, '.'); point >= 0 {
tail := s[point+1:]
if fill := 9 - len(tail); fill < 0 {
tail = tail[:9]
} else {
tail += zeros[:fill]
}
s = s[:point] + tail
} else if len(s) > 0 { // Make sure empty string produces an error
s += zeros
}
return strconv.ParseInt(s, 10, 64)
}
// FloatStringToTime converts a floating point number string to a time.Time
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number should be in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z"
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
//
// If an error is returned then time will be returned as the zero time.
func FloatStringToTime(s string) (t time.Time, err error) {
ns, err := floatStringToNs(s)
if err != nil {
return
}
t = time.Unix(0, ns)
return
}
// TimeToFloatString converts a time.Time object to a floating point string
//
// The string is floating point number of seconds since the epoch
// (Unix time). The number is in fixed point format (not
// exponential), eg "1354040105.123456789" which represents the time
// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped
// from the output.
//
// Some care is taken to preserve all the accuracy in the time.Time
// (which wouldn't happen with a naive conversion through float64) so
// a round trip conversion won't change the data.
func TimeToFloatString(t time.Time) string {
return nsToFloatString(t.UnixNano())
}
// GetModTime reads a modification time (mtime) from a Metadata object
//
// This is a de facto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
//
// If an error is returned then time will be returned as the zero time.
func (m Metadata) GetModTime() (t time.Time, err error) {
return FloatStringToTime(m["mtime"])
}
// SetModTime writes a modification time (mtime) to a Metadata object
//
// This is a de facto standard (used in the official python-swiftclient
// amongst others) for storing the modification time (as read using
// os.Stat) for an object. It is stored using the key 'mtime', which
// for example when written to an object will be 'X-Object-Meta-Mtime'.
func (m Metadata) SetModTime(t time.Time) {
m["mtime"] = TimeToFloatString(t)
}
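A small sketch of the mtime round trip using the helpers above; converting to headers and back stands in for writing the metadata to the server and reading it again:
```go
package example

import (
	"time"

	"github.com/ncw/swift"
)

// MtimeRoundTrip stores a modification time in object metadata and
// reads it back with full nanosecond accuracy preserved.
func MtimeRoundTrip(t time.Time) (time.Time, error) {
	m := swift.Metadata{}
	m.SetModTime(t) // stored under the "mtime" key
	// As headers this becomes "X-Object-Meta-Mtime": "<seconds>".
	headers := m.ObjectHeaders()
	return headers.ObjectMetadata().GetModTime()
}
```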

55
vendor/github.com/ncw/swift/notes.txt generated vendored Executable file
View File

@@ -0,0 +1,55 @@
Notes on Go Swift
=================
Make a builder style interface like the Google Go APIs? Advantages
are that it is easy to add named methods to the service object to do
specific things. Slightly less efficient. Not sure about how to
return extra stuff though - in an object?
Make a container struct so these could be methods on it?
Make noResponse check for 204?
Make storage public so it can be extended easily?
Rename to go-swift to match user agent string?
Reconnect on auth error - 401 when token expires isn't tested
Make more api compatible with python cloudfiles?
Retry operations on timeout / network errors?
- also 408 error
- GET requests only?
Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock
Add extra headers field to Connection (for via etc)
Make errors use an error hierarchy so they can be caught with a type assertion
Error(...)
ObjectCorrupted{ Error }
Make a Debug flag in connection for logging stuff
Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
Object range
Object create, update with X-Delete-At or X-Delete-After
Large object support
- check uploads are less than 5GB in normal mode?
Access control CORS?
Swift client retries and backs off for all types of errors
Implement net error interface?
type Error interface {
error
Timeout() bool // Is the error a timeout?
Temporary() bool // Is the error temporary?
}

171
vendor/github.com/ncw/swift/slo.go generated vendored Executable file
View File

@@ -0,0 +1,171 @@
package swift
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
)
// StaticLargeObjectCreateFile represents an open static large object
type StaticLargeObjectCreateFile struct {
largeObjectCreateFile
}
var SLONotSupported = errors.New("SLO not supported")
type swiftSegment struct {
Path string `json:"path,omitempty"`
Etag string `json:"etag,omitempty"`
Size int64 `json:"size_bytes,omitempty"`
// When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes`
// but when querying the JSON content of a manifest with the `multipart-manifest=get`
// parameter, Swift names those attributes `name`, `hash` and `bytes`.
// We use all the different attributes names in this structure to be able to use
// the same structure for both uploading and retrieving.
Name string `json:"name,omitempty"`
Hash string `json:"hash,omitempty"`
Bytes int64 `json:"bytes,omitempty"`
ContentType string `json:"content_type,omitempty"`
LastModified string `json:"last_modified,omitempty"`
}
// StaticLargeObjectCreateFile creates a static large object returning
// an object which satisfies io.Writer, io.Seeker, io.Closer and
// io.ReaderFrom. The flags are as passed to the largeObjectCreate
// method.
func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
info, err := c.cachedQueryInfo()
if err != nil || !info.SupportsSLO() {
return nil, SLONotSupported
}
realMinChunkSize := info.SLOMinSegmentSize()
if realMinChunkSize > opts.MinChunkSize {
opts.MinChunkSize = realMinChunkSize
}
lo, err := c.largeObjectCreate(opts)
if err != nil {
return nil, err
}
return withBuffer(opts, &StaticLargeObjectCreateFile{
largeObjectCreateFile: *lo,
}), nil
}
// StaticLargeObjectCreate creates or truncates an existing static
// large object returning a writeable object. This sets opts.Flags to
// an appropriate value before calling StaticLargeObjectCreateFile
func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
opts.Flags = os.O_TRUNC | os.O_CREATE
return c.StaticLargeObjectCreateFile(opts)
}
// StaticLargeObjectDelete deletes a static large object and all of its segments.
func (c *Connection) StaticLargeObjectDelete(container string, path string) error {
info, err := c.cachedQueryInfo()
if err != nil || !info.SupportsSLO() {
return SLONotSupported
}
return c.LargeObjectDelete(container, path)
}
// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName
func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
swiftInfo, err := c.cachedQueryInfo()
if err != nil || !swiftInfo.SupportsSLO() {
return SLONotSupported
}
info, headers, err := c.Object(srcContainer, srcObjectName)
if err != nil {
return err
}
container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers)
if err != nil {
return err
}
//copy only metadata during move (other headers might not be safe for copying)
headers = headers.ObjectMetadata().ObjectHeaders()
if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil {
return err
}
if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
return err
}
return nil
}
// createSLOManifest creates a static large object manifest
func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error {
sloSegments := make([]swiftSegment, len(segments))
for i, segment := range segments {
sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name)
sloSegments[i].Etag = segment.Hash
sloSegments[i].Size = segment.Bytes
}
content, err := json.Marshal(sloSegments)
if err != nil {
return err
}
values := url.Values{}
values.Set("multipart-manifest", "put")
if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil {
return err
}
return nil
}
func (file *StaticLargeObjectCreateFile) Close() error {
return file.Flush()
}
func (file *StaticLargeObjectCreateFile) Flush() error {
if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil {
return err
}
return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
}
func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) {
var (
segmentList []swiftSegment
segments []Object
segPath string
segmentContainer string
)
values := url.Values{}
values.Set("multipart-manifest", "get")
file, _, err := c.objectOpen(container, path, true, nil, values)
if err != nil {
return "", nil, err
}
content, err := ioutil.ReadAll(file)
if err != nil {
return "", nil, err
}
if err := json.Unmarshal(content, &segmentList); err != nil {
return "", nil, err
}
for _, segment := range segmentList {
segmentContainer, segPath = parseFullPath(segment.Name[1:])
segments = append(segments, Object{
Name: segPath,
Bytes: segment.Bytes,
Hash: segment.Hash,
})
}
return segmentContainer, segments, nil
}

2264
vendor/github.com/ncw/swift/swift.go generated vendored Executable file

File diff suppressed because it is too large Load Diff

59
vendor/github.com/ncw/swift/timeout_reader.go generated vendored Executable file
View File

@@ -0,0 +1,59 @@
package swift
import (
"io"
"time"
)
// An io.ReadCloser which obeys an idle timeout
type timeoutReader struct {
reader io.ReadCloser
timeout time.Duration
cancel func()
}
// Returns a wrapper around the reader which obeys an idle
// timeout. The cancel function is called if the timeout happens
func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
return &timeoutReader{
reader: reader,
timeout: timeout,
cancel: cancel,
}
}
// Read reads up to len(p) bytes into p
//
// Waits at most timeout for the read to complete, otherwise returns a timeout error
func (t *timeoutReader) Read(p []byte) (int, error) {
// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
// Do the read in the background
type result struct {
n int
err error
}
done := make(chan result, 1)
go func() {
n, err := t.reader.Read(p)
done <- result{n, err}
}()
// Wait for the read or the timeout
timer := time.NewTimer(t.timeout)
defer timer.Stop()
select {
case r := <-done:
return r.n, r.err
case <-timer.C:
t.cancel()
return 0, TimeoutError
}
panic("unreachable") // for Go 1.0
}
// Close closes the underlying reader
func (t *timeoutReader) Close() error {
return t.reader.Close()
}
// Check it satisfies the interface
var _ io.ReadCloser = &timeoutReader{}

22
vendor/github.com/ncw/swift/travis_realserver.sh generated vendored Executable file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
set -e
if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then
exit 0
fi
if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then
echo "Running tests pointing to Rackspace"
export SWIFT_API_KEY=$RACKSPACE_APIKEY
export SWIFT_API_USER=$RACKSPACE_USER
export SWIFT_AUTH_URL=$RACKSPACE_AUTH
go test ./...
fi
if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then
echo "Running tests pointing to Memset"
export SWIFT_API_KEY=$MEMSET_APIKEY
export SWIFT_API_USER=$MEMSET_USER
export SWIFT_AUTH_URL=$MEMSET_AUTH
go test
fi

55
vendor/github.com/ncw/swift/watchdog_reader.go generated vendored Executable file
View File

@@ -0,0 +1,55 @@
package swift
import (
"io"
"time"
)
var watchdogChunkSize = 1 << 20 // 1 MiB
// An io.Reader which resets a watchdog timer whenever data is read
type watchdogReader struct {
timeout time.Duration
reader io.Reader
timer *time.Timer
chunkSize int
}
// Returns a new reader which will kick the watchdog timer whenever data is read
func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader {
return &watchdogReader{
timeout: timeout,
reader: reader,
timer: timer,
chunkSize: watchdogChunkSize,
}
}
// Read reads up to len(p) bytes into p
func (t *watchdogReader) Read(p []byte) (int, error) {
//read from underlying reader in chunks not larger than t.chunkSize
//while resetting the watchdog timer before every read; the small chunk
//size ensures that the timer does not fire when reading a large amount of
//data from a slow connection
start := 0
end := len(p)
for start < end {
length := end - start
if length > t.chunkSize {
length = t.chunkSize
}
resetTimer(t.timer, t.timeout)
n, err := t.reader.Read(p[start : start+length])
start += n
if n == 0 || err != nil {
return start, err
}
}
resetTimer(t.timer, t.timeout)
return start, nil
}
// Check it satisfies the interface
var _ io.Reader = &watchdogReader{}