newline battles continue

This commit is contained in:
bel
2020-01-19 20:41:30 +00:00
parent 98adb53caf
commit 573696774e
1456 changed files with 501133 additions and 6 deletions

vendor/github.com/ncw/rclone/backend/drive/drive.go generated vendored Executable file

File diff suppressed because it is too large.

vendor/github.com/ncw/rclone/backend/drive/upload.go generated vendored Executable file

@@ -0,0 +1,249 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *Fs
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PATCH"
}
urls += "?" + params.Encode()
var res *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
var body io.Reader
body, err = googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
if err != nil {
return false, err
}
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - the response body is closed before this returns
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err // no HTTP status available - out-of-range code signals a transport-level failure
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err // out-of-range code signals a body decode failure
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example, your code could limit to ten retries or fewer
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
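
The loop above is the whole resumable protocol: each chunk is sent with a Content-Range header, an HTTP 308 means "send the next chunk", and a 2xx means the object is complete. As a minimal self-contained sketch of one chunk transfer using only net/http (sessionURL and the server behaviour are assumptions for illustration, not rclone's API):

package sketch

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadChunk sends one chunk of a resumable upload session and reports
// whether the server considers the object complete.
func uploadChunk(client *http.Client, sessionURL string, start, total int64, chunk []byte) (done bool, err error) {
	req, err := http.NewRequest("POST", sessionURL, bytes.NewReader(chunk))
	if err != nil {
		return false, err
	}
	req.ContentLength = int64(len(chunk))
	end := start + int64(len(chunk)) - 1
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, total))
	res, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	switch {
	case res.StatusCode == 308: // statusResumeIncomplete - more chunks wanted
		return false, nil
	case res.StatusCode >= 200 && res.StatusCode < 300: // upload finished
		return true, nil
	default:
		return false, fmt.Errorf("unexpected status %d", res.StatusCode)
	}
}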

vendor/github.com/ncw/rclone/backend/dropbox/dbhash/dbhash.go generated vendored Executable file

@@ -0,0 +1,127 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash
import (
"crypto/sha256"
"hash"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha256.BlockSize
// Size of the checksum in bytes.
Size = sha256.Size
bytesPerBlock = 4 * 1024 * 1024
hashReturnedError = "hash function returned error"
)
type digest struct {
n int // bytes written into blockHash so far
blockHash hash.Hash
totalHash hash.Hash
sumCalled bool
writtenMore bool
}
// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
blockHash := d.blockHash.Sum(nil)
_, err := d.totalHash.Write(blockHash)
if err != nil {
panic(hashReturnedError)
}
// reset counters for blockhash
d.n = 0
d.blockHash.Reset()
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)
}
d.n += toWrite
p = p[toWrite:]
// Accumulate the total hash
if d.n == bytesPerBlock {
d.writeBlockHash()
}
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
if d.sumCalled && d.writtenMore {
panic("digest.Sum() called more than once")
}
d.sumCalled = true
d.writtenMore = false
if d.n != 0 {
d.writeBlockHash()
}
return d.totalHash.Sum(b)
}
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.n = 0
d.totalHash = sha256.New()
d.blockHash = sha256.New()
d.sumCalled = false
d.writtenMore = false
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return d.totalHash.Size()
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return d.totalHash.BlockSize()
}
// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
var d digest
d.Reset()
_, _ = d.Write(data)
var out [Size]byte
d.Sum(out[:0])
return out
}
// must implement this interface
var _ hash.Hash = (*digest)(nil)
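
Since digest satisfies hash.Hash, the standard streaming idioms apply directly. A hedged usage sketch (the import path is assumed from this vendor tree; Dropbox presents the content hash hex-encoded):

package sketch

import (
	"encoding/hex"
	"io"
	"os"

	"github.com/ncw/rclone/backend/dropbox/dbhash"
)

// dropboxHash streams a file through the digest and returns the checksum
// in the hex form Dropbox reports.
func dropboxHash(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := dbhash.New() // an ordinary hash.Hash, so io.Copy can feed it
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	// Note the caveat above: Sum must only be called once after writing.
	return hex.EncodeToString(h.Sum(nil)), nil
}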

vendor/github.com/ncw/rclone/backend/local/about_unix.go generated vendored Executable file

@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux

package local
import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
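
On the consuming side the backend is reached through the fs.Abouter interface checked above; a sketch, assuming the *int64 usage fields produced by fs.NewUsageValue:

package sketch

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// printUsage asks a backend for its quota figures; not every remote
// implements About, hence the type assertion.
func printUsage(f fs.Fs) error {
	doAbout, ok := f.(fs.Abouter)
	if !ok {
		return fmt.Errorf("%v: About not supported", f)
	}
	usage, err := doAbout.About()
	if err != nil {
		return err
	}
	if usage.Total != nil {
		fmt.Printf("total: %d bytes\n", *usage.Total)
	}
	if usage.Free != nil {
		fmt.Printf("free:  %d bytes\n", *usage.Free)
	}
	return nil
}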

vendor/github.com/ncw/rclone/backend/local/about_windows.go generated vendored Executable file

@@ -0,0 +1,36 @@
// +build windows

package local
import (
"syscall"
"unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}

vendor/github.com/ncw/rclone/backend/local/local.go generated vendored Executable file

@@ -0,0 +1,993 @@
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "skip_links",
Help: "Don't warn about skipped symlinks.",
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: "Don't apply unicode normalization to paths and filenames",
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: "Don't check to see if the files change during upload",
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
err := o.lstat()
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorObjectNotFound
}
if os.IsPermission(err) {
return nil, fs.ErrorPermissionDenied
}
return nil, err
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
}
fd, err := os.Open(fsDirPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open directory %q", dir)
}
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q", dir)
}
}()
for {
fis, err := fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 {
break
}
if err != nil {
return nil, errors.Wrapf(err, "failed to read directory %q", dir)
}
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if err != nil {
return nil, err
}
mode = fi.Mode()
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
if fso.Storable() {
entries = append(entries, fso)
}
}
}
}
return entries, nil
}
// cleanRemote makes name a valid UTF-8 string for use as a remote path.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
}
name = filepath.ToSlash(name)
return name
}
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Lookup a directory name to make a local name (reverses
// Save)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Cleans a directory name recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
if err != nil {
return err
}
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
return f.precision
}
// Read the precision
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
return time.Second
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
// Current time with delta
t := time.Unix(time.Now().Unix(), int64(duration))
err := os.Chtimes(path, t, t)
if err != nil {
// fmt.Println("Failed to Chtimes", err)
break
}
// Read the actual time back
fi, err := os.Stat(path)
if err != nil {
// fmt.Println("Failed to Stat", err)
break
}
// If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime().Equal(t) {
// fmt.Println("Precision detected as", duration)
return duration
}
}
return
}
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := f.lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return nil, err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return nil, err
} else if err != nil {
// not quite clear, but probably trying to move a file across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move: %v: trying copy", err)
return nil, fs.ErrorCantMove
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
// Do the move
err = os.Rename(srcPath, dstPath)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return err
} else if err != nil {
// not quite clear, but probably trying to move directory across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
return fs.ErrorCantDirMove
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectHashesMu.Lock()
hashes := o.hashes
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
o.fs.objectHashesMu.Lock()
o.hashes = hashes
o.fs.objectHashesMu.Unlock()
}
return hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
// Re-read metadata
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and show non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the hashes of the
// object that is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
fd *os.File // file object reference
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !file.o.fs.opt.NoCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
fd, err := os.Open(o.path)
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the hashes as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
return in, nil
}
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
hashes = x.Hashes
}
}
err := o.mkdirAll()
if err != nil {
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
// All successful so update the hashes
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// ReRead info now that we have finished
return o.lstat()
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
o.setMetadata(info)
}
return err
}
// Remove an object
func (o *Object) Remove() error {
return remove(o.path)
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it makes the path UNC also and replaces any characters
// Windows can't deal with with their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)
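
A short sketch of constructing the backend directly and listing its root; it assumes configmap.Simple (an empty map) satisfies configmap.Mapper, as it does in rclone's own tests:

package sketch

import (
	"fmt"

	"github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/fs/config/configmap"
)

// listRoot builds a local Fs rooted at dir with default options and
// prints the entries found in its root.
func listRoot(dir string) error {
	f, err := local.NewFs("local", dir, configmap.Simple{})
	if err != nil {
		return err
	}
	entries, err := f.List("")
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
	return nil
}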


@@ -0,0 +1,13 @@
// Device reading functions
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package local
import "os"
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
return devUnset
}


@@ -0,0 +1,26 @@
// Device reading functions
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package local
import (
"os"
"syscall"
"github.com/ncw/rclone/fs"
)
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev)
}
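
The fi.Sys() assertion also works standalone; a unix-only sketch (Stat_t has no Dev field on Windows, so this will not build there):

package sketch

import (
	"os"
	"syscall"
)

// deviceOf returns the device number of path, or ok=false if the
// platform-specific stat information is unavailable.
func deviceOf(path string) (dev uint64, ok bool) {
	fi, err := os.Lstat(path)
	if err != nil {
		return 0, false
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, false
	}
	return uint64(st.Dev), true
}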

vendor/github.com/ncw/rclone/backend/local/remove_other.go generated vendored Executable file

@@ -0,0 +1,10 @@
//+build !windows

package local
import "os"
// Removes name, retrying on a sharing violation
func remove(name string) error {
return os.Remove(name)
}

vendor/github.com/ncw/rclone/backend/local/remove_windows.go generated vendored Executable file

@@ -0,0 +1,38 @@
//+build windows

package local
import (
"os"
"syscall"
"time"
"github.com/ncw/rclone/fs"
)
const (
ERROR_SHARING_VIOLATION syscall.Errno = 32
)
// Removes name, retrying on a sharing violation
func remove(name string) (err error) {
const maxTries = 10
var sleepTime = 1 * time.Millisecond
for i := 0; i < maxTries; i++ {
err = os.Remove(name)
if err == nil {
break
}
pathErr, ok := err.(*os.PathError)
if !ok {
break
}
if pathErr.Err != ERROR_SHARING_VIOLATION {
break
}
fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
time.Sleep(sleepTime)
sleepTime <<= 1
}
return err
}
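
The retry shape in remove generalises; a sketch of the same doubling backoff with the operation abstracted out (maxTries and the initial delay mirror the constants above):

package sketch

import "time"

// withRetries calls op up to maxTries times, doubling the delay after
// each failure, and returns the last error seen.
func withRetries(maxTries int, initial time.Duration, op func() error) (err error) {
	delay := initial
	for i := 0; i < maxTries; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(delay)
		delay *= 2
	}
	return err
}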


@@ -0,0 +1,202 @@
// Package quickxorhash provides the quickXorHash algorithm which is a
// quick, simple non-cryptographic hash algorithm that works by XORing
// the bytes in a circular-shifting fashion.
//
// It is used by Microsoft Onedrive for Business to hash data.
//
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash
// This code was ported from the code snippet linked from
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// Which has the copyright
// ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
const (
// BlockSize is the preferred size for hashing
BlockSize = 64
// Size of the output checksum
Size = 20
bitsInLastCell = 32
shift = 11
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
)
type quickXorHash struct {
data [dataSize]uint64
lengthSoFar uint64
shiftSoFar int
}
// New returns a new hash.Hash computing the quickXorHash checksum.
func New() hash.Hash {
return &quickXorHash{}
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
currentshift := q.shiftSoFar
// The bitvector where we'll start xoring
vectorArrayIndex := currentshift / 64
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
}
for i := 0; i < iterations; i++ {
isLastCell := vectorArrayIndex == len(q.data)-1
var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There are at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
}
}
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
return len(p), nil
}
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for _, d := range q.data[:len(q.data)-1] {
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
}
// remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
// XOR the file length with the least significant bits in little endian format
d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2))
h[Size-5] ^= byte(d >> (8 * 3))
h[Size-4] ^= byte(d >> (8 * 4))
h[Size-3] ^= byte(d >> (8 * 5))
h[Size-2] ^= byte(d >> (8 * 6))
h[Size-1] ^= byte(d >> (8 * 7))
return h
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum()
return append(b, hash[:]...)
}
// Reset resets the Hash to its initial state.
func (q *quickXorHash) Reset() {
*q = quickXorHash{}
}
// Size returns the number of bytes Sum will return.
func (q *quickXorHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (q *quickXorHash) BlockSize() int {
return BlockSize
}
// Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) [Size]byte {
var d quickXorHash
_, _ = d.Write(data)
return d.checkSum()
}
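
A usage sketch, with the import path assumed from this vendor tree; OneDrive for Business reports this hash base64-encoded, so that is the form produced here:

package sketch

import (
	"encoding/base64"

	"github.com/ncw/rclone/backend/onedrive/quickxorhash"
)

// quickXor returns the base64-encoded quickXorHash of data.
func quickXor(data []byte) string {
	sum := quickxorhash.Sum(data)
	return base64.StdEncoding.EncodeToString(sum[:])
}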

vendor/github.com/ncw/rclone/backend/s3/s3.go generated vendored Executable file

File diff suppressed because it is too large.

vendor/github.com/ncw/rclone/backend/s3/v2sign.go generated vendored Executable file

@@ -0,0 +1,115 @@
// v2 signing
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/http"
"sort"
"strings"
"time"
)
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"acl": {},
"location": {},
"logging": {},
"notification": {},
"partNumber": {},
"policy": {},
"requestPayment": {},
"torrent": {},
"uploadId": {},
"uploads": {},
"versionId": {},
"versioning": {},
"versions": {},
"response-content-type": {},
"response-content-language": {},
"response-expires": {},
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
}
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
// Sort out URI
uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" {
uri = "/"
}
// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
// Look for query parameters which need to be added to the signature
params := req.URL.Query()
var queriesToSign []string
for k, vs := range params {
if _, ok := s3ParamsToSign[k]; ok {
for _, v := range vs {
if v == "" {
queriesToSign = append(queriesToSign, k)
} else {
queriesToSign = append(queriesToSign, k+"="+v)
}
}
}
}
// Add query parameters to URI
if len(queriesToSign) > 0 {
sort.StringSlice(queriesToSign).Sort()
uri += "?" + strings.Join(queriesToSign, "&")
}
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}
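
Because sign is unexported, an example has to live inside package s3 itself; a test-style sketch with placeholder credentials and bucket URL, checking only the two headers sign is shown to set:

package s3

import (
	"net/http"
	"testing"
)

func TestSignSketch(t *testing.T) {
	req, err := http.NewRequest("GET", "https://bucket.example.s3.amazonaws.com/key?acl", nil)
	if err != nil {
		t.Fatal(err)
	}
	sign("AKIDEXAMPLE", "topsecret", req) // placeholder credentials
	if req.Header.Get("Date") == "" {
		t.Error("expected Date header to be set")
	}
	if req.Header.Get("Authorization") == "" {
		t.Error("expected Authorization header to be set")
	}
}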