commit c8eb52f9ba
Author: bel
Date: 2020-01-13 03:37:51 +00:00

2023 changed files with 702080 additions and 0 deletions

.rclone_repo/cmd/about/about.go Executable file

@@ -0,0 +1,112 @@
package about
import (
"encoding/json"
"fmt"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
jsonOutput bool
fullOutput bool
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
commandDefinition.Flags().BoolVar(&fullOutput, "full", false, "Full numbers instead of SI units")
}
// printValue formats uv to be output
func printValue(what string, uv *int64) {
what += ":"
if uv == nil {
return
}
var val string
if fullOutput {
val = fmt.Sprintf("%d", *uv)
} else {
val = fs.SizeSuffix(*uv).String()
}
fmt.Printf("%-9s%v\n", what, val)
}
var commandDefinition = &cobra.Command{
Use: "about remote:",
Short: `Get quota information from the remote.`,
Long: `
Get quota information from the remote, like bytes used/free/quota and bytes
used in the trash. Not supported by all remotes.
This will print to stdout something like this:
Total: 17G
Used: 7.444G
Free: 1.315G
Trashed: 100.000M
Other: 8.241G
Where the fields are:
* Total: total size available.
* Used: total size used
* Free: total amount this user could upload.
* Trashed: total amount in the trash
* Other: total amount in other storage (eg Gmail, Google Photos)
* Objects: total number of objects in the storage
Note that not all the backends provide all the fields - they will be
missing if they are not known for that backend. Where it is known
that the value is unlimited the value will also be omitted.
Use the --full flag to see the numbers written out in full, eg
Total: 18253611008
Used: 7993453766
Free: 1411001220
Trashed: 104857602
Other: 8849156022
Use the --json flag for a computer readable output, eg
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
doAbout := f.Features().About
if doAbout == nil {
return errors.Errorf("%v doesn't support about", f)
}
u, err := doAbout()
if err != nil {
return errors.Wrap(err, "About call failed")
}
if jsonOutput {
out := json.NewEncoder(os.Stdout)
out.SetIndent("", "\t")
return out.Encode(u)
}
printValue("Total", u.Total)
printValue("Used", u.Used)
printValue("Free", u.Free)
printValue("Trashed", u.Trashed)
printValue("Other", u.Other)
printValue("Objects", u.Objects)
return nil
})
},
}
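
The Run function above treats About as an optional feature: f.Features().About is nil when the backend has no quota support, so it is probed before being called. A minimal standalone sketch of that nil-function-pointer pattern, using hypothetical Quota and Backend types rather than rclone's real API:

package main

import "fmt"

// Quota mirrors the shape of the usage data an About call returns;
// pointer fields let a backend omit values it doesn't know.
type Quota struct {
	Total *int64
	Used  *int64
}

// Backend carries About as a nullable function value - nil means
// "not supported", which is how f.Features().About behaves above.
type Backend struct {
	About func() (*Quota, error)
}

func main() {
	b := Backend{} // a remote without quota support
	if b.About == nil {
		fmt.Println("backend doesn't support about")
		return
	}
	q, err := b.About()
	if err != nil {
		fmt.Println("about call failed:", err)
		return
	}
	fmt.Printf("quota: %+v\n", q)
}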

.rclone_repo/cmd/all/all.go Executable file

@@ -0,0 +1,56 @@
// Package all imports all the commands
package all
import (
// Active commands
_ "github.com/ncw/rclone/cmd"
_ "github.com/ncw/rclone/cmd/about"
_ "github.com/ncw/rclone/cmd/authorize"
_ "github.com/ncw/rclone/cmd/cachestats"
_ "github.com/ncw/rclone/cmd/cat"
_ "github.com/ncw/rclone/cmd/check"
_ "github.com/ncw/rclone/cmd/cleanup"
_ "github.com/ncw/rclone/cmd/cmount"
_ "github.com/ncw/rclone/cmd/config"
_ "github.com/ncw/rclone/cmd/copy"
_ "github.com/ncw/rclone/cmd/copyto"
_ "github.com/ncw/rclone/cmd/copyurl"
_ "github.com/ncw/rclone/cmd/cryptcheck"
_ "github.com/ncw/rclone/cmd/cryptdecode"
_ "github.com/ncw/rclone/cmd/dbhashsum"
_ "github.com/ncw/rclone/cmd/dedupe"
_ "github.com/ncw/rclone/cmd/delete"
_ "github.com/ncw/rclone/cmd/deletefile"
_ "github.com/ncw/rclone/cmd/genautocomplete"
_ "github.com/ncw/rclone/cmd/gendocs"
_ "github.com/ncw/rclone/cmd/hashsum"
_ "github.com/ncw/rclone/cmd/info"
_ "github.com/ncw/rclone/cmd/link"
_ "github.com/ncw/rclone/cmd/listremotes"
_ "github.com/ncw/rclone/cmd/ls"
_ "github.com/ncw/rclone/cmd/lsd"
_ "github.com/ncw/rclone/cmd/lsf"
_ "github.com/ncw/rclone/cmd/lsjson"
_ "github.com/ncw/rclone/cmd/lsl"
_ "github.com/ncw/rclone/cmd/md5sum"
_ "github.com/ncw/rclone/cmd/memtest"
_ "github.com/ncw/rclone/cmd/mkdir"
_ "github.com/ncw/rclone/cmd/mount"
_ "github.com/ncw/rclone/cmd/move"
_ "github.com/ncw/rclone/cmd/moveto"
_ "github.com/ncw/rclone/cmd/ncdu"
_ "github.com/ncw/rclone/cmd/obscure"
_ "github.com/ncw/rclone/cmd/purge"
_ "github.com/ncw/rclone/cmd/rc"
_ "github.com/ncw/rclone/cmd/rcat"
_ "github.com/ncw/rclone/cmd/reveal"
_ "github.com/ncw/rclone/cmd/rmdir"
_ "github.com/ncw/rclone/cmd/rmdirs"
_ "github.com/ncw/rclone/cmd/serve"
_ "github.com/ncw/rclone/cmd/sha1sum"
_ "github.com/ncw/rclone/cmd/size"
_ "github.com/ncw/rclone/cmd/sync"
_ "github.com/ncw/rclone/cmd/touch"
_ "github.com/ncw/rclone/cmd/tree"
_ "github.com/ncw/rclone/cmd/version"
)
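
Every package imported above registers its command from an init function, so the blank imports exist purely for those side effects. A minimal sketch of the pattern, collapsed into one file, with a hypothetical addCommand standing in for cmd.Root.AddCommand:

package main

import "fmt"

// commands stands in for cmd.Root's list of subcommands.
var commands []string

// addCommand plays the role of cmd.Root.AddCommand.
func addCommand(name string) { commands = append(commands, name) }

// In the real layout this init lives in its own package and runs
// because of a blank import like _ "github.com/ncw/rclone/cmd/about".
func init() { addCommand("about") }

func main() { fmt.Println("registered:", commands) }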

.rclone_repo/cmd/authorize/authorize.go Executable file

@@ -0,0 +1,24 @@
package authorize
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
config.Authorize(args)
},
}

.rclone_repo/cmd/cachestats/cachestats.go Executable file

@@ -0,0 +1,55 @@
// +build !plan9

package cachestats
import (
"encoding/json"
"fmt"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/cmd"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "cachestats source:",
Short: `Print cache stats for a remote`,
Long: `
Print cache stats for a remote in JSON format
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
var fsCache *cache.Fs
fsCache, ok := fsrc.(*cache.Fs)
if !ok {
unwrap := fsrc.Features().UnWrap
if unwrap != nil {
fsCache, ok = unwrap().(*cache.Fs)
}
if !ok {
return errors.Errorf("%s: is not a cache remote", fsrc.Name())
}
}
m, err := fsCache.Stats()
if err != nil {
return err
}
raw, err := json.MarshalIndent(m, "", " ")
if err != nil {
return err
}
fmt.Printf("%s\n", string(raw))
return nil
})
},
}
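
The cast above tries *cache.Fs directly and, failing that, asks the optional UnWrap feature for the wrapped filesystem and asserts again. A toy sketch of that unwrap-then-assert lookup, with hypothetical Remote and Wrapper interfaces in place of rclone's fs types:

package main

import "fmt"

// Remote is the common interface; Wrapper is the optional unwrap
// capability, analogous to fsrc.Features().UnWrap.
type Remote interface{ Name() string }
type Wrapper interface{ UnWrap() Remote }

type cacheFs struct{} // the concrete type we are looking for

func (cacheFs) Name() string { return "cache" }

type cryptFs struct{ inner Remote } // wraps another remote

func (cryptFs) Name() string     { return "crypt" }
func (c cryptFs) UnWrap() Remote { return c.inner }

// findCache mirrors the two-step lookup in the command above.
func findCache(r Remote) (cacheFs, bool) {
	if c, ok := r.(cacheFs); ok {
		return c, true
	}
	if w, ok := r.(Wrapper); ok {
		c, ok := w.UnWrap().(cacheFs)
		return c, ok
	}
	return cacheFs{}, false
}

func main() {
	_, ok := findCache(cryptFs{inner: cacheFs{}})
	fmt.Println("found cache behind wrapper:", ok)
}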

.rclone_repo/cmd/cachestats/cachestats_unsupported.go Executable file

@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9

package cachestats

.rclone_repo/cmd/cat/cat.go Executable file

@@ -0,0 +1,80 @@
package cat
import (
"io"
"io/ioutil"
"log"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
// Globals
var (
head = int64(0)
tail = int64(0)
offset = int64(0)
count = int64(-1)
discard = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
commandDefinition.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
commandDefinition.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
commandDefinition.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
commandDefinition.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
}
var commandDefinition = &cobra.Command{
Use: "cat remote:path",
Short: `Concatenates any files and sends them to stdout.`,
Long: `
rclone cat sends any files to standard output.
You can use it like this to output a single file
rclone cat remote:path/to/file
Or like this to output any file in dir or subdirectories.
rclone cat remote:path/to/dir
Or like this to output any .txt files in dir or subdirectories.
rclone --include "*.txt" cat remote:path/to/dir
Use the --head flag to print characters only at the start, --tail for
the end and --offset and --count to print a section in the middle.
Note that if offset is negative it will count from the end, so
--offset -1 --count 1 is equivalent to --tail 1.
`,
Run: func(command *cobra.Command, args []string) {
usedOffset := offset != 0 || count >= 0
usedHead := head > 0
usedTail := tail > 0
if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
log.Fatalf("Can only use one of --head, --tail or --offset with --count")
}
if head > 0 {
offset = 0
count = head
}
if tail > 0 {
offset = -tail
count = -1
}
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
var w io.Writer = os.Stdout
if discard {
w = ioutil.Discard
}
cmd.Run(false, false, command, func() error {
return operations.Cat(fsrc, w, offset, count)
})
},
}
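
Run above folds the four flags into the single (offset, count) pair that operations.Cat consumes: --head N becomes (0, N), --tail N becomes (-N, -1), and count -1 means read to the end. A small sketch of that normalization under the same assumed semantics:

package main

import "fmt"

// normalize reduces head/tail to the offset/count pair the cat
// operation consumes; count == -1 means "to the end" and a negative
// offset counts back from the end.
func normalize(head, tail, offset, count int64) (int64, int64) {
	if head > 0 {
		return 0, head
	}
	if tail > 0 {
		return -tail, -1
	}
	return offset, count
}

func main() {
	fmt.Println(normalize(16, 0, 0, -1)) // --head 16 -> 0 16
	fmt.Println(normalize(0, 1, 0, -1))  // --tail 1  -> -1 -1 (last byte)
}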

.rclone_repo/cmd/check/check.go Executable file

@@ -0,0 +1,51 @@
package check
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
// Globals
var (
download = false
oneway = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
commandDefinition.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
}
var commandDefinition = &cobra.Command{
Use: "check source:path dest:path",
Short: `Checks the files in the source and destination match.`,
Long: `
Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
match. It doesn't alter the source or destination.
If you supply the --size-only flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.
If you supply the --download flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
If you supply the --one-way flag, it will only check that files in source
match the files in destination, not the other way around. Meaning extra files in
destination that are not in the source will not trigger an error.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, false, command, func() error {
if download {
return operations.CheckDownload(fdst, fsrc, oneway)
}
return operations.Check(fdst, fsrc, oneway)
})
},
}
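
As the help text above says, --one-way only reports source files that are missing or differ on the destination, while the default also reports destination files absent from the source. A toy illustration of the difference over plain name sets (not rclone's actual comparison logic):

package main

import "fmt"

// missing returns the names present in a but not in b.
func missing(a, b map[string]bool) (out []string) {
	for name := range a {
		if !b[name] {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	src := map[string]bool{"one.txt": true}
	dst := map[string]bool{"one.txt": true, "extra.txt": true}
	fmt.Println("missing on dst:", missing(src, dst)) // reported in both modes
	fmt.Println("extra on dst:", missing(dst, src))   // reported only without --one-way
}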

.rclone_repo/cmd/cleanup/cleanup.go Executable file

@@ -0,0 +1,27 @@
package cleanup
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
return operations.CleanUp(fsrc)
})
},
}

.rclone_repo/cmd/cmd.go Executable file

@@ -0,0 +1,550 @@
// Package cmd implements the rclone command
//
// It is in a sub package so its internals can be re-used elsewhere
package cmd
// FIXME only attach the remote flags when using a remote???
// would probably mean bringing all the flags in to here? Or define some flagsets in fs...
import (
"fmt"
"log"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configflags"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/filter/filterflags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fspath"
fslog "github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/lib/atexit"
)
// Globals
var (
// Flags
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable)")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
version bool
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)")
// Errors
errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error")
errorNotEnoughArguments = errors.New("not enough arguments")
errorTooManyArguments = errors.New("too many arguments")
)
const (
exitCodeSuccess = iota
exitCodeUsageError
exitCodeUncategorizedError
exitCodeDirNotFound
exitCodeFileNotFound
exitCodeRetryError
exitCodeNoRetryError
exitCodeFatalError
exitCodeTransferExceeded
)
// Root is the main rclone command
var Root = &cobra.Command{
Use: "rclone",
Short: "Sync files and directories to and from local and remote object stores - " + fs.Version,
Long: `
Rclone is a command line program to sync files and directories to and
from various cloud storage systems and file transfer services, such as:
* Amazon Drive
* Amazon S3
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Jottacloud
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* pCloud
* QingStor
* SFTP
* WebDAV / ownCloud / Nextcloud
* Yandex Disk
* The local filesystem
Features
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
* https://rclone.org/
`,
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
atexit.Run()
},
}
// runRoot implements the main rclone command with no subcommands
func runRoot(cmd *cobra.Command, args []string) {
if version {
ShowVersion()
resolveExitCode(nil)
} else {
_ = Root.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command not found.\n")
resolveExitCode(errorCommandNotFound)
}
}
func init() {
// Add global flags
configflags.AddFlags(pflag.CommandLine)
filterflags.AddFlags(pflag.CommandLine)
rcflags.AddFlags(pflag.CommandLine)
Root.Run = runRoot
Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
cobra.OnInitialize(initConfig)
}
// ShowVersion prints the version to stdout
func ShowVersion() {
fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf("- go version: %s\n", runtime.Version())
}
// NewFsFile creates a Fs from a name but may point to a file.
//
// It returns a string with the file name if it points to a file,
// otherwise "".
func NewFsFile(remote string) (fs.Fs, string) {
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := fs.NewFs(remote)
switch err {
case fs.ErrorIsFile:
return f, path.Base(fsPath)
case nil:
return f, ""
default:
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
}
// newFsFileAddFilter creates a src Fs from a name
//
// This works the same as NewFsFile however it adds filters to the Fs
// to limit it to a single file if the remote pointed to a file.
func newFsFileAddFilter(remote string) (fs.Fs, string) {
f, fileName := NewFsFile(remote)
if fileName != "" {
if !filter.Active.InActive() {
err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
fs.CountError(err)
log.Fatal(err.Error())
}
// Limit transfers to this file
err := filter.Active.AddFile(fileName)
if err != nil {
fs.CountError(err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
}
return f, fileName
}
// NewFsSrc creates a new src fs from the arguments.
//
// The source can be a file or a directory - if a file then it will
// limit the Fs to a single file.
func NewFsSrc(args []string) fs.Fs {
fsrc, _ := newFsFileAddFilter(args[0])
return fsrc
}
// newFsDir creates an Fs from a name
//
// This must point to a directory
func newFsDir(remote string) fs.Fs {
f, err := fs.NewFs(remote)
if err != nil {
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
}
// NewFsDir creates a new Fs from the arguments
//
// The argument must point to a directory
func NewFsDir(args []string) fs.Fs {
fdst := newFsDir(args[0])
return fdst
}
// NewFsSrcDst creates a new src and dst fs from the arguments
func NewFsSrcDst(args []string) (fs.Fs, fs.Fs) {
fsrc, _ := newFsFileAddFilter(args[0])
fdst := newFsDir(args[1])
return fsrc, fdst
}
// NewFsSrcFileDst creates a new src and dst fs from the arguments
//
// The source may be a file, in which case the source Fs and file name are returned
func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs) {
fsrc, srcFileName = NewFsFile(args[0])
fdst = newFsDir(args[1])
return fsrc, srcFileName, fdst
}
// NewFsSrcDstFiles creates a new src and dst fs from the arguments
// If src is a file then srcFileName and dstFileName will be non-empty
func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
fsrc, srcFileName = newFsFileAddFilter(args[0])
// If copying a file...
dstRemote := args[1]
// If file exists then srcFileName != "", however if the file
// doesn't exist then we assume it is a directory...
if srcFileName != "" {
dstRemote, dstFileName = fspath.Split(dstRemote)
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
log.Fatalf("%q is a directory", args[1])
}
}
fdst, err := fs.NewFs(dstRemote)
switch err {
case fs.ErrorIsFile:
fs.CountError(err)
log.Fatalf("Source doesn't exist or is a directory and destination is a file")
case nil:
default:
fs.CountError(err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
return
}
// NewFsDstFile creates a new dst fs with a destination file name from the arguments
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
dstRemote, dstFileName := fspath.Split(args[0])
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
log.Fatalf("%q is a directory", args[0])
}
fdst = newFsDir(dstRemote)
return
}
// ShowStats returns true if the user added a `--stats` flag to the command line.
//
// This is called by Run to override the default value of the
// showStats passed in.
func ShowStats() bool {
statsIntervalFlag := pflag.Lookup("stats")
return statsIntervalFlag != nil && statsIntervalFlag.Changed
}
// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
var err error
var stopStats chan struct{}
if !showStats && ShowStats() {
showStats = true
}
if fs.Config.Progress {
stopStats = startProgress()
} else if showStats {
stopStats = StartStats()
}
SigInfoHandler()
for try := 1; try <= *retries; try++ {
err = f()
if !Retry || (err == nil && !accounting.Stats.Errored()) {
if try > 1 {
fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
}
break
}
if fserrors.IsFatalError(err) {
fs.Errorf(nil, "Fatal error received - not attempting retries")
break
}
if fserrors.IsNoRetryError(err) {
fs.Errorf(nil, "Can't retry this error - not attempting retries")
break
}
if err != nil {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), err)
} else {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, accounting.Stats.GetErrors())
}
if try < *retries {
accounting.Stats.ResetErrors()
}
if *retriesInterval > 0 {
time.Sleep(*retriesInterval)
}
}
if showStats {
close(stopStats)
}
if err != nil {
log.Printf("Failed to %s: %v", cmd.Name(), err)
resolveExitCode(err)
}
if showStats && (accounting.Stats.Errored() || *statsInterval > 0) {
accounting.Stats.Log()
}
fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())
// dump all running go-routines
if fs.Config.Dump&fs.DumpGoRoutines != 0 {
err = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
if err != nil {
fs.Errorf(nil, "Failed to dump goroutines: %v", err)
}
}
// dump open files
if fs.Config.Dump&fs.DumpOpenFiles != 0 {
c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid()))
c.Stdout = os.Stdout
c.Stderr = os.Stderr
err = c.Run()
if err != nil {
fs.Errorf(nil, "Failed to list open files: %v", err)
}
}
if accounting.Stats.Errored() {
resolveExitCode(accounting.Stats.GetLastError())
}
}
// CheckArgs checks there are enough arguments and prints a message if not
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
if len(args) < MinArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
// os.Exit(1)
resolveExitCode(errorNotEnoughArguments)
} else if len(args) > MaxArgs {
_ = cmd.Usage()
_, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
// os.Exit(1)
resolveExitCode(errorTooManyArguments)
}
}
// StartStats prints the stats every statsInterval
//
// It returns a channel which should be closed to stop the stats.
func StartStats() chan struct{} {
stopStats := make(chan struct{})
if *statsInterval > 0 {
go func() {
ticker := time.NewTicker(*statsInterval)
for {
select {
case <-ticker.C:
accounting.Stats.Log()
case <-stopStats:
ticker.Stop()
return
}
}
}()
}
return stopStats
}
// initConfig is run by cobra after initialising the flags
func initConfig() {
// Start the logger
fslog.InitLogging()
// Finish parsing any command line flags
configflags.SetFlags()
// Load filters
var err error
filter.Active, err = filter.NewFilter(&filterflags.Opt)
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}
// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
// Start the remote control if configured
rc.Start(&rcflags.Opt)
// Setup CPU profiling if desired
if *cpuProfile != "" {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.CountError(err)
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.CountError(err)
log.Fatal(err)
}
atexit.Register(func() {
pprof.StopCPUProfile()
})
}
// Setup memory profiling if desired
if *memProfile != "" {
atexit.Register(func() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.CountError(err)
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.CountError(err)
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.CountError(err)
log.Fatal(err)
}
})
}
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); !m {
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
fs.Config.DataRateUnit = "bytes"
} else {
fs.Config.DataRateUnit = *dataRateUnit
}
}
func resolveExitCode(err error) {
atexit.Run()
if err == nil {
os.Exit(exitCodeSuccess)
}
_, unwrapped := fserrors.Cause(err)
switch {
case unwrapped == fs.ErrorDirNotFound:
os.Exit(exitCodeDirNotFound)
case unwrapped == fs.ErrorObjectNotFound:
os.Exit(exitCodeFileNotFound)
case unwrapped == errorUncategorized:
os.Exit(exitCodeUncategorizedError)
case unwrapped == accounting.ErrorMaxTransferLimitReached:
os.Exit(exitCodeTransferExceeded)
case fserrors.ShouldRetry(err):
os.Exit(exitCodeRetryError)
case fserrors.IsNoRetryError(err):
os.Exit(exitCodeNoRetryError)
case fserrors.IsFatalError(err):
os.Exit(exitCodeFatalError)
default:
os.Exit(exitCodeUsageError)
}
}
// AddBackendFlags creates flags for all the backend options
func AddBackendFlags() {
for _, fsInfo := range fs.Registry {
done := map[string]struct{}{}
for i := range fsInfo.Options {
opt := &fsInfo.Options[i]
// Skip if done already (eg with Provider options)
if _, doneAlready := done[opt.Name]; doneAlready {
continue
}
done[opt.Name] = struct{}{}
// Make a flag from each option
name := strings.Replace(opt.Name, "_", "-", -1) // convert snake_case to kebab-case
if !opt.NoPrefix {
name = fsInfo.Prefix + "-" + name
}
found := pflag.CommandLine.Lookup(name) != nil
if !found {
// Take first line of help only
help := strings.TrimSpace(opt.Help)
if nl := strings.IndexRune(help, '\n'); nl >= 0 {
help = help[:nl]
}
help = strings.TrimSpace(help)
flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}
// Hide on the command line if requested
if opt.Hide&fs.OptionHideCommandLine != 0 {
flag.Hidden = true
}
} else {
fs.Errorf(nil, "Not adding duplicate flag --%s", name)
}
//flag.Hidden = true
}
}
}
// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
AddBackendFlags()
if err := Root.Execute(); err != nil {
log.Fatalf("Fatal error: %v", err)
}
}
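
The Run helper above retries the whole operation up to *retries times, stopping early on success or on errors classified as fatal or not-retryable, and optionally sleeping between attempts. A condensed sketch of that control flow, with a stand-in classifier instead of the fserrors package:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errFatal = errors.New("fatal")

// isFatal stands in for fserrors.IsFatalError.
func isFatal(err error) bool { return errors.Is(err, errFatal) }

// runWithRetries mirrors the shape of cmd.Run's retry loop.
func runWithRetries(retries int, sleep time.Duration, f func() error) error {
	var err error
	for try := 1; try <= retries; try++ {
		err = f()
		if err == nil {
			return nil
		}
		if isFatal(err) {
			return err // not worth retrying
		}
		fmt.Printf("attempt %d/%d failed: %v\n", try, retries, err)
		if try < retries && sleep > 0 {
			time.Sleep(sleep)
		}
	}
	return err
}

func main() {
	calls := 0
	err := runWithRetries(3, 0, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println("result:", err, "after", calls, "calls")
}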

.rclone_repo/cmd/cmount/fs.go Executable file

@@ -0,0 +1,583 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows

package cmount
import (
"io"
"os"
"path"
"sync"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
)
const fhUnset = ^uint64(0)
// FS represents the top level filing system
type FS struct {
VFS *vfs.VFS
f fs.Fs
ready chan (struct{})
mu sync.Mutex // to protect the below
handles []vfs.Handle
}
// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
fsys := &FS{
VFS: vfs.New(f, &vfsflags.Opt),
f: f,
ready: make(chan (struct{})),
}
return fsys
}
// Open a handle returning an integer file handle
func (fsys *FS) openHandle(handle vfs.Handle) (fh uint64) {
fsys.mu.Lock()
defer fsys.mu.Unlock()
var i int
var oldHandle vfs.Handle
for i, oldHandle = range fsys.handles {
if oldHandle == nil {
fsys.handles[i] = handle
goto found
}
}
fsys.handles = append(fsys.handles, handle)
i = len(fsys.handles) - 1
found:
return uint64(i)
}
// get the handle for fh, call with the lock held
func (fsys *FS) _getHandle(fh uint64) (i int, handle vfs.Handle, errc int) {
if fh >= uint64(len(fsys.handles)) {
fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh)
return i, nil, -fuse.EBADF
}
i = int(fh)
handle = fsys.handles[i]
if handle == nil {
fs.Debugf(nil, "Bad file handle: nil handle: 0x%X", fh)
return i, nil, -fuse.EBADF
}
return i, handle, 0
}
// Get the handle for the file handle
func (fsys *FS) getHandle(fh uint64) (handle vfs.Handle, errc int) {
fsys.mu.Lock()
_, handle, errc = fsys._getHandle(fh)
fsys.mu.Unlock()
return
}
// Close the handle
func (fsys *FS) closeHandle(fh uint64) (errc int) {
fsys.mu.Lock()
i, _, errc := fsys._getHandle(fh)
if errc == 0 {
fsys.handles[i] = nil
}
fsys.mu.Unlock()
return
}
// lookup a Node given a path
func (fsys *FS) lookupNode(path string) (node vfs.Node, errc int) {
node, err := fsys.VFS.Stat(path)
return node, translateError(err)
}
// lookup a Dir given a path
func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
dir, ok := node.(*vfs.Dir)
if !ok {
return nil, -fuse.ENOTDIR
}
return dir, 0
}
// lookup a parent Dir given a path returning the dir and the leaf
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) {
parentDir, leaf := path.Split(filePath)
dir, errc = fsys.lookupDir(parentDir)
return leaf, dir, errc
}
// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*vfs.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}
// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
func (fsys *FS) getNode(path string, fh uint64) (node vfs.Node, handle vfs.Handle, errc int) {
if fh == fhUnset {
node, errc = fsys.lookupNode(path)
} else {
handle, errc = fsys.getHandle(fh)
if errc == 0 {
node = handle.Node()
}
}
return
}
// stat fills up the stat block for Node
func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
//stat.Rdev
stat.Size = int64(Size)
t := fuse.NewTimespec(modTime)
stat.Atim = t
stat.Mtim = t
stat.Ctim = t
stat.Blksize = 512
stat.Blocks = int64(Blocks)
stat.Birthtim = t
// fs.Debugf(nil, "stat = %+v", *stat)
return 0
}
// Init is called after the filesystem is ready
func (fsys *FS) Init() {
defer log.Trace(fsys.f, "")("")
close(fsys.ready)
}
// Destroy is called when it is unmounted (note that depending on how
// the file system is terminated the file system may not receive the
// Destroy call).
func (fsys *FS) Destroy() {
defer log.Trace(fsys.f, "")("")
}
// Getattr reads the attributes for path
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
node, _, errc := fsys.getNode(path, fh)
if errc == 0 {
errc = fsys.stat(node, stat)
}
return
}
// Opendir opens path as a directory
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
defer log.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openHandle(handle)
}
// Readdir reads the directory at dirPath
func (fsys *FS) Readdir(dirPath string,
fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
ofst int64,
fh uint64) (errc int) {
itemsRead := -1
defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
node, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
items, err := node.Readdir(-1)
if err != nil {
return translateError(err)
}
// Optionally, create a struct stat that describes the file as
// for getattr (but FUSE only looks at st_ino and the
// file-type bits of st_mode).
//
// FIXME If you call host.SetCapReaddirPlus() then WinFsp will
// use the full stat information - a useful optimization on
// Windows.
//
// NB we are using the first mode for readdir: The readdir
// implementation ignores the offset parameter, and passes
// zero to the filler function's offset. The filler function
// will not return '1' (unless an error happens), so the whole
// directory is read in a single readdir operation.
fill(".", nil, 0)
fill("..", nil, 0)
for _, item := range items {
node, ok := item.(vfs.Node)
if ok {
fill(node.Name(), nil, 0)
}
}
itemsRead = len(items)
return 0
}
// Releasedir finished reading the directory
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
return fsys.closeHandle(fh)
}
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
const fsBlocks = (1 << 50) / blockSize
stat.Blocks = fsBlocks // Total data blocks in file system.
stat.Bfree = fsBlocks // Free blocks in file system.
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
stat.Files = 1E9 // Total files in file system.
stat.Ffree = 1E9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
total, used, free := fsys.VFS.Statfs()
if total >= 0 {
stat.Blocks = uint64(total) / blockSize
}
if used >= 0 {
stat.Bfree = stat.Blocks - uint64(used)/blockSize
}
if free >= 0 {
stat.Bavail = uint64(free) / blockSize
}
mountlib.ClipBlocks(&stat.Blocks)
mountlib.ClipBlocks(&stat.Bfree)
mountlib.ClipBlocks(&stat.Bavail)
return 0
}
// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
defer log.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
// translate the fuse flags to os flags
flags = translateOpenFlags(flags)
handle, err := fsys.VFS.OpenFile(path, flags, 0777)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openHandle(handle)
}
// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
defer log.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc, fhUnset
}
file, err := parentDir.Create(leaf, flags)
if err != nil {
return translateError(err), fhUnset
}
// translate the fuse flags to os flags
flags = translateOpenFlags(flags) | os.O_CREATE
handle, err := file.Open(flags)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openHandle(handle)
}
// Truncate truncates a file to size
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
defer log.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
node, handle, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc
}
var err error
if handle != nil {
err = handle.Truncate(size)
} else {
err = node.Truncate(size)
}
if err != nil {
return translateError(err)
}
return 0
}
// Read data from file handle
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
n, err := handle.ReadAt(buff, ofst)
if err == io.EOF {
} else if err != nil {
return translateError(err)
}
return n
}
// Write data to file handle
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
n, err := handle.WriteAt(buff, ofst)
if err != nil {
return translateError(err)
}
return n
}
// Flush flushes an open file descriptor or path
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
return translateError(handle.Flush())
}
// Release closes the file if still open
func (fsys *FS) Release(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
_ = fsys.closeHandle(fh)
return translateError(handle.Release())
}
// Unlink removes a file.
func (fsys *FS) Unlink(filePath string) (errc int) {
defer log.Trace(filePath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
}
return translateError(parentDir.RemoveName(leaf))
}
// Mkdir creates a directory.
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
defer log.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
_, err := parentDir.Mkdir(leaf)
return translateError(err)
}
// Rmdir removes a directory
func (fsys *FS) Rmdir(dirPath string) (errc int) {
defer log.Trace(dirPath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
return translateError(parentDir.RemoveName(leaf))
}
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
return translateError(fsys.VFS.Rename(oldPath, newPath))
}
// Utimens changes the access and modification times of a file.
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
defer log.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
node, errc := fsys.lookupNode(path)
if errc != 0 {
return errc
}
var t time.Time
if tmsp == nil || len(tmsp) < 2 {
t = time.Now()
} else {
t = tmsp[1].Time()
}
return translateError(node.SetModTime(t))
}
// Mknod creates a file node.
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
defer log.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Fsync synchronizes file contents.
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Link creates a hard link to a file.
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
defer log.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
defer log.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Chown changes the owner and group of a file.
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
defer log.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Access checks file access permissions.
func (fsys *FS) Access(path string, mask uint32) (errc int) {
defer log.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Fsyncdir synchronizes directory contents.
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Setxattr sets extended attributes.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
return -fuse.ENOSYS
}
// Getxattr gets extended attributes.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
return -fuse.ENOSYS, nil
}
// Removexattr removes extended attributes.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
return -fuse.ENOSYS
}
// Listxattr lists extended attributes.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
return -fuse.ENOSYS
}
// Translate errors from mountlib
func translateError(err error) (errc int) {
if err == nil {
return 0
}
switch errors.Cause(err) {
case vfs.OK:
return 0
case vfs.ENOENT:
return -fuse.ENOENT
case vfs.EEXIST:
return -fuse.EEXIST
case vfs.EPERM:
return -fuse.EPERM
case vfs.ECLOSED:
return -fuse.EBADF
case vfs.ENOTEMPTY:
return -fuse.ENOTEMPTY
case vfs.ESPIPE:
return -fuse.ESPIPE
case vfs.EBADF:
return -fuse.EBADF
case vfs.EROFS:
return -fuse.EROFS
case vfs.ENOSYS:
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
}
// Translate Open Flags from FUSE to os (as used in the vfs layer)
func translateOpenFlags(inFlags int) (outFlags int) {
switch inFlags & fuse.O_ACCMODE {
case fuse.O_RDONLY:
outFlags = os.O_RDONLY
case fuse.O_WRONLY:
outFlags = os.O_WRONLY
case fuse.O_RDWR:
outFlags = os.O_RDWR
}
if inFlags&fuse.O_APPEND != 0 {
outFlags |= os.O_APPEND
}
if inFlags&fuse.O_CREAT != 0 {
outFlags |= os.O_CREATE
}
if inFlags&fuse.O_EXCL != 0 {
outFlags |= os.O_EXCL
}
if inFlags&fuse.O_TRUNC != 0 {
outFlags |= os.O_TRUNC
}
// NB O_SYNC isn't defined by fuse
return outFlags
}
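
openHandle above turns a vfs.Handle into the small integer FUSE expects by storing it in a slice, reusing the first nil slot before growing, and closeHandle clears the slot for reuse. A minimal sketch of that slot-reuse table, without the locking and with strings standing in for handles:

package main

import "fmt"

// table reuses freed slots so handle numbers stay small, mirroring
// FS.openHandle/closeHandle above.
type table struct{ slots []string }

// open stores h in the first free slot, growing only when full.
func (t *table) open(h string) int {
	for i, old := range t.slots {
		if old == "" {
			t.slots[i] = h
			return i
		}
	}
	t.slots = append(t.slots, h)
	return len(t.slots) - 1
}

// close frees the slot so a later open can reuse it.
func (t *table) close(fh int) {
	if fh >= 0 && fh < len(t.slots) {
		t.slots[fh] = ""
	}
}

func main() {
	var t table
	a := t.open("a") // 0
	b := t.open("b") // 1
	t.close(a)
	fmt.Println(t.open("c"), b) // 0 1 - slot 0 was reused
}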

.rclone_repo/cmd/cmount/mount.go Executable file

@@ -0,0 +1,243 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library

// +build cmount
// +build cgo
// +build linux darwin freebsd windows

package cmount
import (
"fmt"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
)
func init() {
name := "cmount"
if runtime.GOOS == "windows" {
name = "mount"
}
mountlib.NewMountCommand(name, Mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(device string, mountpoint string) (options []string) {
// Options
options = []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", fmt.Sprintf("attr_timeout=%g", mountlib.AttrTimeout.Seconds()),
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
}
if mountlib.DebugFUSE {
options = append(options, "-o", "debug")
}
// OSX options
if runtime.GOOS == "darwin" {
options = append(options, "-o", "volname="+mountlib.VolumeName)
if mountlib.NoAppleDouble {
options = append(options, "-o", "noappledouble")
}
if mountlib.NoAppleXattr {
options = append(options, "-o", "noapplexattr")
}
}
// Windows options
if runtime.GOOS == "windows" {
// These make WinFsp use the current user
options = append(options, "-o", "uid=-1")
options = append(options, "-o", "gid=-1")
options = append(options, "--FileSystemName=rclone")
}
if mountlib.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}
if mountlib.AllowOther {
options = append(options, "-o", "allow_other")
}
if mountlib.AllowRoot {
options = append(options, "-o", "allow_root")
}
if mountlib.DefaultPermissions {
options = append(options, "-o", "default_permissions")
}
if vfsflags.Opt.ReadOnly {
options = append(options, "-o", "ro")
}
if mountlib.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
if mountlib.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(mountlib.DaemonTimeout.Seconds())))
}
for _, option := range mountlib.ExtraOptions {
options = append(options, "-o", option)
}
for _, option := range mountlib.ExtraFlags {
options = append(options, option)
}
return options
}
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
// Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
if runtime.GOOS != "windows" {
fi, err := os.Stat(mountpoint)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "mountpoint")
}
if !fi.IsDir() {
return nil, nil, nil, errors.New("mountpoint is not a directory")
}
}
// Create underlying FS
fsys := NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Create options
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
fs.Debugf(f, "Mounting with options: %q", options)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
var err error
ok := host.Mount(mountpoint, options)
if !ok {
err = errors.New("mount failed")
fs.Errorf(f, "Mount failed")
}
errChan <- err
}()
// unmount
unmount := func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
fs.Debugf(nil, "Calling host.Unmount")
if host.Unmount() {
fs.Debugf(nil, "host.Unmount succeeded")
if runtime.GOOS == "windows" {
if !waitFor(func() bool {
_, err := os.Stat(mountpoint)
return err != nil
}) {
fs.Errorf(nil, "mountpoint %q didn't disappear after unmount - continuing anyway", mountpoint)
}
}
return nil
}
fs.Debugf(nil, "host.Unmount failed")
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-errChan:
err = errors.Wrap(err, "mount stopped before calling Init")
return nil, nil, nil, err
case <-fsys.ready:
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if runtime.GOOS == "windows" {
if !waitFor(func() bool {
_, err := os.Stat(mountpoint)
return err == nil
}) {
fs.Errorf(nil, "mountpoint %q didn't became available on mount - continuing anyway", mountpoint)
}
}
return fsys.VFS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, _, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
// Note cgofuse unmounts the fs on SIGINT etc
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
_ = sdnotify.Stopping()
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}
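
Rather than trusting a single os.Stat immediately after mount or unmount, the Windows paths above poll with waitFor, retrying every 10ms for up to 10s. A standalone sketch of the same polling helper (shortened to a 1s budget) and a typical call:

package main

import (
	"fmt"
	"time"
)

// waitFor polls fn until it returns true or ~1s elapses, the same
// shape as the helper in mount.go (which allows 10s).
func waitFor(fn func() bool) bool {
	const total = time.Second
	const step = 10 * time.Millisecond
	for i := 0; i < int(total/step); i++ {
		if fn() {
			return true
		}
		time.Sleep(step)
	}
	return false
}

func main() {
	// Simulate a condition that becomes true after 50ms.
	ready := time.Now().Add(50 * time.Millisecond)
	ok := waitFor(func() bool { return time.Now().After(ready) })
	fmt.Println("became ready:", ok)
}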

.rclone_repo/cmd/cmount/mount_test.go Executable file

@@ -0,0 +1,19 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows
// +build !race !windows

// FIXME this doesn't work with the race detector under Windows either
// hanging or producing lots of differences.

package cmount
import (
"testing"
"github.com/ncw/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
mounttest.RunTests(t, mount)
}

.rclone_repo/cmd/cmount/mount_unsupported.go Executable file

@@ -0,0 +1,6 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !linux,!darwin,!freebsd,!windows !cgo !cmount

package cmount

.rclone_repo/cmd/config/config.go Executable file

@@ -0,0 +1,141 @@
package config
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(configCommand)
configCommand.AddCommand(configEditCommand)
configCommand.AddCommand(configFileCommand)
configCommand.AddCommand(configShowCommand)
configCommand.AddCommand(configDumpCommand)
configCommand.AddCommand(configProvidersCommand)
configCommand.AddCommand(configCreateCommand)
configCommand.AddCommand(configUpdateCommand)
configCommand.AddCommand(configDeleteCommand)
configCommand.AddCommand(configPasswordCommand)
}
var configCommand = &cobra.Command{
Use: "config",
Short: `Enter an interactive configuration session.`,
Long: `Enter an interactive configuration session where you can set up new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
config.EditConfig()
},
}
var configEditCommand = &cobra.Command{
Use: "edit",
Short: configCommand.Short,
Long: configCommand.Long,
Run: configCommand.Run,
}
var configFileCommand = &cobra.Command{
Use: "file",
Short: `Show path of configuration file in use.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
config.ShowConfigLocation()
},
}
var configShowCommand = &cobra.Command{
Use: "show [<remote>]",
Short: `Print (decrypted) config file, or the config for a single remote.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 {
config.ShowConfig()
} else {
config.ShowRemote(args[0])
}
},
}
var configDumpCommand = &cobra.Command{
Use: "dump",
Short: `Dump the config file as JSON.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.Dump()
},
}
var configProvidersCommand = &cobra.Command{
Use: "providers",
Short: `List in JSON format all the providers and options.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.JSONListProviders()
},
}
var configCreateCommand = &cobra.Command{
Use: "create <name> <type> [<key> <value>]*",
Short: `Create a new remote with name, type and options.`,
Long: `
Create a new remote of <name> with <type> and options. The options
should be passed in pairs of <key> <value>.
For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
return config.CreateRemote(args[0], args[1], args[2:])
},
}
var configUpdateCommand = &cobra.Command{
Use: "update <name> [<key> <value>]+",
Short: `Update options in an existing remote.`,
Long: `
Update an existing remote's options. The options should be passed
in pairs of <key> <value>.
For example to update the env_auth field of a remote of name myremote you would do:
rclone config update myremote swift env_auth true
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
return config.UpdateRemote(args[0], args[1:])
},
}
var configDeleteCommand = &cobra.Command{
Use: "delete <name>",
Short: `Delete an existing remote <name>.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
config.DeleteRemote(args[0])
},
}
var configPasswordCommand = &cobra.Command{
Use: "password <name> [<key> <value>]+",
Short: `Update password in an existing remote.`,
Long: `
Update an existing remote's password. The password
should be passed in pairs of <key> <value>.
For example to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
return config.PasswordRemote(args[0], args[1:])
},
}

.rclone_repo/cmd/copy/copy.go Executable file

@@ -0,0 +1,64 @@
package copy
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/sync"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
Note that it is always the contents of the directory that is synced,
not the directory itself. So when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.
If dest:path doesn't exist, it is created and the source:path contents
go there.
For example
rclone copy source:sourcepath dest:destpath
Let's say there are two files in sourcepath
sourcepath/one.txt
sourcepath/two.txt
This copies them to
destpath/one.txt
destpath/two.txt
Not to
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
})
},
}

.rclone_repo/cmd/copyto/copyto.go Executable file

@@ -0,0 +1,54 @@
package copyto
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/sync"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.
This can be used to upload single files to other than their current
name. If the source is a directory then it acts exactly like the copy
command.
So
rclone copyto src dst
where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
This will:
if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})
},
}

.rclone_repo/cmd/copyurl/copyurl.go Executable file

@@ -0,0 +1,39 @@
package copyurl
import (
"net/http"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "copyurl https://example.com dest:path",
Short: `Copy url content to dest.`,
Long: `
Download a URL's content and copy it to the destination without
saving it in temporary storage.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsdst, dstFileName := cmd.NewFsDstFile(args[1:])
cmd.Run(true, true, command, func() error {
resp, err := http.Get(args[0])
if err != nil {
return err
}
_, err = operations.RcatSize(fsdst, dstFileName, resp.Body, resp.ContentLength, time.Now())
return err
})
},
}
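
The command above streams resp.Body straight into the upload, passing resp.ContentLength so the backend knows the size up front, and never spools to disk. A minimal sketch of the same stream-through idea against a throwaway local server, with io.Copy standing in for operations.RcatSize:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
)

func main() {
	// A throwaway server plays the role of the source URL.
	srv := httptest.NewServer(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprint(w, "hello from the url\n")
		}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer resp.Body.Close()

	// Stream the body straight to the destination - no temp file.
	n, err := io.Copy(os.Stdout, resp.Body)
	fmt.Fprintf(os.Stderr, "copied %d bytes, err=%v\n", n, err)
}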

.rclone_repo/cmd/cryptcheck/cryptcheck.go Executable file

@@ -0,0 +1,114 @@
package cryptcheck
import (
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// Globals
var (
oneway = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
}
var commandDefinition = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: `
rclone cryptcheck checks a remote against a crypted remote. This is
the equivalent of running rclone check, but able to check the
checksums of the crypted remote.
For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
It works by reading the nonce from each file on the cryptedremote: and
using that to encrypt each file on the remote:. It then checks the
checksum of the underlying file on the cryptedremote: against the
checksum of the file it has just encrypted.
Use it like this
rclone cryptcheck /path/to/files encryptedremote:path
You can use it like this also, but that will involve downloading all
the files in remote:path.
rclone cryptcheck remote:path encryptedremote:path
After it has run it will log the status of the encryptedremote:.
If you supply the --one-way flag, it will only check that files in source
match the files in destination, not the other way around. Meaning extra files in
destination that are not in the source will not trigger an error.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, true, command, func() error {
return cryptCheck(fdst, fsrc)
})
},
}
// cryptCheck checks the integrity of a crypted remote
func cryptCheck(fdst, fsrc fs.Fs) error {
// Check to see fcrypt is a crypt
fcrypt, ok := fdst.(*crypt.Fs)
if !ok {
return errors.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
}
// Find a hash to use
funderlying := fcrypt.UnWrap()
hashType := funderlying.Hashes().GetOne()
if hashType == hash.None {
return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
}
fs.Infof(nil, "Using %v for hash comparisons", hashType)
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) {
cryptDst := dst.(*crypt.Object)
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(hashType)
if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
if underlyingHash == "" {
return false, true
}
cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
if cryptHash == "" {
return false, true
}
if cryptHash != underlyingHash {
err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.CountError(err)
fs.Errorf(src, "%v", err)
return true, false
}
fs.Debugf(src, "OK")
return false, false
}
return operations.CheckFn(fcrypt, fsrc, checkIdentical, oneway)
}

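The doc above is the whole trick: nothing is decrypted; the plaintext is re-encrypted under the nonce read from the stored file and the two ciphertext hashes are compared. A self-contained sketch of that comparison step, with a toy XOR transform standing in for the real cipher (encryptWithNonce is a stand-in, not rclone's API):

package main

import (
    "crypto/md5"
    "fmt"
)

// encryptWithNonce stands in for the real cipher; it just XORs
// with the nonce so the sketch is self-contained.
func encryptWithNonce(plain, nonce []byte) []byte {
    out := make([]byte, len(plain))
    for i, b := range plain {
        out[i] = b ^ nonce[i%len(nonce)]
    }
    return out
}

func main() {
    plain := []byte("file contents")
    nonce := []byte{0x01, 0x02, 0x03}
    stored := encryptWithNonce(plain, nonce) // what the crypt remote holds
    // Re-encrypt the source under the stored nonce and compare hashes.
    recomputed := encryptWithNonce(plain, nonce)
    if md5.Sum(stored) == md5.Sum(recomputed) {
        fmt.Println("OK: ciphertext hashes match")
    } else {
        fmt.Println("ERROR: hashes differ")
    }
}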
View File

@@ -0,0 +1,92 @@
package cryptdecode
import (
"errors"
"fmt"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
// Options set by command line flags
var (
Reverse = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
flagSet := commandDefinition.Flags()
flags.BoolVarP(flagSet, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
}
var commandDefinition = &cobra.Command{
Use: "cryptdecode encryptedremote: encryptedfilename",
Short: `Cryptdecode returns unencrypted file names.`,
Long: `
rclone cryptdecode returns unencrypted file names when provided with
a list of encrypted file names. List limit is 10 items.
If you supply the --reverse flag, it will return encrypted file names.
Use it like this
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 11, command, args)
cmd.Run(false, false, command, func() error {
fsInfo, _, _, config, err := fs.ConfigFs(args[0])
if err != nil {
return err
}
if fsInfo.Name != "crypt" {
return errors.New("The remote needs to be of type \"crypt\"")
}
cipher, err := crypt.NewCipher(config)
if err != nil {
return err
}
if Reverse {
return cryptEncode(cipher, args[1:])
}
return cryptDecode(cipher, args[1:])
})
},
}
// cryptDecode returns the unencrypted file name
func cryptDecode(cipher crypt.Cipher, args []string) error {
output := ""
for _, encryptedFileName := range args {
fileName, err := cipher.DecryptFileName(encryptedFileName)
if err != nil {
output += fmt.Sprintln(encryptedFileName, "\t", "Failed to decrypt")
} else {
output += fmt.Sprintln(encryptedFileName, "\t", fileName)
}
}
fmt.Print(output)
return nil
}
// cryptEncode returns the encrypted file name
func cryptEncode(cipher crypt.Cipher, args []string) error {
output := ""
for _, fileName := range args {
encryptedFileName := cipher.EncryptFileName(fileName)
output += fmt.Sprintln(fileName, "\t", encryptedFileName)
}
fmt.Print(output)
return nil
}

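The output joins each input name and its translation with a literal tab, which won't line up across rows of different widths. If aligned columns were wanted, the standard library's text/tabwriter would be one way to do it; a small sketch with hypothetical name pairs:

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    // Align name/translation pairs into padded columns.
    w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
    fmt.Fprintln(w, "v0qpsdq8anpci8n929v3uu9338\tfile.txt") // hypothetical pair
    fmt.Fprintln(w, "u8v9338x\tb.txt")                      // hypothetical pair
    _ = w.Flush()
}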
View File

@@ -0,0 +1,31 @@
package dbhashsum
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.DropboxHashSum(fsrc, os.Stdout)
})
},
}

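The Dropbox rules linked above are worth spelling out: the file is cut into 4 MiB blocks, each block is hashed with SHA-256, the binary digests are concatenated, and the concatenation is hashed again. A stdlib-only sketch of those rules (dropboxContentHash is an illustrative helper, not rclone's implementation):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
)

const blockSize = 4 * 1024 * 1024 // 4 MiB per Dropbox's content hash rules

// dropboxContentHash hashes each 4 MiB block with SHA-256, then
// hashes the concatenation of the block digests.
func dropboxContentHash(r io.Reader) (string, error) {
    outer := sha256.New()
    buf := make([]byte, blockSize)
    for {
        n, err := io.ReadFull(r, buf)
        if n > 0 {
            blockSum := sha256.Sum256(buf[:n])
            outer.Write(blockSum[:])
        }
        if err == io.EOF || err == io.ErrUnexpectedEOF {
            break
        }
        if err != nil {
            return "", err
        }
    }
    return hex.EncodeToString(outer.Sum(nil)), nil
}

func main() {
    sum, err := dropboxContentHash(strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    fmt.Println(sum)
}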
118
.rclone_repo/cmd/dedupe/dedupe.go Executable file
View File

@@ -0,0 +1,118 @@
package dedupe
import (
"log"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
dedupeMode = operations.DeduplicateInteractive
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|rename.")
}
var commandDefintion = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate files and delete/rename them.`,
Long: `
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.
In the first pass it will merge directories with the same name. It
will do this iteratively until all the identical directories have been
merged.
The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.
Here is an example run.
Before - with duplicates
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
Now the ` + "`" + `dedupe` + "`" + ` session
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 duplicates - deleting identical copies
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 duplicates - deleting identical copies
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
The result being
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
Dedupe can be run non-interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
For example to rename all the identically named photos in your Google Photos directory, do
rclone dedupe --dedupe-mode rename "drive:Google Photos"
Or
rclone dedupe rename "drive:Google Photos"
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 2, command, args)
if len(args) > 1 {
err := dedupeMode.Set(args[0])
if err != nil {
log.Fatal(err)
}
args = args[1:]
}
fdst := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.Deduplicate(fdst, dedupeMode)
})
},
}

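Once the identical copies are gone, modes like newest reduce to sorting what remains by modification time and keeping one. A minimal sketch of that selection over a toy in-memory object type (object and keepNewest are stand-ins for the remote's entries):

package main

import (
    "fmt"
    "sort"
    "time"
)

type object struct {
    name    string
    modTime time.Time
}

// keepNewest returns the newest object; the rest would be deleted.
func keepNewest(dupes []object) object {
    sort.Slice(dupes, func(i, j int) bool {
        return dupes[i].modTime.After(dupes[j].modTime)
    })
    return dupes[0]
}

func main() {
    now := time.Now()
    dupes := []object{
        {"one.txt", now.Add(-2 * time.Hour)},
        {"one.txt", now},
        {"one.txt", now.Add(-1 * time.Hour)},
    }
    fmt.Println("keeping copy modified at", keepNewest(dupes).modTime)
}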
View File

@@ -0,0 +1,41 @@
package delete
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the contents of path.`,
Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
Then delete
rclone --min-size 100M delete remote:path
That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
return operations.Delete(fsrc)
})
},
}

View File

@@ -0,0 +1,37 @@
package deletefile
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `
Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs, fileName := cmd.NewFsFile(args[0])
cmd.Run(true, false, command, func() error {
if fileName == "" {
return errors.Errorf("%s is a directory or doesn't exist", args[0])
}
fileObj, err := fs.NewObject(fileName)
if err != nil {
return err
}
return operations.DeleteFile(fileObj)
})
},
}

View File

@@ -0,0 +1,19 @@
package genautocomplete
import (
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(completionDefinition)
}
var completionDefinition = &cobra.Command{
Use: "genautocomplete [shell]",
Short: `Output completion script for a given shell.`,
Long: `
Generates a shell completion script for rclone.
Run with --help to list the supported shells.
`,
}

View File

@@ -0,0 +1,44 @@
package genautocomplete
import (
"log"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(bashCommandDefinition)
}
var bashCommandDefinition = &cobra.Command{
Use: "bash [output_file]",
Short: `Output bash completion script for rclone.`,
Long: `
Generates a bash shell autocompletion script for rclone.
This writes to /etc/bash_completion.d/rclone by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete bash
Logout and login again to use the autocompletion scripts, or source
them directly
. /etc/bash_completion
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"
if len(args) > 0 {
out = args[0]
}
err := cmd.Root.GenBashCompletionFile(out)
if err != nil {
log.Fatal(err)
}
},
}

View File

@@ -0,0 +1,35 @@
package genautocomplete
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCompletionBash(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_bash")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionZsh(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}

View File

@@ -0,0 +1,50 @@
package genautocomplete
import (
"log"
"os"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(zshCommandDefinition)
}
var zshCommandDefinition = &cobra.Command{
Use: "zsh [output_file]",
Short: `Output zsh completion script for rclone.`,
Long: `
Generates a zsh autocompletion script for rclone.
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete zsh
Logout and login again to use the autocompletion scripts, or source
them directly
autoload -U compinit && compinit
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/usr/share/zsh/vendor-completions/_rclone"
if len(args) > 0 {
out = args[0]
}
outFile, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
defer func() { _ = outFile.Close() }()
err = cmd.Root.GenZshCompletion(outFile)
if err != nil {
log.Fatal(err)
}
},
}

View File

@@ -0,0 +1,55 @@
package gendocs
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
const gendocFrontmatterTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`
var commandDefintion = &cobra.Command{
Use: "gendocs output_directory",
Short: `Output markdown docs for rclone to the directory supplied.`,
Long: `
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
out := args[0]
err := os.MkdirAll(out, 0777)
if err != nil {
return err
}
now := time.Now().Format(time.RFC3339)
prepender := func(filename string) string {
name := filepath.Base(filename)
base := strings.TrimSuffix(name, path.Ext(name))
url := "/commands/" + strings.ToLower(base) + "/"
return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
linkHandler := func(name string) string {
base := strings.TrimSuffix(name, path.Ext(name))
return "/commands/" + strings.ToLower(base) + "/"
}
return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
},
}

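For a generated file such as rclone_copy.md the prepender turns the base name back into a spaced title and builds the /commands/ URL. A self-contained sketch of that transformation, where the file name and date are placeholders:

package main

import (
    "fmt"
    "path"
    "strings"
)

const tmpl = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`

func main() {
    name := "rclone_copy.md" // hypothetical generated doc file
    base := strings.TrimSuffix(name, path.Ext(name))
    url := "/commands/" + strings.ToLower(base) + "/"
    fmt.Printf(tmpl, "2020-01-13T00:00:00Z", strings.Replace(base, "_", " ", -1), base, url)
}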
View File

@@ -0,0 +1,61 @@
package hashsum
import (
"errors"
"fmt"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "hashsum <hash> remote:path",
Short: `Produces a hashsum file for all the objects in the path.`
Long: `
Produces a hash file for all the objects in the path using the hash
named. The output is in the same format as the standard
md5sum/sha1sum tool.
Run without a hash to see the list of supported hashes, eg
$ rclone hashsum
Supported hashes are:
* MD5
* SHA-1
* DropboxHash
* QuickXorHash
Then
$ rclone hashsum MD5 remote:path
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 2, command, args)
if len(args) == 0 {
fmt.Printf("Supported hashes are:\n")
for _, ht := range hash.Supported.Array() {
fmt.Printf(" * %v\n", ht)
}
return nil
} else if len(args) == 1 {
return errors.New("need hash type and remote")
}
var ht hash.Type
err := ht.Set(args[0])
if err != nil {
return err
}
fsrc := cmd.NewFsSrc(args[1:])
cmd.Run(false, false, command, func() error {
return operations.HashLister(ht, fsrc, os.Stdout)
})
return nil
},
}

18
.rclone_repo/cmd/info/all.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
exec rclone --check-normalization=true --check-control=true --check-length=true info \
/tmp/testInfo \
TestAmazonCloudDrive:testInfo \
TestB2:testInfo \
TestCryptDrive:testInfo \
TestCryptSwift:testInfo \
TestDrive:testInfo \
TestDropbox:testInfo \
TestGoogleCloudStorage:rclone-testinfo \
TestOneDrive:testInfo \
TestS3:rclone-testinfo \
TestSftp:testInfo \
TestSwift:testInfo \
TestYandex:testInfo \
TestFTP:testInfo
# TestHubic:testInfo \

269
.rclone_repo/cmd/info/info.go Executable file
View File

@@ -0,0 +1,269 @@
package info
// FIXME once translations are implemented will need a no-escape
// option for Put so we can make these tests work again
import (
"bytes"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fstest"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
checkNormalization bool
checkControl bool
checkLength bool
checkStreaming bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
commandDefintion.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefintion = &cobra.Command{
Use: "info [remote:path]+",
Short: `Discovers file name or other limitations for paths.`,
Long: `rclone info discovers what filenames and upload methods are possible
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1E6, command, args)
for i := range args {
f := cmd.NewFsDir(args[i : i+1])
cmd.Run(false, false, command, func() error {
return readInfo(f)
})
}
},
}
type results struct {
f fs.Fs
mu sync.Mutex
charNeedsEscaping map[rune]bool
maxFileLength int
canWriteUnnormalized bool
canReadUnnormalized bool
canReadRenormalized bool
canStream bool
}
func newResults(f fs.Fs) *results {
return &results{
f: f,
charNeedsEscaping: make(map[rune]bool),
}
}
// Print the results to stdout
func (r *results) Print() {
fmt.Printf("// %s\n", r.f.Name())
if checkControl {
escape := []string{}
for c, needsEscape := range r.charNeedsEscaping {
if needsEscape {
escape = append(escape, fmt.Sprintf("0x%02X", c))
}
}
sort.Strings(escape)
fmt.Printf("charNeedsEscaping = []byte{\n")
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
fmt.Printf("}\n")
}
if checkLength {
fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
}
if checkNormalization {
fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
}
if checkStreaming {
fmt.Printf("canStream = %v\n", r.canStream)
}
}
// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
contents := fstest.RandomString(50)
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
return r.f.Put(bytes.NewBufferString(contents), src)
}
// check whether normalization is enforced and check whether it is
// done on the files anyway
func (r *results) checkUTF8Normalization() {
unnormalized := "He\u0301roique" // NFD: "e" followed by a combining acute accent
normalized := "H\u00e9roique"    // NFC: precomposed "é"
_, err := r.writeFile(unnormalized)
if err != nil {
r.canWriteUnnormalized = false
return
}
r.canWriteUnnormalized = true
_, err = r.f.NewObject(unnormalized)
if err == nil {
r.canReadUnnormalized = true
}
_, err = r.f.NewObject(normalized)
if err == nil {
r.canReadRenormalized = true
}
}
// check we can write file with the rune passed in
func (r *results) checkChar(c rune) {
fs.Infof(r.f, "Writing file 0x%02X", c)
path := fmt.Sprintf("0x%02X-%c-", c, c)
_, err := r.writeFile(path)
escape := false
if err != nil {
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
escape = true // this rune needs escaping on this remote
} else {
fs.Infof(r.f, "OK writing file 0x%02X", c)
}
r.mu.Lock()
r.charNeedsEscaping[c] = escape
r.mu.Unlock()
}
// check we can write a file with the control chars
func (r *results) checkControls() {
fs.Infof(r.f, "Trying to create control character file names")
// Concurrency control
tokens := make(chan struct{}, fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
tokens <- struct{}{}
}
var wg sync.WaitGroup
for i := rune(0); i < 128; i++ {
if i == 0 || i == '/' {
// We're not even going to check NULL or /
r.charNeedsEscaping[i] = true
continue
}
wg.Add(1)
c := i
go func() {
defer wg.Done()
token := <-tokens
r.checkChar(c)
tokens <- token
}()
}
wg.Wait()
fs.Infof(r.f, "Done trying to create control character file names")
}
// find the max file name size we can use
func (r *results) findMaxLength() {
const maxLen = 16 * 1024
name := make([]byte, maxLen)
for i := range name {
name[i] = 'a'
}
// Find the first size of filename we can't write
i := sort.Search(len(name), func(i int) (fail bool) {
defer func() {
if err := recover(); err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
fail = true
}
}()
path := string(name[:i])
_, err := r.writeFile(path)
if err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
return true
}
fs.Infof(r.f, "Wrote file with name length %d", i)
return false
})
r.maxFileLength = i - 1
fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
}
func (r *results) checkStreaming() {
putter := r.f.Put
if r.f.Features().PutStream != nil {
fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.")
putter = r.f.Features().PutStream
}
contents := "thinking of test strings is hard"
buf := bytes.NewBufferString(contents)
hashIn := hash.NewMultiHasher()
in := io.TeeReader(buf, hashIn)
objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
objR, err := putter(in, objIn)
if err != nil {
fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
r.canStream = false
return
}
hashes := hashIn.Sums()
types := objR.Fs().Hashes().Array()
for _, Hash := range types {
sum, err := objR.Hash(Hash)
if err != nil {
fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err)
r.canStream = false
return
}
if !hash.Equals(hashes[Hash], sum) {
fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", Hash, hashes[Hash], sum)
r.canStream = false
return
}
}
if int64(len(contents)) != objR.Size() {
fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size())
r.canStream = false
return
}
r.canStream = true
}
func readInfo(f fs.Fs) error {
err := f.Mkdir("")
if err != nil {
return errors.Wrap(err, "couldn't mkdir")
}
r := newResults(f)
if checkControl {
r.checkControls()
}
if checkLength {
r.findMaxLength()
}
if checkNormalization {
r.checkUTF8Normalization()
}
if checkStreaming {
r.checkStreaming()
}
r.Print()
return nil
}

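findMaxLength is a textbook use of sort.Search: assuming writes fail monotonically past some length, bisect for the first failing length and subtract one. A sketch of the same bisection against a pretend 255-byte limit instead of a real remote (tryWrite is a stand-in for writing a test file):

package main

import (
    "fmt"
    "sort"
    "strings"
)

func main() {
    const maxLen = 16 * 1024
    const pretendLimit = 255 // stand-in for the remote's real limit
    // tryWrite stands in for writing a test file to the remote.
    tryWrite := func(name string) error {
        if len(name) > pretendLimit {
            return fmt.Errorf("name too long")
        }
        return nil
    }
    // sort.Search finds the smallest name length that fails to write.
    i := sort.Search(maxLen, func(i int) bool {
        return tryWrite(strings.Repeat("a", i)) != nil
    })
    fmt.Println("max file name length:", i-1)
}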
41
.rclone_repo/cmd/link/link.go Executable file
View File

@@ -0,0 +1,41 @@
package link
import (
"fmt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "link remote:path",
Short: `Generate public link to file/folder.`,
Long: `
rclone link will create or retrieve a public link to the given file or folder.
rclone link remote:path/to/file
rclone link remote:path/to/folder/
If successful, the last line of the output will contain the link. Exact
capabilities depend on the remote, but the link will always be created with
the least constraints e.g. no expiry, no password protection, accessible
without account.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc, remote := cmd.NewFsFile(args[0])
cmd.Run(false, false, command, func() error {
link, err := operations.PublicLink(fsrc, remote)
if err != nil {
return err
}
fmt.Println(link)
return nil
})
},
}

View File

@@ -0,0 +1,49 @@
package ls
import (
"fmt"
"sort"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config"
"github.com/spf13/cobra"
)
// Globals
var (
listLong bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
}
var commandDefintion = &cobra.Command{
Use: "listremotes",
Short: `List all the remotes in the config file.`,
Long: `
rclone listremotes lists all the available remotes from the config file.
When used with the -l flag it lists the types too.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
remotes := config.FileSections()
sort.Strings(remotes)
maxlen := 1
for _, remote := range remotes {
if len(remote) > maxlen {
maxlen = len(remote)
}
}
for _, remote := range remotes {
if listLong {
remoteType := config.FileGet(remote, "type", "UNKNOWN")
fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
} else {
fmt.Printf("%s:\n", remote)
}
}
},
}

39
.rclone_repo/cmd/ls/ls.go Executable file
View File

@@ -0,0 +1,39 @@
package ls
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "ls remote:path",
Short: `List the objects in the path with size and path.`,
Long: `
Lists the objects in the source path to standard output in a human
readable format with size and path. Recurses by default.
Eg
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.List(fsrc, os.Stdout)
})
},
}

View File

@@ -0,0 +1,26 @@
package lshelp
// Help describes the common help for all the list commands
var Help = `
Any of the filtering options can be applied to this command.
There are several related list commands
* ` + "`ls`" + ` to list size and path of objects only
* ` + "`lsl`" + ` to list modification time, size and path of objects only
* ` + "`lsd`" + ` to list directories only
* ` + "`lsf`" + ` to list objects and directories in easy to parse format
* ` + "`lsjson`" + ` to list objects and directories in JSON format
` + "`ls`,`lsl`,`lsd`" + ` are designed to be human readable.
` + "`lsf`" + ` is designed to be human and machine readable.
` + "`lsjson`" + ` is designed to be machine readable.
Note that ` + "`ls` and `lsl`" + ` recurse by default - use "--max-depth 1" to stop the recursion.
The other list commands ` + "`lsd`,`lsf`,`lsjson`" + ` do not recurse by default - use "-R" to make them recurse.
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (eg s3, swift, gcs, etc -
the bucket-based remotes).
`

58
.rclone_repo/cmd/lsd/lsd.go Executable file
View File

@@ -0,0 +1,58 @@
package lsd
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
recurse bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefintion = &cobra.Command{
Use: "lsd remote:path",
Short: `List all directories/containers/buckets in the path.`,
Long: `
Lists the directories in the source path to standard output. Does not
recurse by default. Use the -R flag to recurse.
This command lists the total size of the directory (if known, -1 if
not), the modification time (if known, the current time if not), the
number of objects in the directory (if known, -1 if not) and the name
of the directory. Eg
$ rclone lsd swift:
494000 2018-04-26 08:43:20 10000 10000files
65 2018-04-26 08:43:20 1 1File
Or
$ rclone lsd drive:test
-1 2016-10-17 17:41:53 -1 1000files
-1 2017-01-03 14:40:54 -1 2500files
-1 2017-07-08 14:39:28 -1 4000files
If you just want the directory names use "rclone lsf --dirs-only".
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
if recurse {
fs.Config.MaxDepth = 0
}
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.ListDir(fsrc, os.Stdout)
})
},
}

205
.rclone_repo/cmd/lsf/lsf.go Executable file
View File

@@ -0,0 +1,205 @@
package lsf
import (
"fmt"
"io"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
format string
separator string
dirSlash bool
recurse bool
hashType = hash.MD5
filesOnly bool
dirsOnly bool
csv bool
absolute bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
flags := commandDefintion.Flags()
flags.StringVarP(&format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(&separator, "separator", "s", ";", "Separator for the items in the format.")
flags.BoolVarP(&dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
flags.VarP(&hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(&filesOnly, "files-only", "", false, "Only list files.")
flags.BoolVarP(&dirsOnly, "dirs-only", "", false, "Only list directories.")
flags.BoolVarP(&csv, "csv", "", false, "Output in CSV format.")
flags.BoolVarP(&absolute, "absolute", "", false, "Put a leading / in front of path names.")
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefintion = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By
default this will just be the names of the objects and directories,
one per line. The directories will have a / suffix.
Eg
$ rclone lsf swift:bucket
bevajer5jef
canole
diwogej7
ferejej3gux/
fubuwic
Use the --format option to control what gets listed. By default this
is just the path, but you can use these parameters to control the
output:
p - path
s - size
t - modification time
h - hash
i - ID of object if known
m - MimeType of object if known
So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last.
Eg
$ rclone lsf --format "tsp" swift:bucket
2016-06-25 18:55:41;60295;bevajer5jef
2016-06-25 18:55:43;90613;canole
2016-06-25 18:55:43;94467;diwogej7
2018-04-26 08:50:45;0;ferejej3gux/
2016-06-25 18:55:40;37600;fubuwic
If you specify "h" in the format you will get the MD5 hash by default,
use the "--hash" flag to change which hash you want. Note that this
can be returned as an empty string if it isn't available on the object
(and for directories), "ERROR" if there was an error reading it from
the object and "UNSUPPORTED" if that object does not support that hash
type.
For example to emulate the md5sum command you can use
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
Eg
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
7908e352297f0f530b84a756f188baa3 bevajer5jef
cd65ac234e6fea5925974a51cdd865cc canole
03b5341b4f234b9d984d03ad076bae91 diwogej7
8fd37c3810dd660778137ac3a66cc06d fubuwic
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
(Though "rclone md5sum ." is an easier way of typing this.)
By default the separator is ";" this can be changed with the
--separator flag. Note that separators aren't escaped in the path so
putting it last is a good strategy.
Eg
$ rclone lsf --separator "," --format "tshp" swift:bucket
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
2018-04-26 08:52:53,0,,ferejej3gux/
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
You can output in CSV standard format. This will escape things in "
if they contain ,
Eg
$ rclone lsf --csv --files-only --format ps remote:path
test.log,22355
test.sh,449
"this file contains a comma, in the file name.txt",6
Note that the --absolute parameter is useful for making lists of files
to pass to an rclone copy with the --files-from flag.
For example to find all the files modified within one day and copy
those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from new_files /path/to/local remote:path
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
// Work out if the separatorFlag was supplied or not
separatorFlag := command.Flags().Lookup("separator")
separatorFlagSupplied := separatorFlag != nil && separatorFlag.Changed
// Default the separator to , if using CSV
if csv && !separatorFlagSupplied {
separator = ","
}
return Lsf(fsrc, os.Stdout)
})
},
}
// Lsf lists all the objects in the path with modification time, size
// and path in specific format.
func Lsf(fsrc fs.Fs, out io.Writer) error {
var list operations.ListFormat
list.SetSeparator(separator)
list.SetCSV(csv)
list.SetDirSlash(dirSlash)
list.SetAbsolute(absolute)
for _, char := range format {
switch char {
case 'p':
list.AddPath()
case 't':
list.AddModTime()
case 's':
list.AddSize()
case 'h':
list.AddHash(hashType)
case 'i':
list.AddID()
case 'm':
list.AddMimeType()
default:
return errors.Errorf("Unknown format character %q", char)
}
}
return walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
if isDir {
if filesOnly {
continue
}
} else {
if dirsOnly {
continue
}
}
_, _ = fmt.Fprintln(out, list.Format(entry))
}
return nil
})
}

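With --csv the escaping follows the CSV standard, so a field containing the separator is quoted rather than split. The standard library's encoding/csv shows the same behaviour on the doc's example file names:

package main

import (
    "encoding/csv"
    "os"
)

func main() {
    w := csv.NewWriter(os.Stdout)
    // The second record contains the separator, so csv quotes it.
    _ = w.Write([]string{"test.log", "22355"})
    _ = w.Write([]string{"this file contains a comma, in the file name.txt", "6"})
    w.Flush()
}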
225
.rclone_repo/cmd/lsf/lsf_test.go Executable file
View File

@@ -0,0 +1,225 @@
package lsf
import (
"bytes"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/ncw/rclone/backend/local"
)
func TestDefaultLsf(t *testing.T) {
fstest.Initialise()
buf := new(bytes.Buffer)
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
subdir/
`, buf.String())
}
func TestRecurseFlag(t *testing.T) {
fstest.Initialise()
buf := new(bytes.Buffer)
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
recurse = true
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
subdir/
subdir/file1
subdir/file2
subdir/file3
`, buf.String())
recurse = false
}
func TestDirSlashFlag(t *testing.T) {
fstest.Initialise()
buf := new(bytes.Buffer)
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
dirSlash = true
format = "p"
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
subdir/
`, buf.String())
buf = new(bytes.Buffer)
dirSlash = false
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
subdir
`, buf.String())
}
func TestFormat(t *testing.T) {
fstest.Initialise()
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
buf := new(bytes.Buffer)
format = "p"
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
subdir
`, buf.String())
buf = new(bytes.Buffer)
format = "s"
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `0
321
1234
-1
`, buf.String())
buf = new(bytes.Buffer)
format = "hp"
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `d41d8cd98f00b204e9800998ecf8427e;file1
409d6c19451dd39d4a94e42d2ff2c834;file2
9b4c8a5e36d3be7e2c4b1d75ded8c8a1;file3
;subdir
`, buf.String())
buf = new(bytes.Buffer)
format = "p"
filesOnly = true
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1
file2
file3
`, buf.String())
filesOnly = false
buf = new(bytes.Buffer)
format = "p"
dirsOnly = true
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `subdir
`, buf.String())
dirsOnly = false
buf = new(bytes.Buffer)
format = "t"
err = Lsf(f, buf)
require.NoError(t, err)
items, _ := list.DirSorted(f, true, "")
var expectedOutput string
for _, item := range items {
expectedOutput += item.ModTime().Format("2006-01-02 15:04:05") + "\n"
}
assert.Equal(t, expectedOutput, buf.String())
buf = new(bytes.Buffer)
format = "sp"
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `0;file1
321;file2
1234;file3
-1;subdir
`, buf.String())
format = ""
}
func TestSeparator(t *testing.T) {
fstest.Initialise()
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
format = "ps"
buf := new(bytes.Buffer)
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1;0
file2;321
file3;1234
subdir;-1
`, buf.String())
separator = "__SEP__"
buf = new(bytes.Buffer)
err = Lsf(f, buf)
require.NoError(t, err)
assert.Equal(t, `file1__SEP__0
file2__SEP__321
file3__SEP__1234
subdir__SEP__-1
`, buf.String())
format = ""
separator = ""
}
func TestWholeLsf(t *testing.T) {
fstest.Initialise()
f, err := fs.NewFs("testfiles")
require.NoError(t, err)
format = "pst"
separator = "_+_"
recurse = true
dirSlash = true
buf := new(bytes.Buffer)
err = Lsf(f, buf)
require.NoError(t, err)
items, _ := list.DirSorted(f, true, "")
itemsInSubdir, _ := list.DirSorted(f, true, "subdir")
var expectedOutput []string
for _, item := range items {
expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05"))
}
for _, item := range itemsInSubdir {
expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05"))
}
assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+`
file2_+_321_+_`+expectedOutput[1]+`
file3_+_1234_+_`+expectedOutput[2]+`
subdir/_+_-1_+_`+expectedOutput[3]+`
subdir/file1_+_0_+_`+expectedOutput[4]+`
subdir/file2_+_1_+_`+expectedOutput[5]+`
subdir/file3_+_111_+_`+expectedOutput[6]+`
`, buf.String())
format = ""
separator = ""
recurse = false
dirSlash = false
}

View File

Binary file not shown.

Binary file not shown.

View File

Binary file not shown.

Binary file not shown.

216
.rclone_repo/cmd/lsjson/lsjson.go Executable file
View File

@@ -0,0 +1,216 @@
package lsjson
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
recurse bool
showHash bool
showEncrypted bool
showOrigIDs bool
noModTime bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&showEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&showOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
}
// lsJSON in the struct which gets marshalled for each line
type lsJSON struct {
Path string
Name string
Encrypted string `json:",omitempty"`
Size int64
MimeType string `json:",omitempty"`
ModTime Timestamp //`json:",omitempty"`
IsDir bool
Hashes map[string]string `json:",omitempty"`
ID string `json:",omitempty"`
OrigID string `json:",omitempty"`
}
// Timestamp is a time in RFC3339 format with nanosecond precision
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
tt := time.Time(t)
if tt.IsZero() {
return []byte(`""`), nil
}
return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}
var commandDefintion = &cobra.Command{
Use: "lsjson remote:path",
Short: `List directories and objects in the path in JSON format.`,
Long: `List directories and objects in the path in JSON format.
The output is an array of Items, where each Item looks like this
{
"Hashes" : {
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
"MD5" : "b1946ac92492d2347c6235b4d2611184",
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
},
"ID": "y2djkhiujf83u33",
"OrigID": "UYOJVTUW00Q1RzTDA",
"IsDir" : false,
"MimeType" : "application/octet-stream",
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
"Name" : "file.txt",
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
"Path" : "full/path/goes/here/file.txt",
"Size" : 6
}
If --hash is not specified the Hashes property won't be emitted.
If --no-modtime is specified then ModTime will be blank.
If --encrypted is not specified the Encrypted property won't be emitted.
The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
When used without --recursive the Path will always be the same as Name.
The time is in RFC3339 format with nanosecond precision.
The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written one to a line.
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
var cipher crypt.Cipher
if showEncrypted {
fsInfo, _, _, config, err := fs.ConfigFs(args[0])
if err != nil {
log.Fatal(err)
}
if fsInfo.Name != "crypt" {
log.Fatalf("The remote needs to be of type \"crypt\"")
}
cipher, err = crypt.NewCipher(config)
if err != nil {
log.Fatal(err)
}
}
cmd.Run(false, false, command, func() error {
fmt.Println("[")
first := true
err := walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries {
item := lsJSON{
Path: entry.Remote(),
Name: path.Base(entry.Remote()),
Size: entry.Size(),
MimeType: fs.MimeTypeDirEntry(entry),
}
if !noModTime {
item.ModTime = Timestamp(entry.ModTime())
}
if cipher != nil {
switch entry.(type) {
case fs.Directory:
item.Encrypted = cipher.EncryptDirName(path.Base(entry.Remote()))
case fs.Object:
item.Encrypted = cipher.EncryptFileName(path.Base(entry.Remote()))
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
}
if do, ok := entry.(fs.IDer); ok {
item.ID = do.ID()
}
if showOrigIDs {
cur := entry
for {
u, ok := cur.(fs.ObjectUnWrapper)
if !ok {
break // not a wrapped object, use current id
}
next := u.UnWrap()
if next == nil {
break // no base object found, use current id
}
cur = next
}
if do, ok := cur.(fs.IDer); ok {
item.OrigID = do.ID()
}
}
switch x := entry.(type) {
case fs.Directory:
item.IsDir = true
case fs.Object:
item.IsDir = false
if showHash {
item.Hashes = make(map[string]string)
for _, hashType := range x.Fs().Hashes().Array() {
hash, err := x.Hash(hashType)
if err != nil {
fs.Errorf(x, "Failed to read hash: %v", err)
} else if hash != "" {
item.Hashes[hashType.String()] = hash
}
}
}
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
out, err := json.Marshal(item)
if err != nil {
return errors.Wrap(err, "failed to marshal list object")
}
if first {
first = false
} else {
fmt.Print(",\n")
}
_, err = os.Stdout.Write(out)
if err != nil {
return errors.Wrap(err, "failed to write to output")
}
}
return nil
})
if err != nil {
return errors.Wrap(err, "error listing JSON")
}
if !first {
fmt.Println()
}
fmt.Println("]")
return nil
})
},
}

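The Timestamp wrapper exists so that a zero time serialises as "" rather than Go's zero-time string, while everything else gets RFC3339 with nanosecond precision. The same pattern in a self-contained form:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type Timestamp time.Time

// MarshalJSON emits RFC3339 with nanosecond precision, or "" for a zero time.
func (t Timestamp) MarshalJSON() ([]byte, error) {
    tt := time.Time(t)
    if tt.IsZero() {
        return []byte(`""`), nil
    }
    return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}

func main() {
    out, _ := json.Marshal(struct {
        ModTime Timestamp
    }{ModTime: Timestamp(time.Now())})
    fmt.Println(string(out))
}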
39
.rclone_repo/cmd/lsl/lsl.go Executable file
View File

@@ -0,0 +1,39 @@
package lsl
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "lsl remote:path",
Short: `List the objects in path with modification time, size and path.`,
Long: `
Lists the objects in the source path to standard output in a human
readable format with modification time, size and path. Recurses by default.
Eg
$ rclone lsl swift:bucket
60295 2016-06-25 18:55:41.062626927 bevajer5jef
90613 2016-06-25 18:55:43.302607074 canole
94467 2016-06-25 18:55:43.046609333 diwogej7
37600 2016-06-25 18:55:40.814629136 fubuwic
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.ListLong(fsrc, os.Stdout)
})
},
}

View File

@@ -0,0 +1,29 @@
package md5sum
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "md5sum remote:path",
Short: `Produces an md5sum file for all the objects in the path.`,
Long: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return operations.Md5sum(fsrc, os.Stdout)
})
},
}

View File

@@ -0,0 +1,50 @@
package memtest
import (
"runtime"
"sync"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "memtest remote:path",
Short: `Load all the objects at remote:path and report memory stats.`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
objects, _, err := operations.Count(fsrc)
if err != nil {
return err
}
objs := make([]fs.Object, 0, objects)
var before, after runtime.MemStats
runtime.GC()
runtime.ReadMemStats(&before)
var mu sync.Mutex
err = operations.ListFn(fsrc, func(o fs.Object) {
mu.Lock()
objs = append(objs, o)
mu.Unlock()
})
if err != nil {
return err
}
runtime.GC()
runtime.ReadMemStats(&after)
usedMemory := after.Alloc - before.Alloc
fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
fs.Logf(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
return nil
})
},
}

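The measurement is just a before/after diff of runtime.MemStats around building the object slice, with a GC on each side to settle the heap. The technique in miniature, with byte slices standing in for the object list:

package main

import (
    "fmt"
    "runtime"
)

func main() {
    var before, after runtime.MemStats
    runtime.GC()
    runtime.ReadMemStats(&before)
    // Allocate something measurable to stand in for the object list.
    objs := make([][]byte, 0, 100000)
    for i := 0; i < 100000; i++ {
        objs = append(objs, make([]byte, 64))
    }
    runtime.GC() // settle the heap before the second reading
    runtime.ReadMemStats(&after)
    used := after.Alloc - before.Alloc
    fmt.Printf("%d objects took %d bytes, %.1f bytes/object\n",
        len(objs), used, float64(used)/float64(len(objs)))
}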
23
.rclone_repo/cmd/mkdir/mkdir.go Executable file
View File

@@ -0,0 +1,23 @@
package mkdir
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "mkdir remote:path",
Short: `Make the path if it doesn't already exist.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
return operations.Mkdir(fdst, "")
})
},
}

195
.rclone_repo/cmd/mount/dir.go Executable file
View File

@@ -0,0 +1,195 @@
// +build linux darwin freebsd
package mount
import (
"os"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs"
"github.com/pkg/errors"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)
// Dir represents a directory entry
type Dir struct {
*vfs.Dir
}
// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
a.Valid = mountlib.AttrTimeout
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
a.Ctime = modTime
a.Crtime = modTime
// FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
return nil
}
// Check interface satisfied
var _ fusefs.NodeSetattrer = (*Dir)(nil)
// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
defer log.Trace(d, "stat=%+v", req)("err=%v", &err)
if d.VFS().Opt.NoModTime {
return nil
}
if req.Valid.MtimeNow() {
err = d.SetModTime(time.Now())
} else if req.Valid.Mtime() {
err = d.SetModTime(req.Mtime)
}
return translateError(err)
}
// Check interface satisfied
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
mnode, err := d.Dir.Stat(req.Name)
if err != nil {
return nil, translateError(err)
}
resp.EntryValid = mountlib.AttrTimeout
switch x := mnode.(type) {
case *vfs.File:
return &File{x}, nil
case *vfs.Dir:
return &Dir{x}, nil
}
panic("bad type")
}
// Check interface satisfied
var _ fusefs.HandleReadDirAller = (*Dir)(nil)
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
itemsRead := -1
defer log.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
items, err := d.Dir.ReadDirAll()
if err != nil {
return nil, translateError(err)
}
for _, node := range items {
var dirent = fuse.Dirent{
// Inode FIXME ???
Type: fuse.DT_File,
Name: node.Name(),
}
if node.IsDir() {
dirent.Type = fuse.DT_Dir
}
dirents = append(dirents, dirent)
}
itemsRead = len(dirents)
return dirents, nil
}
var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
file, err := d.Dir.Create(req.Name, int(req.Flags))
if err != nil {
return nil, nil, translateError(err)
}
fh, err := file.Open(int(req.Flags) | os.O_CREATE)
if err != nil {
return nil, nil, translateError(err)
}
return &File{file}, &FileHandle{fh}, err
}
var _ fusefs.NodeMkdirer = (*Dir)(nil)
// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
dir, err := d.Dir.Mkdir(req.Name)
if err != nil {
return nil, translateError(err)
}
return &Dir{dir}, nil
}
var _ fusefs.NodeRemover = (*Dir)(nil)
// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
defer log.Trace(d, "name=%q", req.Name)("err=%v", &err)
err = d.Dir.RemoveName(req.Name)
if err != nil {
return translateError(err)
}
return nil
}
// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)
// Rename the file
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
destDir, ok := newDir.(*Dir)
if !ok {
return errors.Errorf("Unknown Dir type %T", newDir)
}
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
if err != nil {
return translateError(err)
}
return nil
}
// Check interface satisfied
var _ fusefs.NodeFsyncer = (*Dir)(nil)
// Fsync the directory
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
defer log.Trace(d, "")("err=%v", &err)
err = d.Dir.Sync()
if err != nil {
return translateError(err)
}
return nil
}
// Check interface satisfied
var _ fusefs.NodeLinker = (*Dir)(nil)
// Link creates a new directory entry in the receiver based on an
// existing Node. Receiver must be a directory.
func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node) (newNode fusefs.Node, err error) {
defer log.Trace(d, "req=%v, old=%v", req, old)("new=%v, err=%v", &newNode, &err)
return nil, fuse.ENOSYS
}

128
.rclone_repo/cmd/mount/file.go Executable file
View File

@@ -0,0 +1,128 @@
// +build linux darwin freebsd
package mount
import (
"io"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)
// File represents a file
type File struct {
*vfs.File
}
// Check interface satisfied
var _ fusefs.Node = (*File)(nil)
// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
a.Valid = mountlib.AttrTimeout
modTime := f.File.ModTime()
Size := uint64(f.File.Size())
Blocks := (Size + 511) / 512
a.Gid = f.VFS().Opt.GID
a.Uid = f.VFS().Opt.UID
a.Mode = f.VFS().Opt.FilePerms
a.Size = Size
a.Atime = modTime
a.Mtime = modTime
a.Ctime = modTime
a.Crtime = modTime
a.Blocks = Blocks
return nil
}
// Check interface satisfied
var _ fusefs.NodeSetattrer = (*File)(nil)
// Setattr handles attribute changes from FUSE. Currently supports ModTime and Size only
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
defer log.Trace(f, "a=%+v", req)("err=%v", &err)
if !f.VFS().Opt.NoModTime {
if req.Valid.Mtime() {
err = f.File.SetModTime(req.Mtime)
} else if req.Valid.MtimeNow() {
err = f.File.SetModTime(time.Now())
}
}
if req.Valid.Size() {
err = f.File.Truncate(int64(req.Size))
}
return translateError(err)
}
// Check interface satisfied
var _ fusefs.NodeOpener = (*File)(nil)
// Open the file for read or write
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) {
defer log.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)
// fuse flags are based off syscall flags as are os flags, so
// should be compatible
handle, err := f.File.Open(int(req.Flags))
if err != nil {
return nil, translateError(err)
}
// See if seeking is supported and set FUSE hint accordingly
if _, err = handle.Seek(0, io.SeekCurrent); err != nil {
resp.Flags |= fuse.OpenNonSeekable
}
return &FileHandle{handle}, nil
}
// Check interface satisfied
var _ fusefs.NodeFsyncer = (*File)(nil)
// Fsync the file
//
// Note that we don't do anything except return OK
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
defer log.Trace(f, "")("err=%v", &err)
return nil
}
// Getxattr gets an extended attribute by the given name from the
// node.
//
// If there is no xattr by that name, returns fuse.ErrNoXattr.
func (f *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
return fuse.ENOSYS // we never implement this
}
var _ fusefs.NodeGetxattrer = (*File)(nil)
// Listxattr lists the extended attributes recorded for the node.
func (f *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
return fuse.ENOSYS // we never implement this
}
var _ fusefs.NodeListxattrer = (*File)(nil)
// Setxattr sets an extended attribute with the given name and
// value for the node.
func (f *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
return fuse.ENOSYS // we never implement this
}
var _ fusefs.NodeSetxattrer = (*File)(nil)
// Removexattr removes an extended attribute for the name.
//
// If there is no xattr by that name, returns fuse.ErrNoXattr.
func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
return fuse.ENOSYS // we never implement this
}
var _ fusefs.NodeRemovexattrer = (*File)(nil)

112
.rclone_repo/cmd/mount/fs.go Executable file
View File

@@ -0,0 +1,112 @@
// FUSE main Fs
// +build linux darwin freebsd
package mount
import (
"syscall"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)
// FS represents the top level filing system
type FS struct {
*vfs.VFS
f fs.Fs
}
// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)
// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
fsys := &FS{
VFS: vfs.New(f, &vfsflags.Opt),
f: f,
}
return fsys
}
// Root returns the root node
func (f *FS) Root() (node fusefs.Node, err error) {
defer log.Trace("", "")("node=%+v, err=%v", &node, &err)
root, err := f.VFS.Root()
if err != nil {
return nil, translateError(err)
}
return &Dir{root}, nil
}
// Check interface satisfied
var _ fusefs.FSStatfser = (*FS)(nil)
// Statfs is called to obtain file system metadata.
// It should write that data to resp.
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
defer log.Trace("", "")("stat=%+v, err=%v", resp, &err)
const blockSize = 4096
const fsBlocks = (1 << 50) / blockSize
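// i.e. pretend to be a 1 PiB file system until the real VFS usage numbers overwrite these defaults below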
resp.Blocks = fsBlocks // Total data blocks in file system.
resp.Bfree = fsBlocks // Free blocks in file system.
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
resp.Files = 1E9 // Total files in file system.
resp.Ffree = 1E9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
total, used, free := f.VFS.Statfs()
if total >= 0 {
resp.Blocks = uint64(total) / blockSize
}
if used >= 0 {
resp.Bfree = resp.Blocks - uint64(used)/blockSize
}
if free >= 0 {
resp.Bavail = uint64(free) / blockSize
}
mountlib.ClipBlocks(&resp.Blocks)
mountlib.ClipBlocks(&resp.Bfree)
mountlib.ClipBlocks(&resp.Bavail)
return nil
}
// Translate errors from mountlib
func translateError(err error) error {
if err == nil {
return nil
}
switch errors.Cause(err) {
case vfs.OK:
return nil
case vfs.ENOENT:
return fuse.ENOENT
case vfs.EEXIST:
return fuse.EEXIST
case vfs.EPERM:
return fuse.EPERM
case vfs.ECLOSED:
return fuse.Errno(syscall.EBADF)
case vfs.ENOTEMPTY:
return fuse.Errno(syscall.ENOTEMPTY)
case vfs.ESPIPE:
return fuse.Errno(syscall.ESPIPE)
case vfs.EBADF:
return fuse.Errno(syscall.EBADF)
case vfs.EROFS:
return fuse.Errno(syscall.EROFS)
case vfs.ENOSYS:
return fuse.ENOSYS
case vfs.EINVAL:
return fuse.Errno(syscall.EINVAL)
}
return err
}
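// Illustrative sketch (not part of the original file): any FUSE method
// that calls into the VFS funnels its error through translateError so
// the kernel sees POSIX errno values. The remove call below is assumed
// for illustration only.
//
// func (d *Dir) removeExample(name string) error {
// if err := d.Dir.RemoveName(name); err != nil {
// return translateError(err) // e.g. vfs.ENOENT -> fuse.ENOENT
// }
// return nil
// }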

View File

@@ -0,0 +1,84 @@
// +build linux darwin freebsd
package mount
import (
"io"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
)
// FileHandle is an open file handle on a File, used for both reads and writes
type FileHandle struct {
vfs.Handle
}
// Check interface satisfied
var _ fusefs.HandleReader = (*FileHandle)(nil)
// Read from the file handle
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
var n int
defer log.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &n, &err)
data := make([]byte, req.Size)
n, err = fh.Handle.ReadAt(data, req.Offset)
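// a short read at the end of the file is normal - the kernel infers EOF
// from resp.Data being shorter than requested, so io.EOF is swallowed below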
if err == io.EOF {
err = nil
} else if err != nil {
return translateError(err)
}
resp.Data = data[:n]
return nil
}
// Check interface satisfied
var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
n, err := fh.Handle.WriteAt(req.Data, req.Offset)
if err != nil {
return translateError(err)
}
resp.Size = int(n)
return nil
}
// Check interface satisfied
var _ fusefs.HandleFlusher = (*FileHandle)(nil)
// Flush is called on each close() of a file descriptor. So if a
// filesystem wants to return write errors in close() and the file has
// cached dirty data, this is a good place to write back data and
// return any errors. Since many applications ignore close() errors
// this is not always useful.
//
// NOTE: The flush() method may be called more than once for each
// open(). This happens if more than one file descriptor refers to an
// opened file due to dup(), dup2() or fork() calls. It is not
// possible to determine if a flush is final, so each flush should be
// treated equally. Multiple write-flush sequences are relatively
// rare, so this shouldn't be a problem.
//
// Filesystems shouldn't assume that flush will always be called after
some writes, or that it will be called at all.
func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
defer log.Trace(fh, "")("err=%v", &err)
return translateError(fh.Handle.Flush())
}
var _ fusefs.HandleReleaser = (*FileHandle)(nil)
// Release is called when we are finished with the file handle
//
// It isn't called directly from userspace so the error is ignored by
// the kernel
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
defer log.Trace(fh, "")("err=%v", &err)
return translateError(fh.Handle.Release())
}

174
.rclone_repo/cmd/mount/mount.go Executable file
View File

@@ -0,0 +1,174 @@
// Package mount implements a FUSE mounting system for rclone remotes.
// +build linux darwin freebsd
package mount
import (
"fmt"
"os"
"os/signal"
"syscall"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/lib/atexit"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
)
func init() {
mountlib.NewMountCommand("mount", Mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(device string) (options []fuse.MountOption) {
options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(mountlib.VolumeName),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.AsyncRead(), - FIXME this causes
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
// which is probably related to errors people are having
//fuse.WritebackCache(),
}
if mountlib.NoAppleDouble {
options = append(options, fuse.NoAppleDouble())
}
if mountlib.NoAppleXattr {
options = append(options, fuse.NoAppleXattr())
}
if mountlib.AllowNonEmpty {
options = append(options, fuse.AllowNonEmptyMount())
}
if mountlib.AllowOther {
options = append(options, fuse.AllowOther())
}
if mountlib.AllowRoot {
options = append(options, fuse.AllowRoot())
}
if mountlib.DefaultPermissions {
options = append(options, fuse.DefaultPermissions())
}
if vfsflags.Opt.ReadOnly {
options = append(options, fuse.ReadOnly())
}
if mountlib.WritebackCache {
options = append(options, fuse.WritebackCache())
}
if mountlib.DaemonTimeout != 0 {
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(mountlib.DaemonTimeout.Seconds()))))
}
if len(mountlib.ExtraOptions) > 0 {
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
}
if len(mountlib.ExtraFlags) > 0 {
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
}
return options
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
if err != nil {
return nil, nil, nil, err
}
filesys := NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
errChan <- err
}()
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
return nil, nil, nil, err
}
unmount := func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(mountpoint)
}
return filesys.VFS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
if mountlib.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fs.Debugf("fuse", "%v", msg)
}
}
// Mount it
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
sigInt := make(chan os.Signal, 1)
signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.IgnoreSignals()
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// Program abort: umount
case <-sigInt:
err = unmount()
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
_ = sdnotify.Stopping()
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}

View File

@@ -0,0 +1,13 @@
// +build linux darwin freebsd
package mount
import (
"testing"
"github.com/ncw/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
mounttest.RunTests(t, mount)
}

View File

@@ -0,0 +1,6 @@
// Build stub for mount on unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build !linux,!darwin,!freebsd
package mount

View File

@@ -0,0 +1,72 @@
// +build ignore
// Read blocks out of a single file to time the seeking code
package main
import (
"flag"
"io"
"log"
"math/rand"
"os"
"time"
)
var (
// Flags
iterations = flag.Int("n", 25, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
randSeed = flag.Int64("seed", 1, "Seed for the random number generator")
)
func randomSeekTest(size int64, in *os.File, name string) {
start := rand.Int63n(size)
blockSize := rand.Intn(*maxBlockSize)
if int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("Reading %d from %d", blockSize, start)
_, err := in.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", name, err)
}
buf := make([]byte, blockSize)
_, err = io.ReadFull(in, buf)
if err != nil {
log.Fatalf("Read failed on %q: %v", name, err)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require 1 file as argument")
}
rand.Seed(*randSeed)
name := args[0]
in, err := os.Open(name)
if err != nil {
log.Fatalf("Couldn't open %q: %v", name, err)
}
fi, err := in.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", name, err)
}
start := time.Now()
for i := 0; i < *iterations; i++ {
randomSeekTest(fi.Size(), in, name)
}
dt := time.Since(start)
log.Printf("That took %v for %d iterations, %v per iteration", dt, *iterations, dt/time.Duration(*iterations))
err = in.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", name, err)
}
}

View File

@@ -0,0 +1,115 @@
// +build ignore
// Read two files with lots of seeking to stress test the seek code
package main
import (
"bytes"
"flag"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"time"
)
var (
// Flags
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
start := rand.Int63n(size)
blockSize := rand.Intn(*maxBlockSize)
if int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("Reading %d from %d", blockSize, start)
_, err := in1.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file1, err)
}
_, err = in2.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file2, err)
}
buf1 := make([]byte, blockSize)
n1, err := io.ReadFull(in1, buf1)
if err != nil {
log.Fatalf("Read failed on %q: %v", file1, err)
}
buf2 := make([]byte, blockSize)
n2, err := io.ReadFull(in2, buf2)
if err != nil {
log.Fatalf("Read failed on %q: %v", file2, err)
}
if n1 != n2 {
log.Fatalf("Read different lengths %d (%q) != %d (%q)", n1, file1, n2, file2)
}
if !bytes.Equal(buf1, buf2) {
log.Printf("Dumping different blocks")
err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z1: %v", err)
}
err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z2: %v", err)
}
log.Fatalf("Read different contents - saved in /tmp/z1 and /tmp/z2")
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 2 {
log.Fatalf("Require 2 files as argument")
}
file1, file2 := args[0], args[1]
in1, err := os.Open(file1)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file1, err)
}
in2, err := os.Open(file2)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file2, err)
}
fi1, err := in1.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file1, err)
}
fi2, err := in2.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file2, err)
}
if fi1.Size() != fi2.Size() {
log.Fatalf("Files not the same size")
}
for i := 0; i < *iterations; i++ {
randomSeekTest(fi1.Size(), in1, in2, file1, file2)
}
err = in1.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file1, err)
}
err = in2.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file2, err)
}
}

View File

@@ -0,0 +1,129 @@
// +build ignore
// Read lots files with lots of simultaneous seeking to stress test the seek code
package main
import (
"flag"
"io"
"log"
"math/rand"
"os"
"path/filepath"
"sort"
"sync"
"time"
)
var (
// Flags
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
seeksPerFile = flag.Int("seeks", 8, "Seeks per file")
mask = flag.Int64("mask", 0, "mask for seek, eg 0x7fff")
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func seekTest(n int, file string) {
in, err := os.Open(file)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file, err)
}
fi, err := in.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file, err)
}
size := fi.Size()
// FIXME make sure we try start and end
maxBlockSize := *maxBlockSize
if int64(maxBlockSize) > size {
maxBlockSize = int(size)
}
for i := 0; i < n; i++ {
start := rand.Int63n(size)
if *mask != 0 {
start &^= *mask
}
blockSize := rand.Intn(maxBlockSize)
beyondEnd := false
switch rand.Intn(10) {
case 0:
start = 0
case 1:
start = size - int64(blockSize)
case 2:
// seek beyond the end
start = size + int64(blockSize)
beyondEnd = true
default:
}
if !beyondEnd && int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("%s: Reading %d from %d", file, blockSize, start)
_, err = in.Seek(start, io.SeekStart)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file, err)
}
buf := make([]byte, blockSize)
n, err := io.ReadFull(in, buf)
if beyondEnd && err == io.EOF {
// OK
} else if err != nil {
log.Fatalf("Read failed on %q: %v (%d)", file, err, n)
}
}
err = in.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file, err)
}
}
// Find all the files in dir
func findFiles(dir string) (files []string) {
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode().IsRegular() && info.Size() > 0 {
files = append(files, path)
}
return nil
})
if err != nil {
log.Fatalf("Error walking %q: %v", dir, err)
}
sort.Strings(files)
return files
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require a directory as argument")
}
dir := args[0]
files := findFiles(dir)
jobs := make(chan string, *simultaneous)
var wg sync.WaitGroup
wg.Add(*simultaneous)
for i := 0; i < *simultaneous; i++ {
go func() {
defer wg.Done()
for file := range jobs {
seekTest(*seeksPerFile, file)
}
}()
}
for i := 0; i < *iterations; i++ {
i := rand.Intn(len(files))
jobs <- files[i]
}
close(jobs)
wg.Wait()
}

View File

@@ -0,0 +1,15 @@
// Daemonization interface for non-Unix variants only
// +build windows
package mountlib
import (
"log"
"runtime"
)
func startBackgroundMode() bool {
log.Fatalf("background mode not supported on %s platform", runtime.GOOS)
return false
}

View File

@@ -0,0 +1,31 @@
// Daemonization interface for Unix variants only
// +build !windows
package mountlib
import (
"log"
"github.com/sevlyar/go-daemon"
)
func startBackgroundMode() bool {
cntxt := &daemon.Context{}
d, err := cntxt.Reborn()
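// Reborn forks: in the parent it returns the child process (non-nil),
// in the daemonized child it returns nil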
if err != nil {
log.Fatalln(err)
}
if d != nil {
return true
}
defer func() {
if err := cntxt.Release(); err != nil {
log.Printf("error encountered while killing daemon: %v", err)
}
}()
return false
}

View File

@@ -0,0 +1,314 @@
package mountlib
import (
"io"
"log"
"os"
"runtime"
"strings"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// Options set by command line flags
var (
DebugFUSE = false
AllowNonEmpty = false
AllowRoot = false
AllowOther = false
DefaultPermissions = false
WritebackCache = false
Daemon = false
MaxReadAhead fs.SizeSuffix = 128 * 1024
ExtraOptions []string
ExtraFlags []string
AttrTimeout = 1 * time.Second // how long the kernel caches attributes for
VolumeName string
NoAppleDouble = true // use noappledouble by default
NoAppleXattr = false // do not use noapplexattr by default
DaemonTimeout time.Duration // OSXFUSE only
)
// checkMountEmpty checks if the mountpoint folder is empty
func checkMountEmpty(mountpoint string) error {
fp, fpErr := os.Open(mountpoint)
if fpErr != nil {
return errors.Wrap(fpErr, "Can not open: "+mountpoint)
}
defer fs.CheckClose(fp, &fpErr)
_, fpErr = fp.Readdirnames(1)
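// Readdirnames returns io.EOF if and only if the directory has no entries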
// directory is not empty
if fpErr != io.EOF {
var e error
var errorMsg = "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
if fpErr == nil {
e = errors.New(errorMsg)
} else {
e = errors.Wrap(fpErr, errorMsg)
}
return e
}
return nil
}
// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
var commandDefinition = &cobra.Command{
Use: commandName + " remote:path /path/to/mountpoint",
Short: `Mount the remote as a mountpoint. **EXPERIMENTAL**`,
Long: `
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
This is **EXPERIMENTAL** - use with care.
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.
Start the mount like this
rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
Or on Windows like this where X: is an unused drive letter
rclone ` + commandName + ` remote:path/to/files X:
When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
the mount is automatically stopped.
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually with
# Linux
fusermount -u /path/to/local/mount
# OS X
umount /path/to/local/mount
### Installing on Windows
To run rclone ` + commandName + ` on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
WinFsp is an [open source](https://github.com/billziss-gh/winfsp)
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with
[cgofuse](https://github.com/billziss-gh/cgofuse). Both of these
packages are by Bill Zissimopoulos who was very helpful during the
implementation of rclone ` + commandName + ` for Windows.
#### Windows caveats
Note that drives created as Administrator are not visible by other
accounts (including the account that was elevated as
Administrator). So if you start a Windows drive from an Administrative
Command Prompt and then try to access the same drive from Explorer
(which does not run as Administrator), you will not be able to see the
new drive.
The easiest way around this is to start the drive from a normal
command prompt. It is also possible to start a drive from the SYSTEM
account (using [the WinFsp.Launcher
infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture))
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).
### Limitations
Without the use of "--vfs-cache-mode" this can only write files
sequentially, it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
"--vfs-cache-mode writes" or "--vfs-cache-mode full". See the [File
Caching](#file-caching) section for more info.
The bucket based remotes (eg Swift, S3, Google Cloud Storage, B2,
Hubic) won't work from the root - you will need to specify a bucket,
or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
None of these support the concept of directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
### rclone ` + commandName + ` vs rclone sync/copy
File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone ` + commandName + `
can't use retries in the same way without making local copies of the
uploads. Look at the **EXPERIMENTAL** [file caching](#file-caching)
for solutions to make ` + commandName + ` mount more reliable.
### Attribute caching
You can use the flag --attr-timeout to set the time the kernel caches
the attributes (size, modification time etc) for directory entries.
The default is "1s" which caches files just long enough to avoid
too many callbacks to rclone from the kernel.
In theory 0s should be the correct value for filesystems which can
change outside the control of the kernel. However this causes quite a
few problems such as
[rclone using too much memory](https://github.com/ncw/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/ncw/rclone/issues/2095#issuecomment-371141147).
The kernel can cache the info about a file for the time given by
"--attr-timeout". You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With "--attr-timeout 1s" this is
very unlikely but not impossible. The higher you set "--attr-timeout"
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.
If you set it higher ('10s' or '1m' say) then the kernel will call
back to rclone less often making it more efficient, however there is
more chance of the corruption issue above.
If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.
This is the same as setting the attr_timeout option in mount.fuse.
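For example, to reduce the number of kernel callbacks at the cost of a
larger window for the corruption issue above, the timeout could be
raised like this:
rclone ` + commandName + ` --attr-timeout 10s remote:path /path/to/local/mount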
### Filters
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.
### systemd
When running rclone ` + commandName + ` as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone ` + commandName + ` service specified as a requirement
will see all files and folders immediately in this mode.
### chunked reading ###
--vfs-read-chunk-size will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read at the cost of an increased number of requests.
When --vfs-read-chunk-size-limit is also specified and greater than --vfs-read-chunk-size,
the chunk size for each open file will get doubled for each chunk read, until the
specified value is reached. A value of -1 will disable the limit and the chunk size will
grow indefinitely.
With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
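For example, a mount with chunked reading and the doubling capped at
1G could be started like this (the values are illustrative):
rclone ` + commandName + ` --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 1G remote:path /path/to/local/mount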
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
if Daemon {
config.PassConfigKeyForDaemonization = true
}
fdst := cmd.NewFsDir(args)
// Show stats if the user has specifically requested them
if cmd.ShowStats() {
stopStats := cmd.StartStats()
defer close(stopStats)
}
// Skip checkMountEmpty if --allow-non-empty flag is used or if
// the Operating System is Windows
if !AllowNonEmpty && runtime.GOOS != "windows" {
err := checkMountEmpty(args[1])
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
}
// Work out the volume name, removing special
// characters from it if necessary
if VolumeName == "" {
VolumeName = fdst.Name() + ":" + fdst.Root()
}
VolumeName = strings.Replace(VolumeName, ":", " ", -1)
VolumeName = strings.Replace(VolumeName, "/", " ", -1)
VolumeName = strings.TrimSpace(VolumeName)
// Start background task if --daemon is specified
if Daemon {
daemonized := startBackgroundMode()
if daemonized {
return
}
}
err := Mount(fdst, args[1])
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
},
}
// Register the command
cmd.Root.AddCommand(commandDefinition)
// Add flags
flagSet := commandDefinition.Flags()
flags.BoolVarP(flagSet, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
// mount options
flags.BoolVarP(flagSet, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(flagSet, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(flagSet, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(flagSet, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(flagSet, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(flagSet, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
flags.DurationVarP(flagSet, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
flags.StringArrayVarP(flagSet, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
flags.StringArrayVarP(flagSet, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
flags.BoolVarP(flagSet, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(flagSet, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(flagSet, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
if runtime.GOOS == "darwin" {
flags.BoolVarP(flagSet, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
flags.BoolVarP(flagSet, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
}
// Add in the generic flags
vfsflags.AddFlags(flagSet)
return commandDefinition
}
// ClipBlocks clips the blocks pointed to, to the OS maximum
func ClipBlocks(b *uint64) {
var max uint64
switch runtime.GOOS {
case "windows":
max = (1 << 43) - 1
case "darwin":
// OSX FUSE only supports 32 bit number of blocks
// https://github.com/osxfuse/osxfuse/issues/396
max = (1 << 32) - 1
default:
// no clipping
return
}
if *b > max {
*b = max
}
}
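// Brief sketch (not part of the original file) of ClipBlocks in use,
// mirroring the Statfs code above: values beyond the per-OS maximum
// are clipped in place, everything else is left untouched.
//
// func clipExample() uint64 {
// blocks := uint64(1) << 40 // larger than OSXFUSE's 32 bit block limit
// ClipBlocks(&blocks) // on darwin this becomes (1<<32)-1
// return blocks
// }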

View File

@@ -0,0 +1,228 @@
package mounttest
import (
"os"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDirLs checks directory listing
func TestDirLs(t *testing.T) {
run.skipIfNoFUSE(t)
run.checkDir(t, "")
run.mkdir(t, "a directory")
run.createFile(t, "a file", "hello")
run.checkDir(t, "a directory/|a file 5")
run.rmdir(t, "a directory")
run.rm(t, "a file")
run.checkDir(t, "")
}
// TestDirCreateAndRemoveDir tests creating and removing a directory
func TestDirCreateAndRemoveDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir/subdir")
run.checkDir(t, "dir/|dir/subdir/")
// Check we can't delete a directory with stuff in
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete subdir then dir - should produce no errors
run.rmdir(t, "dir/subdir")
run.checkDir(t, "dir/")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirCreateAndRemoveFile tests creating and removing a file
func TestDirCreateAndRemoveFile(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "dir/file", "potato")
run.checkDir(t, "dir/|dir/file 6")
// Check we can't delete a directory with stuff in
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete file
run.rm(t, "dir/file")
run.checkDir(t, "dir/")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameFile tests renaming a file
func TestDirRenameFile(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "file", "potato")
run.checkDir(t, "dir/|file 6")
err := os.Rename(run.path("file"), run.path("file2"))
require.NoError(t, err)
run.checkDir(t, "dir/|file2 6")
data := run.readFile(t, "file2")
assert.Equal(t, "potato", data)
err = os.Rename(run.path("file2"), run.path("dir/file3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/file3 6")
data = run.readFile(t, "dir/file3")
require.NoError(t, err)
assert.Equal(t, "potato", data)
run.rm(t, "dir/file3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameEmptyDir tests renaming an empty directory
func TestDirRenameEmptyDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir1")
run.checkDir(t, "dir/|dir1/")
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/")
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/")
run.rmdir(t, "dir/dir3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameFullDir tests renaming a full directory
func TestDirRenameFullDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir1")
run.createFile(t, "dir1/potato.txt", "maris piper")
run.checkDir(t, "dir/|dir1/|dir1/potato.txt 11")
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/|dir/dir2/potato.txt 11")
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/|dir/dir3/potato.txt 11")
run.rm(t, "dir/dir3/potato.txt")
run.rmdir(t, "dir/dir3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirModTime tests mod times
func TestDirModTime(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
err := os.Chtimes(run.path("dir"), mtime, mtime)
require.NoError(t, err)
info, err := os.Stat(run.path("dir"))
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rmdir(t, "dir")
}
// TestDirCacheFlush tests flushing the dir cache
func TestDirCacheFlush(t *testing.T) {
run.skipIfNoFUSE(t)
run.checkDir(t, "")
run.mkdir(t, "dir")
run.mkdir(t, "otherdir")
run.createFile(t, "dir/file", "1")
run.createFile(t, "otherdir/file", "1")
dm := newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1")
localDm := make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
err := run.fremote.Mkdir("dir/subdir")
require.NoError(t, err)
root, err := run.vfs.Root()
require.NoError(t, err)
// expect newly created "subdir" on remote to not show up
root.ForgetPath("otherdir", fs.EntryDirectory)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
root.ForgetPath("dir", fs.EntryDirectory)
dm = newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1|dir/subdir/")
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
run.rm(t, "otherdir/file")
run.rmdir(t, "otherdir")
run.rm(t, "dir/file")
run.rmdir(t, "dir/subdir")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirCacheFlushOnDirRename tests flushing the dir cache on rename
func TestDirCacheFlushOnDirRename(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "dir/file", "1")
dm := newDirMap("dir/|dir/file 1")
localDm := make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
// expect remotely created directory to not show up
err := run.fremote.Mkdir("dir/subdir")
require.NoError(t, err)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
err = os.Rename(run.path("dir"), run.path("rid"))
require.NoError(t, err)
dm = newDirMap("rid/|rid/subdir/|rid/file 1")
localDm = make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
run.rm(t, "rid/file")
run.rmdir(t, "rid/subdir")
run.rmdir(t, "rid")
run.checkDir(t, "")
}

View File

@@ -0,0 +1,59 @@
package mounttest
import (
"os"
"runtime"
"testing"
"github.com/stretchr/testify/require"
)
// TestTouchAndDelete checks that writing a zero byte file and immediately
// deleting it is not racy. See https://github.com/ncw/rclone/issues/1181
func TestTouchAndDelete(t *testing.T) {
run.skipIfNoFUSE(t)
run.checkDir(t, "")
run.createFile(t, "touched", "")
run.rm(t, "touched")
run.checkDir(t, "")
}
// TestRenameOpenHandle checks that a file with open writers is successfully
// renamed after all writers close. See https://github.com/ncw/rclone/issues/2130
func TestRenameOpenHandle(t *testing.T) {
run.skipIfNoFUSE(t)
if runtime.GOOS == "windows" {
t.Skip("Skipping test on Windows")
}
run.checkDir(t, "")
// create file
example := []byte("Some Data")
path := run.path("rename")
file, err := osCreate(path)
require.NoError(t, err)
// write some data
_, err = file.Write(example)
require.NoError(t, err)
err = file.Sync()
require.NoError(t, err)
// attempt to rename open file
err = os.Rename(path, path+"bla")
require.NoError(t, err)
// close open writers to allow rename on remote to go through
err = file.Close()
require.NoError(t, err)
// verify file was renamed properly
run.checkDir(t, "renamebla 9")
// cleanup
run.rm(t, "renamebla")
run.checkDir(t, "")
}

View File

@@ -0,0 +1,68 @@
package mounttest
import (
"os"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFileModTime tests mod times on files
func TestFileModTime(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "file", "123")
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
err := os.Chtimes(run.path("file"), mtime, mtime)
require.NoError(t, err)
info, err := os.Stat(run.path("file"))
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "file")
}
// osCreate is like os.Create but without opening for read as well
func osCreate(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)
if runtime.GOOS == "windows" {
t.Skip("Skipping test on Windows")
}
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
filepath := run.path("cp-archive-test")
f, err := osCreate(filepath)
require.NoError(t, err)
_, err = f.Write([]byte{104, 105})
require.NoError(t, err)
err = os.Chtimes(filepath, mtime, mtime)
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
run.waitForWriters()
info, err := os.Stat(filepath)
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "cp-archive-test")
}

View File

@@ -0,0 +1,409 @@
// Test suite for rclonefs
package mounttest
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"time"
_ "github.com/ncw/rclone/backend/all" // import all the backends
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type (
// UnmountFn is called to unmount the file system
UnmountFn func() error
// MountFn is called to mount the file system
MountFn func(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error)
)
var (
mountFn MountFn
)
// RunTests runs all the tests against all the VFS cache modes
func RunTests(t *testing.T, fn MountFn) {
mountFn = fn
flag.Parse()
cacheModes := []vfs.CacheMode{
vfs.CacheModeOff,
vfs.CacheModeMinimal,
vfs.CacheModeWrites,
vfs.CacheModeFull,
}
run = newRun()
for _, cacheMode := range cacheModes {
run.cacheMode(cacheMode)
log.Printf("Starting test run with cache mode %v", cacheMode)
ok := t.Run(fmt.Sprintf("CacheMode=%v", cacheMode), func(t *testing.T) {
t.Run("TestTouchAndDelete", TestTouchAndDelete)
t.Run("TestRenameOpenHandle", TestRenameOpenHandle)
t.Run("TestDirLs", TestDirLs)
t.Run("TestDirCreateAndRemoveDir", TestDirCreateAndRemoveDir)
t.Run("TestDirCreateAndRemoveFile", TestDirCreateAndRemoveFile)
t.Run("TestDirRenameFile", TestDirRenameFile)
t.Run("TestDirRenameEmptyDir", TestDirRenameEmptyDir)
t.Run("TestDirRenameFullDir", TestDirRenameFullDir)
t.Run("TestDirModTime", TestDirModTime)
t.Run("TestDirCacheFlush", TestDirCacheFlush)
t.Run("TestDirCacheFlushOnDirRename", TestDirCacheFlushOnDirRename)
t.Run("TestFileModTime", TestFileModTime)
t.Run("TestFileModTimeWithOpenWriters", TestFileModTimeWithOpenWriters)
t.Run("TestMount", TestMount)
t.Run("TestRoot", TestRoot)
t.Run("TestReadByByte", TestReadByByte)
t.Run("TestReadChecksum", TestReadChecksum)
t.Run("TestReadFileDoubleClose", TestReadFileDoubleClose)
t.Run("TestReadSeek", TestReadSeek)
t.Run("TestWriteFileNoWrite", TestWriteFileNoWrite)
t.Run("TestWriteFileWrite", TestWriteFileWrite)
t.Run("TestWriteFileOverwrite", TestWriteFileOverwrite)
t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
t.Run("TestWriteFileFsync", TestWriteFileFsync)
})
log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
if !ok {
break
}
}
run.Finalise()
}
// Run holds the remotes for a test run
type Run struct {
vfs *vfs.VFS
mountPath string
fremote fs.Fs
fremoteName string
cleanRemote func()
umountResult <-chan error
umountFn UnmountFn
skip bool
}
// run holds the master Run data
var run *Run
// newRun initialises the remote mount for testing and returns a run
// object.
//
// r.fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func newRun() *Run {
r := &Run{
umountResult: make(chan error, 1),
}
fstest.Initialise()
var err error
r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
if err != nil {
log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
}
err = r.fremote.Mkdir("")
if err != nil {
log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
}
r.mountPath = findMountPath()
// Mount it up
r.mount()
return r
}
func findMountPath() string {
if runtime.GOOS != "windows" {
mountPath, err := ioutil.TempDir("", "rclonefs-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
}
return mountPath
}
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Fatalf("Couldn't find free drive letter for test")
found:
return drive
}
func (r *Run) mount() {
log.Printf("mount %q %q", r.fremote, r.mountPath)
var err error
r.vfs, r.umountResult, r.umountFn, err = mountFn(r.fremote, r.mountPath)
if err != nil {
log.Printf("mount FAILED: %v", err)
r.skip = true
} else {
log.Printf("mount OK")
}
}
func (r *Run) umount() {
if r.skip {
log.Printf("FUSE not found so skipping umount")
return
}
/*
log.Printf("Calling fusermount -u %q", r.mountPath)
err := exec.Command("fusermount", "-u", r.mountPath).Run()
if err != nil {
log.Printf("fusermount failed: %v", err)
}
*/
log.Printf("Unmounting %q", r.mountPath)
err := r.umountFn()
if err != nil {
log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
err = r.umountFn()
}
if err != nil {
log.Fatalf("signal to umount failed: %v", err)
}
log.Printf("Waiting for umount")
err = <-r.umountResult
if err != nil {
log.Fatalf("umount failed: %v", err)
}
// Cleanup the VFS cache - umount has called Shutdown
err = r.vfs.CleanUp()
if err != nil {
log.Printf("Failed to cleanup the VFS cache: %v", err)
}
}
// cacheMode flushes the VFS and changes the CacheMode
func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
if r.skip {
log.Printf("FUSE not found so skipping cacheMode")
return
}
// Wait for writers to finish
r.vfs.WaitForWriters(30 * time.Second)
// Empty and remake the remote
r.cleanRemote()
err := r.fremote.Mkdir("")
if err != nil {
log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
}
// Empty the cache
err = r.vfs.CleanUp()
if err != nil {
log.Printf("Failed to cleanup the VFS cache: %v", err)
}
// Reset the cache mode
r.vfs.SetCacheMode(cacheMode)
// Flush the directory cache
r.vfs.FlushDirCache()
}
func (r *Run) skipIfNoFUSE(t *testing.T) {
if r.skip {
t.Skip("FUSE not found so skipping test")
}
}
// Finalise cleans the remote and unmounts
func (r *Run) Finalise() {
r.umount()
r.cleanRemote()
err := os.RemoveAll(r.mountPath)
if err != nil {
log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
}
}
// path returns an OS local path for filepath
func (r *Run) path(filePath string) string {
// return windows drive letter root as E:\
if filePath == "" && runtime.GOOS == "windows" {
return run.mountPath + `\`
}
return filepath.Join(run.mountPath, filepath.FromSlash(filePath))
}
type dirMap map[string]struct{}
// Create a dirMap from a string
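// e.g. newDirMap("dir/|dir/file 6") yields {"dir/": {}, "dir/file 6": {}}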
func newDirMap(dirString string) (dm dirMap) {
dm = make(dirMap)
for _, entry := range strings.Split(dirString, "|") {
if entry != "" {
dm[entry] = struct{}{}
}
}
return dm
}
// filesOnly returns a dirMap containing only the files
func (dm dirMap) filesOnly() dirMap {
newDm := make(dirMap)
for name := range dm {
if !strings.HasSuffix(name, "/") {
newDm[name] = struct{}{}
}
}
return newDm
}
// reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
realPath := r.path(filePath)
files, err := ioutil.ReadDir(realPath)
require.NoError(t, err)
for _, fi := range files {
name := path.Join(filePath, fi.Name())
if fi.IsDir() {
dir[name+"/"] = struct{}{}
r.readLocal(t, dir, name)
assert.Equal(t, run.vfs.Opt.DirPerms&os.ModePerm, fi.Mode().Perm())
} else {
dir[fmt.Sprintf("%s %d", name, fi.Size())] = struct{}{}
assert.Equal(t, run.vfs.Opt.FilePerms&os.ModePerm, fi.Mode().Perm())
}
}
}
// reads the remote tree into dir
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
objs, dirs, err := walk.GetAll(r.fremote, filepath, true, 1)
if err == fs.ErrorDirNotFound {
return
}
require.NoError(t, err)
for _, obj := range objs {
dir[fmt.Sprintf("%s %d", obj.Remote(), obj.Size())] = struct{}{}
}
for _, d := range dirs {
name := d.Remote()
dir[name+"/"] = struct{}{}
r.readRemote(t, dir, name)
}
}
// checkDir checks the local and remote against the string passed in
func (r *Run) checkDir(t *testing.T, dirString string) {
var retries = *fstest.ListRetries
sleep := time.Second / 5
var remoteOK, fuseOK bool
var dm, localDm, remoteDm dirMap
for i := 1; i <= retries; i++ {
dm = newDirMap(dirString)
localDm = make(dirMap)
r.readLocal(t, localDm, "")
remoteDm = make(dirMap)
r.readRemote(t, remoteDm, "")
// Ignore directories for remote compare
remoteOK = reflect.DeepEqual(dm.filesOnly(), remoteDm.filesOnly())
fuseOK = reflect.DeepEqual(dm, localDm)
if remoteOK && fuseOK {
return
}
sleep *= 2
t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
time.Sleep(sleep)
}
assert.Equal(t, dm.filesOnly(), remoteDm.filesOnly(), "expected vs remote")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
}
// wait for any files being written to be released by fuse
func (r *Run) waitForWriters() {
run.vfs.WaitForWriters(10 * time.Second)
}
func (r *Run) createFile(t *testing.T, filepath string, contents string) {
filepath = r.path(filepath)
err := ioutil.WriteFile(filepath, []byte(contents), 0600)
require.NoError(t, err)
r.waitForWriters()
}
func (r *Run) readFile(t *testing.T, filepath string) string {
filepath = r.path(filepath)
result, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
time.Sleep(100 * time.Millisecond) // FIXME wait for Release
return string(result)
}
func (r *Run) mkdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Mkdir(filepath, 0700)
require.NoError(t, err)
}
func (r *Run) rm(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
// Wait for file to disappear from listing
for i := 0; i < 100; i++ {
_, err := os.Stat(filepath)
if os.IsNotExist(err) {
return
}
time.Sleep(100 * time.Millisecond)
}
assert.Fail(t, "failed to delete file", filepath)
}
func (r *Run) rmdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
}
// TestMount checks that the Fs is mounted by seeing if the mountpoint
// is in the mount output
func TestMount(t *testing.T) {
run.skipIfNoFUSE(t)
if runtime.GOOS == "windows" {
t.Skip("not running on windows")
}
out, err := exec.Command("mount").Output()
require.NoError(t, err)
assert.Contains(t, string(out), run.mountPath)
}
// TestRoot checks root directory is present and correct
func TestRoot(t *testing.T) {
run.skipIfNoFUSE(t)
fi, err := os.Lstat(run.mountPath)
require.NoError(t, err)
assert.True(t, fi.IsDir())
assert.Equal(t, run.vfs.Opt.DirPerms&os.ModePerm, fi.Mode().Perm())
}

View File

@@ -0,0 +1,127 @@
package mounttest
import (
"io"
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestReadByByte reads the file byte by byte, including reading zero bytes
func TestReadByByte(t *testing.T) {
run.skipIfNoFUSE(t)
var data = []byte("hellohello")
run.createFile(t, "testfile", string(data))
run.checkDir(t, "testfile 10")
for i := 0; i < len(data); i++ {
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
for j := 0; j < i; j++ {
buf := make([]byte, 1)
n, err := io.ReadFull(fd, buf)
assert.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, buf[0], data[j])
}
err = fd.Close()
assert.NoError(t, err)
}
time.Sleep(100 * time.Millisecond) // FIXME wait for Release
run.rm(t, "testfile")
}
// TestReadChecksum checks the checksum reading is working
func TestReadChecksum(t *testing.T) {
run.skipIfNoFUSE(t)
// create file big enough so we exceed any single FUSE read
// request
b := make([]rune, 3*128*1024)
for i := range b {
b[i] = 'r'
}
run.createFile(t, "bigfile", string(b))
// The hash comparison would fail in Flush, if we did not
// ensure we read the whole file
fd, err := os.Open(run.path("bigfile"))
assert.NoError(t, err)
buf := make([]byte, 10)
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
err = fd.Close()
assert.NoError(t, err)
// The hash comparison would fail, because we only read parts
// of the file
fd, err = os.Open(run.path("bigfile"))
assert.NoError(t, err)
// read at start
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
// read at end
_, err = fd.Seek(int64(len(b)-len(buf)), io.SeekStart)
assert.NoError(t, err)
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
// ensure we don't compare hashes
err = fd.Close()
assert.NoError(t, err)
run.rm(t, "bigfile")
}
// TestReadSeek tests seeking
func TestReadSeek(t *testing.T) {
run.skipIfNoFUSE(t)
var data = []byte("helloHELLO")
run.createFile(t, "testfile", string(data))
run.checkDir(t, "testfile 10")
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
// Seek to half way
_, err = fd.Seek(5, io.SeekStart)
assert.NoError(t, err)
buf, err := ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte("HELLO"))
// Test seeking to the end
_, err = fd.Seek(10, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte(""))
// Test seeking beyond the end
_, err = fd.Seek(1000000, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte(""))
// Now back to the start
_, err = fd.Seek(0, io.SeekStart)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte("helloHELLO"))
err = fd.Close()
assert.NoError(t, err)
run.rm(t, "testfile")
}

View File

@@ -0,0 +1,13 @@
// +build !linux,!darwin,!freebsd
package mounttest
import (
"runtime"
"testing"
)
// TestReadFileDoubleClose tests double close on read
func TestReadFileDoubleClose(t *testing.T) {
t.Skip("not supported on " + runtime.GOOS)
}

View File

@@ -0,0 +1,53 @@
// +build linux darwin freebsd
package mounttest
import (
"os"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
)
// TestReadFileDoubleClose tests double close on read
func TestReadFileDoubleClose(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testdoubleclose", "hello")
in, err := os.Open(run.path("testdoubleclose"))
assert.NoError(t, err)
fd := in.Fd()
fd1, err := syscall.Dup(int(fd))
assert.NoError(t, err)
fd2, err := syscall.Dup(int(fd))
assert.NoError(t, err)
// close one of the dups - should produce no error
err = syscall.Close(fd1)
assert.NoError(t, err)
// read from the file
buf := make([]byte, 1)
_, err = in.Read(buf)
assert.NoError(t, err)
// close it
err = in.Close()
assert.NoError(t, err)
// read from the other dup - should produce no error as this
// file is now buffered
n, err := syscall.Read(fd2, buf)
assert.NoError(t, err)
assert.Equal(t, 1, n)
// close the dup - should not produce an error
err = syscall.Close(fd2)
assert.NoError(t, err, "input/output error")
run.rm(t, "testdoubleclose")
}

View File

@@ -0,0 +1,84 @@
package mounttest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestWriteFileNoWrite tests writing a file with no write()'s to it
func TestWriteFileNoWrite(t *testing.T) {
run.skipIfNoFUSE(t)
fd, err := osCreate(run.path("testnowrite"))
assert.NoError(t, err)
err = fd.Close()
assert.NoError(t, err)
run.waitForWriters()
run.checkDir(t, "testnowrite 0")
run.rm(t, "testnowrite")
}
// FIXMETestWriteOpenFileInDirListing tests open file in directory listing
func FIXMETestWriteOpenFileInDirListing(t *testing.T) {
run.skipIfNoFUSE(t)
fd, err := osCreate(run.path("testnowrite"))
assert.NoError(t, err)
run.checkDir(t, "testnowrite 0")
err = fd.Close()
assert.NoError(t, err)
run.waitForWriters()
run.rm(t, "testnowrite")
}
// TestWriteFileWrite tests writing a file and reading it back
func TestWriteFileWrite(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testwrite", "data")
run.checkDir(t, "testwrite 4")
contents := run.readFile(t, "testwrite")
assert.Equal(t, "data", contents)
run.rm(t, "testwrite")
}
// TestWriteFileOverwrite tests overwriting a file
func TestWriteFileOverwrite(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testwrite", "data")
run.checkDir(t, "testwrite 4")
run.createFile(t, "testwrite", "potato")
contents := run.readFile(t, "testwrite")
assert.Equal(t, "potato", contents)
run.rm(t, "testwrite")
}
// TestWriteFileFsync tests Fsync
//
// NB the code for this is in file.go rather than write.go
func TestWriteFileFsync(t *testing.T) {
run.skipIfNoFUSE(t)
filepath := run.path("to be synced")
fd, err := osCreate(filepath)
require.NoError(t, err)
_, err = fd.Write([]byte("hello"))
require.NoError(t, err)
err = fd.Sync()
require.NoError(t, err)
err = fd.Close()
require.NoError(t, err)
run.waitForWriters()
run.rm(t, "to be synced")
}

View File

@@ -0,0 +1,13 @@
// +build !linux,!darwin,!freebsd
package mounttest
import (
"runtime"
"testing"
)
// TestWriteFileDoubleClose tests double close on write
func TestWriteFileDoubleClose(t *testing.T) {
t.Skip("not supported on " + runtime.GOOS)
}

View File

@@ -0,0 +1,54 @@
// +build linux darwin freebsd
package mounttest
import (
"runtime"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
)
// TestWriteFileDoubleClose tests double close on write
func TestWriteFileDoubleClose(t *testing.T) {
run.skipIfNoFUSE(t)
if runtime.GOOS == "darwin" {
t.Skip("Skipping test on OSX")
}
out, err := osCreate(run.path("testdoubleclose"))
assert.NoError(t, err)
fd := out.Fd()
fd1, err := syscall.Dup(int(fd))
assert.NoError(t, err)
fd2, err := syscall.Dup(int(fd))
assert.NoError(t, err)
// close one of the dups - should produce no error
err = syscall.Close(fd1)
assert.NoError(t, err)
// write to the file
buf := []byte("hello")
n, err := out.Write(buf)
assert.NoError(t, err)
assert.Equal(t, 5, n)
// close it
err = out.Close()
assert.NoError(t, err)
// write to the other dup - should produce an error
_, err = syscall.Write(fd2, buf)
assert.Error(t, err, "input/output error")
// close the dup - should not produce an error
err = syscall.Close(fd2)
assert.NoError(t, err)
run.waitForWriters()
run.rm(t, "testdoubleclose")
}

53
.rclone_repo/cmd/move/move.go Executable file
View File

@@ -0,0 +1,53 @@
package move
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/sync"
"github.com/spf13/cobra"
)
// Globals
var (
deleteEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
}
var commandDefinition = &cobra.Command{
Use: "move source:path dest:path",
Short: `Move files from source to dest.`,
Long: `
Moves the contents of the source directory to the destination
directory. Rclone will error if the source and destination overlap and
the remote does not support a server side directory move operation.
If no filters are in use and if possible this will server side move
` + "`source:path`" + ` into ` + "`dest:path`" + `. After this ` + "`source:path`" + ` will no
longer exist.
Otherwise for each file in ` + "`source:path`" + ` selected by the filters (if
any) this will move it into ` + "`dest:path`" + `. If possible a server side
move will be used, otherwise it will copy it (server side if possible)
into ` + "`dest:path`" + ` then delete the original (if no errors on copy) in
` + "`source:path`" + `.
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
}
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
})
},
}
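
For reference, typical invocations look like this (paths are hypothetical); --dry-run is the safe way to preview what would be moved:

    rclone move --dry-run source:path dest:path
    rclone move --delete-empty-src-dirs source:path dest:path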

View File

@@ -0,0 +1,58 @@
package moveto
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/sync"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "moveto source:path dest:path",
Short: `Move file or directory from source to dest.`,
Long: `
If source:path is a file or directory then it moves it to a file or
directory named dest:path.
This can be used to rename files or upload single files to other than
their existing name. If the source is a directory then it acts exactly
like the move command.
So
rclone moveto src dst
where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
This will:
if src is file
move it to dst, overwriting an existing file if it exists
if src is directory
move it to dst, overwriting existing files if they exist
see move command for full details
This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. src will be deleted on successful
transfer.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, false)
}
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
})
},
}

556
.rclone_repo/cmd/ncdu/ncdu.go Executable file
View File

@@ -0,0 +1,556 @@
// Package ncdu implements a text based user interface for exploring a remote
//+build !plan9,!solaris
package ncdu
import (
"fmt"
"path"
"sort"
"strings"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ncdu/scan"
"github.com/ncw/rclone/fs"
termbox "github.com/nsf/termbox-go"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "ncdu remote:path",
Short: `Explore a remote with a text based user interface.`,
Long: `
This displays a text based user interface allowing the navigation of a
remote. It is most useful for answering the question - "What is using
all my disk space?".
<script src="https://asciinema.org/a/157793.js" id="asciicast-157793" async></script>
To make the user interface it first scans the entire remote given and
builds an in memory representation. rclone ncdu can be used during
this scanning phase and you will see it building up the directory
structure as it goes along.
Here are the keys - press '?' to toggle the help on and off
` + strings.Join(helpText[1:], "\n ") + `
This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment, most
importantly deleting files, but is useful as it stands.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return NewUI(fsrc).Show()
})
},
}
// help text
var helpText = []string{
"rclone ncdu",
" ↑,↓ or k,j to Move",
" →,l to enter",
" ←,h to return",
" c toggle counts",
" g toggle graph",
" n,s,C sort by name,size,count",
" ^L refresh screen",
" ? to toggle help on and off",
" q/ESC/c-C to quit",
}
// UI contains the state of the user interface
type UI struct {
f fs.Fs // fs being displayed
fsName string // human name of Fs
root *scan.Dir // root directory
d *scan.Dir // current directory being displayed
path string // path of current directory
showBox bool // whether to show a box
boxText []string // text to show in box
entries fs.DirEntries // entries of current directory
sortPerm []int // order to display entries in after sorting
invSortPerm []int // inverse order
dirListHeight int // height of listing
listing bool // whether listing is in progress
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByCount int8
dirPosMap map[string]dirPos // store for directory positions
}
// Where we have got to in the directory listing
type dirPos struct {
entry int
offset int
}
// Print a string
func Print(x, y int, fg, bg termbox.Attribute, msg string) {
for _, c := range msg {
termbox.SetCell(x, y, c, fg, bg)
x++
}
}
// Printf a string
func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
Print(x, y, fg, bg, s)
}
// Line prints a string up to xmax, padding with the given spacer rune
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
for _, c := range msg {
termbox.SetCell(x, y, c, fg, bg)
x++
if x >= xmax {
return
}
}
for ; x < xmax; x++ {
termbox.SetCell(x, y, spacer, fg, bg)
}
}
// Linef a string
func Linef(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, format string, args ...interface{}) {
s := fmt.Sprintf(format, args...)
Line(x, y, xmax, fg, bg, spacer, s)
}
// Box the u.boxText onto the screen
func (u *UI) Box() {
w, h := termbox.Size()
// Find dimensions of text
boxWidth := 10
for _, s := range u.boxText {
if len(s) > boxWidth && len(s) < w-4 {
boxWidth = len(s)
}
}
boxHeight := len(u.boxText)
// position
x := (w - boxWidth) / 2
y := (h - boxHeight) / 2
xmax := x + boxWidth
// draw text
fg, bg := termbox.ColorRed, termbox.ColorWhite
for i, s := range u.boxText {
Line(x, y+i, xmax, fg, bg, ' ', s)
fg = termbox.ColorBlack
}
// FIXME draw a box around
}
// find the biggest entry in the current listing
func (u *UI) biggestEntry() (biggest int64) {
if u.d == nil {
return
}
for i := range u.entries {
size, _, _, _ := u.d.AttrI(u.sortPerm[i])
if size > biggest {
biggest = size
}
}
return
}
// Draw the current screen
func (u *UI) Draw() error {
w, h := termbox.Size()
u.dirListHeight = h - 3
// Plot
err := termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
if err != nil {
return errors.Wrap(err, "failed to clear screen")
}
// Header line
Linef(0, 0, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)
// Directory line
Linef(0, 1, w, termbox.ColorWhite, termbox.ColorBlack, '-', "-- %s ", u.path)
// graphs
const (
graphBars = 10
graph = "########## "
)
// Directory listing
if u.d != nil {
y := 2
perBar := u.biggestEntry() / graphBars
if perBar == 0 {
perBar = 1
}
dirPos := u.dirPosMap[u.path]
for i, j := range u.sortPerm[dirPos.offset:] {
entry := u.entries[j]
n := i + dirPos.offset
if y >= h-1 {
break
}
fg := termbox.ColorWhite
bg := termbox.ColorBlack
if n == dirPos.entry {
fg, bg = bg, fg
}
size, count, isDir, readable := u.d.AttrI(u.sortPerm[n])
mark := ' '
if isDir {
mark = '/'
}
message := ""
if !readable {
message = " [not read yet]"
}
extras := ""
if u.showCounts {
if count > 0 {
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(count))
} else {
extras += " "
}
}
if u.showGraph {
bars := (size + perBar/2 - 1) / perBar
// clip if necessary - only happens during startup
if bars > graphBars {
bars = graphBars
} else if bars < 0 {
bars = 0
}
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
}
Linef(0, y, w, fg, bg, ' ', "%8v %s%c%s%s", fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
y++
}
}
// Footer
if u.d == nil {
Line(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Waiting for root directory...")
} else {
message := ""
if u.listing {
message = " [listing in progress]"
}
size, count := u.d.Attr()
Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %v, Objects: %d%s", fs.SizeSuffix(size), count, message)
}
// Show the box on top if required
if u.showBox {
u.Box()
}
err = termbox.Flush()
if err != nil {
return errors.Wrap(err, "failed to flush screen")
}
return nil
}
// Move the cursor this many spaces adjusting the viewport as necessary
func (u *UI) move(d int) {
if u.d == nil {
return
}
absD := d
if d < 0 {
absD = -d
}
entries := len(u.entries)
// Fetch current dirPos
dirPos := u.dirPosMap[u.path]
dirPos.entry += d
// check entry in range
if dirPos.entry < 0 {
dirPos.entry = 0
} else if dirPos.entry >= entries {
dirPos.entry = entries - 1
}
// check cursor still on screen
p := dirPos.entry - dirPos.offset // where dirPos.entry appears on the screen
if p < 0 {
dirPos.offset -= absD
} else if p >= u.dirListHeight {
dirPos.offset += absD
}
// check dirPos.offset in bounds
if entries == 0 || dirPos.offset < 0 {
dirPos.offset = 0
} else if dirPos.offset >= entries {
dirPos.offset = entries - 1
}
// write dirPos back for later
u.dirPosMap[u.path] = dirPos
}
// Sort by the configured sort method
type ncduSort struct {
sortPerm []int
entries fs.DirEntries
d *scan.Dir
u *UI
}
// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
isize, icount, _, _ := ds.d.AttrI(ds.sortPerm[i])
jsize, jcount, _, _ := ds.d.AttrI(ds.sortPerm[j])
iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
switch {
case ds.u.sortByName < 0:
return iname > jname
case ds.u.sortByName > 0:
break
case ds.u.sortBySize < 0:
if isize != jsize {
return isize < jsize
}
case ds.u.sortBySize > 0:
if isize != jsize {
return isize > jsize
}
case ds.u.sortByCount < 0:
if icount != jcount {
return icount < jcount
}
case ds.u.sortByCount > 0:
if icount != jcount {
return icount > jcount
}
}
// if everything equal, sort by name
return iname < jname
}
// Swap is part of sort.Interface.
func (ds *ncduSort) Swap(i, j int) {
ds.sortPerm[i], ds.sortPerm[j] = ds.sortPerm[j], ds.sortPerm[i]
}
// Len is part of sort.Interface.
func (ds *ncduSort) Len() int {
return len(ds.sortPerm)
}
// sort the permutation map of the current directory
func (u *UI) sortCurrentDir() {
u.sortPerm = u.sortPerm[:0]
for i := range u.entries {
u.sortPerm = append(u.sortPerm, i)
}
data := ncduSort{
sortPerm: u.sortPerm,
entries: u.entries,
d: u.d,
u: u,
}
sort.Sort(&data)
if len(u.invSortPerm) < len(u.sortPerm) {
u.invSortPerm = make([]int, len(u.sortPerm))
}
for i, j := range u.sortPerm {
u.invSortPerm[j] = i
}
}
// setCurrentDir sets the current directory
func (u *UI) setCurrentDir(d *scan.Dir) {
u.d = d
u.entries = d.Entries()
u.path = path.Join(u.fsName, d.Path())
u.sortCurrentDir()
}
// enters the current entry
func (u *UI) enter() {
if u.d == nil || len(u.entries) == 0 {
return
}
dirPos := u.dirPosMap[u.path]
d, _ := u.d.GetDir(u.sortPerm[dirPos.entry])
if d == nil {
return
}
u.setCurrentDir(d)
}
// up goes up to the parent directory
func (u *UI) up() {
if u.d == nil {
return
}
parent := u.d.Parent()
if parent != nil {
u.setCurrentDir(parent)
}
}
// popupBox shows a box with the text in
func (u *UI) popupBox(text []string) {
u.boxText = text
u.showBox = true
}
// togglePopupBox toggles the display of a box with the text in
func (u *UI) togglePopupBox(text []string) {
if u.showBox {
u.showBox = false
} else {
u.popupBox(text)
}
}
// toggle the sorting for the flag passed in
func (u *UI) toggleSort(sortType *int8) {
old := *sortType
u.sortBySize = 0
u.sortByCount = 0
u.sortByName = 0
if old == 0 {
*sortType = 1
} else {
*sortType = -old
}
u.sortCurrentDir()
}
// NewUI creates a new user interface for ncdu on f
func NewUI(f fs.Fs) *UI {
return &UI{
f: f,
path: "Waiting for root...",
dirListHeight: 20, // updated in Draw
fsName: f.Name() + ":" + f.Root(),
showGraph: true,
showCounts: false,
sortByName: 0, // +1 for normal, 0 for off, -1 for reverse
sortBySize: 1,
sortByCount: 0,
dirPosMap: make(map[string]dirPos),
}
}
// Show shows the user interface
func (u *UI) Show() error {
err := termbox.Init()
if err != nil {
return errors.Wrap(err, "termbox init")
}
defer termbox.Close()
// scan the disk in the background
u.listing = true
rootChan, errChan, updated := scan.Scan(u.f)
// Poll the events into a channel
events := make(chan termbox.Event)
doneWithEvent := make(chan bool)
go func() {
for {
events <- termbox.PollEvent()
<-doneWithEvent
}
}()
// Main loop, waiting for events and channels
outer:
for {
//Reset()
err := u.Draw()
if err != nil {
return errors.Wrap(err, "draw failed")
}
var root *scan.Dir
select {
case root = <-rootChan:
u.root = root
u.setCurrentDir(root)
case err := <-errChan:
if err != nil {
return errors.Wrap(err, "ncdu directory listing")
}
u.listing = false
case <-updated:
// redraw
// might want to limit updates per second
u.sortCurrentDir()
case ev := <-events:
doneWithEvent <- true
if ev.Type == termbox.EventKey {
switch ev.Key + termbox.Key(ev.Ch) {
case termbox.KeyEsc, termbox.KeyCtrlC, 'q':
if u.showBox {
u.showBox = false
} else {
break outer
}
case termbox.KeyArrowDown, 'j':
u.move(1)
case termbox.KeyArrowUp, 'k':
u.move(-1)
case termbox.KeyPgdn, '-', '_':
u.move(u.dirListHeight)
case termbox.KeyPgup, '=', '+':
u.move(-u.dirListHeight)
case termbox.KeyArrowLeft, 'h':
u.up()
case termbox.KeyArrowRight, 'l', termbox.KeyEnter:
u.enter()
case 'c':
u.showCounts = !u.showCounts
case 'g':
u.showGraph = !u.showGraph
case 'n':
u.toggleSort(&u.sortByName)
case 's':
u.toggleSort(&u.sortBySize)
case 'C':
u.toggleSort(&u.sortByCount)
case '?':
u.togglePopupBox(helpText)
// Refresh the screen. Not obvious what key to map
// this onto, but ^L is a common choice.
case termbox.KeyCtrlL:
err := termbox.Sync()
if err != nil {
fs.Errorf(nil, "termbox sync returned error: %v", err)
}
}
}
}
// listen to key presses, etc
}
return nil
}
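
The sorting above never reorders u.entries in place: sortCurrentDir sorts a permutation of indexes and then builds the inverse permutation so an entry's display row can be looked up. A minimal, self-contained sketch of the same idiom (names are illustrative, not from the rclone code):

package main

import (
    "fmt"
    "sort"
)

func main() {
    sizes := []int64{30, 10, 20}
    // perm[i] is the index of the entry shown in row i
    perm := []int{0, 1, 2}
    sort.Slice(perm, func(i, j int) bool { return sizes[perm[i]] > sizes[perm[j]] })
    // inv[j] is the row at which entry j is shown
    inv := make([]int, len(perm))
    for i, j := range perm {
        inv[j] = i
    }
    fmt.Println(perm, inv) // [0 2 1] [0 2 1]
}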

View File

@@ -0,0 +1,6 @@
// Build stub for ncdu on unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build plan9 solaris
package ncdu

View File

@@ -0,0 +1,165 @@
// Package scan does concurrent scanning of an Fs building up a directory tree.
package scan
import (
"path"
"sync"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
)
// Dir represents a directory found in the remote
type Dir struct {
parent *Dir
path string
mu sync.Mutex
count int64
size int64
entries fs.DirEntries
dirs map[string]*Dir
}
// Parent returns the directory above this one
func (d *Dir) Parent() *Dir {
// no locking needed since these are write once in newDir()
return d.parent
}
// Path returns the position of the dir in the filesystem
func (d *Dir) Path() string {
// no locking needed since these are write once in newDir()
return d.path
}
// make a new directory
func newDir(parent *Dir, dirPath string, entries fs.DirEntries) *Dir {
d := &Dir{
parent: parent,
path: dirPath,
entries: entries,
dirs: make(map[string]*Dir),
}
// Count size in this dir
for _, entry := range entries {
if o, ok := entry.(fs.Object); ok {
d.count++
d.size += o.Size()
}
}
// Set my directory entry in parent
if parent != nil {
parent.mu.Lock()
leaf := path.Base(dirPath)
d.parent.dirs[leaf] = d
parent.mu.Unlock()
}
// Accumulate counts in parents
for ; parent != nil; parent = parent.parent {
parent.mu.Lock()
parent.count += d.count
parent.size += d.size
parent.mu.Unlock()
}
return d
}
// Entries returns a copy of the entries in the directory
func (d *Dir) Entries() fs.DirEntries {
return append(fs.DirEntries(nil), d.entries...)
}
// gets the directory of the i-th entry
//
// returns nil if it is a file
// returns a flag as to whether it is a directory or not
//
// Call with d.mu held
func (d *Dir) getDir(i int) (subDir *Dir, isDir bool) {
obj := d.entries[i]
dir, ok := obj.(fs.Directory)
if !ok {
return nil, false
}
leaf := path.Base(dir.Remote())
subDir = d.dirs[leaf]
return subDir, true
}
// GetDir returns the Dir of the i-th entry
//
// returns nil if it is a file
// returns a flag as to whether it is a directory or not
func (d *Dir) GetDir(i int) (subDir *Dir, isDir bool) {
d.mu.Lock()
defer d.mu.Unlock()
return d.getDir(i)
}
// Attr returns the size and count for the directory
func (d *Dir) Attr() (size int64, count int64) {
d.mu.Lock()
defer d.mu.Unlock()
return d.size, d.count
}
// AttrI returns the size, count and flags for the i-th directory entry
func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool) {
d.mu.Lock()
defer d.mu.Unlock()
subDir, isDir := d.getDir(i)
if !isDir {
return d.entries[i].Size(), 0, false, true
}
if subDir == nil {
return 0, 0, true, false
}
size, count = subDir.Attr()
return size, count, true, true
}
// Scan the Fs passed in, returning a root directory channel, an
// error channel and a channel which signals each update to the tree
func Scan(f fs.Fs) (chan *Dir, chan error, chan struct{}) {
root := make(chan *Dir, 1)
errChan := make(chan error, 1)
updated := make(chan struct{}, 1)
go func() {
parents := map[string]*Dir{}
err := walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err // FIXME mark directory as errored instead of aborting
}
var parent *Dir
if dirPath != "" {
parentPath := path.Dir(dirPath)
if parentPath == "." {
parentPath = ""
}
var ok bool
parent, ok = parents[parentPath]
if !ok {
errChan <- errors.Errorf("couldn't find parent for %q", dirPath)
}
}
d := newDir(parent, dirPath, entries)
parents[dirPath] = d
if dirPath == "" {
root <- d
}
// Mark updated
select {
case updated <- struct{}{}:
default: // drop the notification - an update is already pending
}
return nil
})
if err != nil {
errChan <- errors.Wrap(err, "ncdu listing failed")
}
errChan <- nil
}()
return root, errChan, updated
}
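
The select with a default case in Scan is the standard non-blocking send: because updated has a buffer of one, a burst of directory listings collapses into a single pending redraw instead of blocking the walker on a slow UI. The idiom in isolation (a sketch):

package main

import "fmt"

func main() {
    updated := make(chan struct{}, 1)
    notify := func() {
        select {
        case updated <- struct{}{}: // queue a redraw
        default: // one is already pending - coalesce
        }
    }
    notify()
    notify() // coalesced with the first
    <-updated // consume the single pending notification
    select {
    case <-updated:
        fmt.Println("unexpected second notification")
    default:
        fmt.Println("notifications were coalesced")
    }
}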

View File

@@ -0,0 +1,26 @@
package obscure
import (
"fmt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone.conf`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
cmd.Run(false, false, command, func() error {
obscured := obscure.MustObscure(args[0])
fmt.Println(obscured)
return nil
})
},
}
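
For example, to generate an obscured password for pasting into rclone.conf:

    rclone obscure mysecretpassword

Note that obscuring is not encryption - it is reversible (see the reveal command below) and only guards against casual reading of the config file.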

118
.rclone_repo/cmd/progress.go Executable file
View File

@@ -0,0 +1,118 @@
// Show the dynamic progress bar
package cmd
import (
"bytes"
"fmt"
"os"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/log"
"golang.org/x/crypto/ssh/terminal"
)
const (
// interval between progress prints
defaultProgressInterval = 500 * time.Millisecond
// time format for logging
logTimeFormat = "2006-01-02 15:04:05"
)
// startProgress starts the progress bar printing
//
// It returns a channel which should be closed to stop the stats.
func startProgress() chan struct{} {
stopStats := make(chan struct{})
oldLogPrint := fs.LogPrint
if !log.Redirected() {
// Intercept the log calls if not logging to file or syslog
fs.LogPrint = func(level fs.LogLevel, text string) {
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
}
}
go func() {
progressInterval := defaultProgressInterval
if ShowStats() && *statsInterval > 0 {
progressInterval = *statsInterval
}
ticker := time.NewTicker(progressInterval)
for {
select {
case <-ticker.C:
printProgress("")
case <-stopStats:
ticker.Stop()
fs.LogPrint = oldLogPrint
fmt.Println("")
return
}
}
}()
return stopStats
}
// VT100 codes
const (
eraseLine = "\x1b[2K"
moveToStartOfLine = "\x1b[0G"
moveUp = "\x1b[A"
)
// state for the progress printing
var (
nlines = 0 // number of lines in the previous stats block
progressMu sync.Mutex
)
// printProgress prints the progress with an optional log
func printProgress(logMessage string) {
progressMu.Lock()
defer progressMu.Unlock()
var buf bytes.Buffer
w, h, err := terminal.GetSize(int(os.Stdout.Fd()))
if err != nil {
w, h = 80, 25
}
_ = h
stats := strings.TrimSpace(accounting.Stats.String())
logMessage = strings.TrimSpace(logMessage)
out := func(s string) {
buf.WriteString(s)
}
if logMessage != "" {
out("\n")
out(moveUp)
}
// Move to the start of the block we wrote erasing all the previous lines
for i := 0; i < nlines-1; i++ {
out(eraseLine)
out(moveUp)
}
out(eraseLine)
out(moveToStartOfLine)
if logMessage != "" {
out(eraseLine)
out(logMessage + "\n")
}
fixedLines := strings.Split(stats, "\n")
nlines = len(fixedLines)
for i, line := range fixedLines {
if len(line) > w {
line = line[:w]
}
out(line)
if i != nlines-1 {
out("\n")
}
}
writeToTerminal(buf.Bytes())
}
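
The redraw works by cursoring up over the block printed last time, erasing each line, and writing the new stats in its place, so the progress updates without scrolling. A stripped-down, single-line sketch of the same escape codes (assumes a VT100-compatible terminal):

package main

import (
    "fmt"
    "time"
)

const (
    eraseLine         = "\x1b[2K"
    moveToStartOfLine = "\x1b[0G"
)

func main() {
    for i := 1; i <= 3; i++ {
        // erase the previously printed line and rewrite it in place
        fmt.Print(eraseLine, moveToStartOfLine)
        fmt.Printf("progress: %d/3", i)
        time.Sleep(200 * time.Millisecond)
    }
    fmt.Println()
}

A multi-line block, as printProgress writes, additionally emits moveUp ("\x1b[A") once per line before erasing.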

View File

@@ -0,0 +1,9 @@
//+build !windows
package cmd
import "os"
func writeToTerminal(b []byte) {
_, _ = os.Stdout.Write(b)
}

View File

@@ -0,0 +1,28 @@
//+build windows
package cmd
import (
"fmt"
"os"
"sync"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
)
var (
initAnsiParser sync.Once
ansiParser *ansiterm.AnsiParser
)
func writeToTerminal(b []byte) {
initAnsiParser.Do(func() {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
})
_, err := ansiParser.Parse(b)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "\n*** Error from ANSI parser: %v\n", err)
}
}

28
.rclone_repo/cmd/purge/purge.go Executable file
View File

@@ -0,0 +1,28 @@
package purge
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "purge remote:path",
Short: `Remove the path and all of its contents.`,
Long: `
Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use ` + "`" + `delete` + "`" + ` if
you want to selectively delete files.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
return operations.Purge(fdst, "")
})
},
}

160
.rclone_repo/cmd/rc/rc.go Executable file
View File

@@ -0,0 +1,160 @@
package rc
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
noOutput = false
url = "http://localhost:5572/"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVarP(&noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
commandDefinition.Flags().StringVarP(&url, "url", "", url, "URL to connect to rclone remote control.")
}
var commandDefinition = &cobra.Command{
Use: "rc commands parameter",
Short: `Run a command against a running rclone.`,
Long: `
This runs a command against a running rclone. By default it will use
the URL specified by the --rc-addr flag.
Arguments should be passed in as parameter=value.
The result will be returned as a JSON object by default.
Use "rclone rc" to see a list of all possible commands.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1E9, command, args)
cmd.Run(false, false, command, func() error {
if len(args) == 0 {
return list()
}
return run(args)
})
},
}
// do a call from (path, in) to (out, err).
//
// if err is set, out may be a valid error return or it may be nil
func doCall(path string, in rc.Params) (out rc.Params, err error) {
// Do HTTP request
client := fshttp.NewClient(fs.Config)
url := url
// let the user use --rc-addr as well as --url
if rcAddrFlag := pflag.Lookup("rc-addr"); rcAddrFlag != nil && rcAddrFlag.Changed {
url = rcAddrFlag.Value.String()
if strings.HasPrefix(url, ":") {
url = "localhost" + url
}
url = "http://" + url + "/"
}
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += path
data, err := json.Marshal(in)
if err != nil {
return nil, errors.Wrap(err, "failed to encode JSON")
}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(data))
if err != nil {
return nil, errors.Wrap(err, "connection failed")
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
var bodyString string
if err == nil {
bodyString = string(body)
} else {
bodyString = err.Error()
}
bodyString = strings.TrimSpace(bodyString)
return nil, errors.Errorf("Failed to read rc response: %s: %s", resp.Status, bodyString)
}
// Parse output
out = make(rc.Params)
err = json.NewDecoder(resp.Body).Decode(&out)
if err != nil {
return nil, errors.Wrap(err, "failed to decode JSON")
}
return out, err
}
// Run the remote control command passed in
func run(args []string) (err error) {
path := strings.Trim(args[0], "/")
// parse input
in := make(rc.Params)
for _, param := range args[1:] {
equals := strings.IndexRune(param, '=')
if equals < 0 {
return errors.Errorf("No '=' found in parameter %q", param)
}
key, value := param[:equals], param[equals+1:]
in[key] = value
}
// Do the call
out, callErr := doCall(path, in)
// Write the JSON blob to stdout if required
if out != nil && !noOutput {
err := rc.WriteJSON(os.Stdout, out)
if err != nil {
return errors.Wrap(err, "failed to output JSON")
}
}
return callErr
}
// List the available commands to stdout
func list() error {
list, err := doCall("rc/list", nil)
if err != nil {
return errors.Wrap(err, "failed to list")
}
commands, ok := list["commands"].([]interface{})
if !ok {
return errors.New("bad JSON")
}
for _, command := range commands {
info, ok := command.(map[string]interface{})
if !ok {
return errors.New("bad JSON")
}
fmt.Printf("### %s: %s\n\n", info["Path"], info["Title"])
fmt.Printf("%s\n\n", info["Help"])
}
return nil
}
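
Since run splits each argument at the first '=', a call against a running rclone with the remote control enabled looks like, for example:

    rclone rc rc/noop param1=one param2=two

rc/noop echoes its input parameters back, which makes it a convenient connectivity check.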

57
.rclone_repo/cmd/rcat/rcat.go Executable file
View File

@@ -0,0 +1,57 @@
package rcat
import (
"log"
"os"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "rcat remote:path",
Short: `Copies standard input to file on remote.`,
Long: `
rclone rcat reads from standard input (stdin) and copies it to a
single remote file.
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
If the remote file already exists, it will be overwritten.
rcat will try to upload small files in a single request, which is
usually more efficient than the streaming/chunked upload endpoints,
which use multiple requests. Exact behaviour depends on the remote.
What is considered a small file may be set through
` + "`--streaming-upload-cutoff`" + `. Uploading only starts after
the cutoff is reached or if the file ends before that. The data
must fit into RAM. The cutoff needs to be small enough to adhere to
the limits of your remote - please see its documentation. Generally speaking,
setting this cutoff too high will decrease your performance.
Note also that the upload cannot be retried, because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
` + "`rclone move`" + ` it to the destination.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) != 0 {
log.Fatalf("nothing to read from standard input (stdin).")
}
fdst, dstFileName := cmd.NewFsDstFile(args)
cmd.Run(false, false, command, func() error {
_, err := operations.Rcat(fdst, dstFileName, os.Stdin, time.Now())
return err
})
},
}

View File

@@ -0,0 +1,30 @@
package reveal
import (
"fmt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "reveal password",
Short: `Reveal obscured password from rclone.conf`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
cmd.Run(false, false, command, func() error {
revealed, err := obscure.Reveal(args[0])
if err != nil {
return err
}
fmt.Println(revealed)
return nil
})
},
Hidden: true,
}

26
.rclone_repo/cmd/rmdir/rmdir.go Executable file
View File

@@ -0,0 +1,26 @@
package rmdir
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "rmdir remote:path",
Short: `Remove the path if empty.`,
Long: `
Remove the path. Note that you can't remove a path with
objects in it; use purge for that.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
return operations.Rmdir(fdst, "")
})
},
}

View File

@@ -0,0 +1,38 @@
package rmdir
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
leaveRoot = false
)
func init() {
cmd.Root.AddCommand(rmdirsCmd)
rmdirsCmd.Flags().BoolVarP(&leaveRoot, "leave-root", "", leaveRoot, "Do not remove root directory if empty")
}
var rmdirsCmd = &cobra.Command{
Use: "rmdirs remote:path",
Short: `Remove empty directories under the path.`,
Long: `This removes any empty directories (or directories that only contain
empty directories) under the path that it finds, including the path if
it has nothing in it.
If you supply the --leave-root flag, it will not remove the root directory.
This is useful for tidying up remotes that rclone has left a lot of
empty directories in.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
return operations.Rmdirs(fdst, "", leaveRoot)
})
},
}
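
For example, to prune empty directories under a (hypothetical) remote while keeping the root itself:

    rclone rmdirs --leave-root remote:path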

View File

@@ -0,0 +1,259 @@
package http
import (
"fmt"
"html/template"
"net/http"
"os"
"path"
"strconv"
"strings"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/cmd/serve/httplib/httpflags"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
func init() {
httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
}
// Command definition for cobra
var Command = &cobra.Command{
Use: "http remote:path",
Short: `Serve the remote over HTTP.`,
Long: `rclone serve http implements a basic web server to serve the remote
over HTTP. This can be viewed in a web browser or you can make a
remote of type http read from it.
You can use the filter flags (eg --include, --exclude) to control what
is served.
The server will log errors. Use -v to see access logs.
--bwlimit will be respected for file transfers. Use --stats to
control the stats printing.
` + httplib.Help + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, &httpflags.Opt)
s.serve()
return nil
})
},
}
// server contains everything to run the server
type server struct {
f fs.Fs
vfs *vfs.VFS
srv *httplib.Server
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
srv: httplib.NewServer(mux, opt),
}
mux.HandleFunc("/", s.handler)
return s
}
// serve runs the http server - doesn't return
func (s *server) serve() {
err := s.srv.Serve()
if err != nil {
fs.Errorf(s.f, "Opening listener: %v", err)
}
fs.Logf(s.f, "Serving on %s", s.srv.URL())
s.srv.Wait()
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if isDir {
s.serveDir(w, r, remote)
} else {
s.serveFile(w, r, remote)
}
}
// entry is a directory entry
type entry struct {
remote string
URL string
Leaf string
}
// entries represents a directory
type entries []entry
// addEntry adds an entry to that directory
func (es *entries) addEntry(node interface {
Path() string
Name() string
IsDir() bool
}) {
remote := node.Path()
leaf := node.Name()
urlRemote := leaf
if node.IsDir() {
leaf += "/"
urlRemote += "/"
}
*es = append(*es, entry{remote: remote, URL: rest.URLPathEscape(urlRemote), Leaf: leaf})
}
// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`
// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
// indexData is used to fill in the indexTemplate
type indexData struct {
Title string
Entries entries
}
// internalError returns an http.StatusInternalServerError and logs the error
func internalError(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}
// serveDir serves a directory index at dirRemote
func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
// List the directory
node, err := s.vfs.Stat(dirRemote)
if err == vfs.ENOENT {
http.Error(w, "Directory not found", http.StatusNotFound)
return
} else if err != nil {
internalError(dirRemote, w, "Failed to list directory", err)
return
}
if !node.IsDir() {
http.Error(w, "Not a directory", http.StatusNotFound)
return
}
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
internalError(dirRemote, w, "Failed to list directory", err)
return
}
var out entries
for _, node := range dirEntries {
out.addEntry(node)
}
// Account the transfer
accounting.Stats.Transferring(dirRemote)
defer accounting.Stats.DoneTransferring(dirRemote, true)
fs.Infof(dirRemote, "%s: Serving directory", r.RemoteAddr)
err = indexTemplate.Execute(w, indexData{
Entries: out,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
})
if err != nil {
internalError(dirRemote, w, "Failed to render template", err)
return
}
}
// serveFile serves a file object at remote
func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
node, err := s.vfs.Stat(remote)
if err == vfs.ENOENT {
fs.Infof(remote, "%s: File not found", r.RemoteAddr)
http.Error(w, "File not found", http.StatusNotFound)
return
} else if err != nil {
internalError(remote, w, "Failed to find file", err)
return
}
if !node.IsFile() {
http.Error(w, "Not a file", http.StatusNotFound)
return
}
entry := node.DirEntry()
if entry == nil {
http.Error(w, "Can't open file being written", http.StatusNotFound)
return
}
obj := entry.(fs.Object)
file := node.(*vfs.File)
// Set content length since we know how long the object is
w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
// Set content type
mimeType := fs.MimeType(obj)
if mimeType == "application/octet-stream" && path.Ext(remote) == "" {
// Leave header blank so http server guesses
} else {
w.Header().Set("Content-Type", mimeType)
}
// If HEAD no need to read the object since we have set the headers
if r.Method == "HEAD" {
return
}
// open the object
in, err := file.Open(os.O_RDONLY)
if err != nil {
internalError(remote, w, "Failed to open file", err)
return
}
defer func() {
err := in.Close()
if err != nil {
fs.Errorf(remote, "Failed to close file: %v", err)
}
}()
// Account the transfer
accounting.Stats.Transferring(remote)
defer accounting.Stats.DoneTransferring(remote, true)
// FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer
// Serve the file
http.ServeContent(w, r, remote, node.ModTime(), in)
}
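
A typical invocation of the server above (remote name hypothetical; --addr comes from the httplib flags registered in init):

    rclone serve http remote:path --addr localhost:8080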

View File

@@ -0,0 +1,237 @@
// +build go1.8
package http
import (
"flag"
"io/ioutil"
"net"
"net/http"
"path"
"strings"
"testing"
"time"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/filter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
httpServer *server
)
const (
testBindAddress = "localhost:51777"
testURL = "http://" + testBindAddress + "/"
)
func startServer(t *testing.T, f fs.Fs) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
httpServer = newServer(f, &opt)
go httpServer.serve()
// try to connect to the test server
pause := time.Millisecond
for i := 0; i < 10; i++ {
conn, err := net.Dial("tcp", testBindAddress)
if err == nil {
_ = conn.Close()
return
}
// t.Logf("couldn't connect, sleeping for %v: %v", pause, err)
time.Sleep(pause)
pause *= 2
}
t.Fatal("couldn't connect to server")
}
func TestInit(t *testing.T) {
// Configure the remote
config.LoadConfig()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
// exclude files called hidden.txt and directories called hidden
require.NoError(t, filter.Active.AddRule("- hidden.txt"))
require.NoError(t, filter.Active.AddRule("- hidden/**"))
// Create a test Fs
f, err := fs.NewFs("testdata/files")
require.NoError(t, err)
startServer(t, f)
}
// check body against the file, or re-write body if -updategolden is
// set.
func checkGolden(t *testing.T, fileName string, got []byte) {
if *updateGolden {
t.Logf("Updating golden file %q", fileName)
err := ioutil.WriteFile(fileName, got, 0666)
require.NoError(t, err)
} else {
want, err := ioutil.ReadFile(fileName)
require.NoError(t, err)
wants := strings.Split(string(want), "\n")
gots := strings.Split(string(got), "\n")
assert.Equal(t, wants, gots, fileName)
}
}
func TestGET(t *testing.T) {
for _, test := range []struct {
URL string
Status int
Golden string
Method string
Range string
}{
{
URL: "",
Status: http.StatusOK,
Golden: "testdata/golden/index.html",
},
{
URL: "notfound",
Status: http.StatusNotFound,
Golden: "testdata/golden/notfound.html",
},
{
URL: "dirnotfound/",
Status: http.StatusNotFound,
Golden: "testdata/golden/dirnotfound.html",
},
{
URL: "hidden/",
Status: http.StatusNotFound,
Golden: "testdata/golden/hiddendir.html",
},
{
URL: "one%25.txt",
Status: http.StatusOK,
Golden: "testdata/golden/one.txt",
},
{
URL: "hidden.txt",
Status: http.StatusNotFound,
Golden: "testdata/golden/hidden.txt",
},
{
URL: "three/",
Status: http.StatusOK,
Golden: "testdata/golden/three.html",
},
{
URL: "three/a.txt",
Status: http.StatusOK,
Golden: "testdata/golden/a.txt",
},
{
URL: "",
Method: "HEAD",
Status: http.StatusOK,
Golden: "testdata/golden/indexhead.txt",
},
{
URL: "one%25.txt",
Method: "HEAD",
Status: http.StatusOK,
Golden: "testdata/golden/onehead.txt",
},
{
URL: "",
Method: "POST",
Status: http.StatusMethodNotAllowed,
Golden: "testdata/golden/indexpost.txt",
},
{
URL: "one%25.txt",
Method: "POST",
Status: http.StatusMethodNotAllowed,
Golden: "testdata/golden/onepost.txt",
},
{
URL: "two.txt",
Status: http.StatusOK,
Golden: "testdata/golden/two.txt",
},
{
URL: "two.txt",
Status: http.StatusPartialContent,
Range: "bytes=2-5",
Golden: "testdata/golden/two2-5.txt",
},
{
URL: "two.txt",
Status: http.StatusPartialContent,
Range: "bytes=0-6",
Golden: "testdata/golden/two-6.txt",
},
{
URL: "two.txt",
Status: http.StatusPartialContent,
Range: "bytes=3-",
Golden: "testdata/golden/two3-.txt",
},
} {
method := test.Method
if method == "" {
method = "GET"
}
req, err := http.NewRequest(method, testURL+test.URL, nil)
require.NoError(t, err)
if test.Range != "" {
req.Header.Add("Range", test.Range)
}
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, test.Status, resp.StatusCode, test.Golden)
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
checkGolden(t, test.Golden, body)
}
}
type mockNode struct {
path string
isdir bool
}
func (n mockNode) Path() string { return n.path }
func (n mockNode) Name() string {
if n.path == "" {
return ""
}
return path.Base(n.path)
}
func (n mockNode) IsDir() bool { return n.isdir }
func TestAddEntry(t *testing.T) {
var es entries
es.addEntry(mockNode{path: "", isdir: true})
es.addEntry(mockNode{path: "dir", isdir: true})
es.addEntry(mockNode{path: "a/b/c/d.txt", isdir: false})
es.addEntry(mockNode{path: "a/b/c/colon:colon.txt", isdir: false})
es.addEntry(mockNode{path: "\"quotes\".txt", isdir: false})
assert.Equal(t, entries{
{remote: "", URL: "/", Leaf: "/"},
{remote: "dir", URL: "dir/", Leaf: "dir/"},
{remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt"},
{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt"},
{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: "\"quotes\".txt"},
}, es)
}
func TestFinalise(t *testing.T) {
httpServer.srv.Close()
}

View File

@@ -0,0 +1 @@
hidden

View File

@@ -0,0 +1 @@
hiddenfile

View File

@@ -0,0 +1 @@
one%

View File

@@ -0,0 +1 @@
three

View File

@@ -0,0 +1 @@
threeb

View File

@@ -0,0 +1 @@
0123456789

View File

@@ -0,0 +1 @@
three

View File

@@ -0,0 +1 @@
Directory not found

View File

@@ -0,0 +1 @@
File not found

View File

@@ -0,0 +1 @@
Directory not found

View File

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /</title>
</head>
<body>
<h1>Directory listing of /</h1>
<a href="one%25.txt">one%.txt</a><br />
<a href="three/">three/</a><br />
<a href="two.txt">two.txt</a><br />
</body>
</html>

Some files were not shown because too many files have changed in this diff