overdue
This commit is contained in:
437
.rclone_repo/vfs/cache.go
Executable file
437
.rclone_repo/vfs/cache.go
Executable file
@@ -0,0 +1,437 @@
|
||||
// This deals with caching of files locally
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/djherbis/times"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// CacheMode controls the functionality of the cache
type CacheMode byte

// CacheMode options, ordered from least to most caching.
const (
	CacheModeOff     CacheMode = iota // cache nothing - return errors for writes which can't be satisfied
	CacheModeMinimal                  // cache only the minimum, eg read/write opens
	CacheModeWrites                   // cache all files opened with write intent
	CacheModeFull                     // cache all files opened in any mode
)

// cacheModeToString maps each CacheMode value to its user-visible name.
// The slice index is the CacheMode value itself.
var cacheModeToString = []string{
	CacheModeOff:     "off",
	CacheModeMinimal: "minimal",
	CacheModeWrites:  "writes",
	CacheModeFull:    "full",
}
|
||||
|
||||
// String turns a CacheMode into a string
|
||||
func (l CacheMode) String() string {
|
||||
if l >= CacheMode(len(cacheModeToString)) {
|
||||
return fmt.Sprintf("CacheMode(%d)", l)
|
||||
}
|
||||
return cacheModeToString[l]
|
||||
}
|
||||
|
||||
// Set a CacheMode
|
||||
func (l *CacheMode) Set(s string) error {
|
||||
for n, name := range cacheModeToString {
|
||||
if s != "" && name == s {
|
||||
*l = CacheMode(n)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.Errorf("Unknown cache mode level %q", s)
|
||||
}
|
||||
|
||||
// Type of the value - satisfies the pflag.Value interface.
func (l *CacheMode) Type() string {
	return "string"
}
|
||||
|
||||
// cache tracks files and directories opened through the VFS and keeps
// local copies of them under root.
type cache struct {
	f      fs.Fs                 // fs for the cache directory
	opt    *Options              // vfs Options
	root   string                // root of the cache directory
	itemMu sync.Mutex            // protects the item map below
	item   map[string]*cacheItem // files/directories in the cache, keyed by clean()ed remote path
}
|
||||
|
||||
// cacheItem is stored in the item map and records the open count and
// last access time for one file or directory in the cache.
type cacheItem struct {
	opens  int       // number of times file is open
	atime  time.Time // last time file was accessed
	isFile bool      // if this is a file or a directory
}
|
||||
|
||||
// newCacheItem returns an item for the cache
|
||||
func newCacheItem(isFile bool) *cacheItem {
|
||||
return &cacheItem{atime: time.Now(), isFile: isFile}
|
||||
}
|
||||
|
||||
// newCache creates a new cache hierarchy for f
//
// The on-disk location is CacheDir/vfs/<remote name>/<remote root>.
//
// This starts background goroutines which can be cancelled with the
// context passed in.
func newCache(ctx context.Context, f fs.Fs, opt *Options) (*cache, error) {
	fRoot := filepath.FromSlash(f.Root())
	if runtime.GOOS == "windows" {
		// Strip the long-path prefix and drive colons so the remote
		// root can be used as a directory name on Windows.
		if strings.HasPrefix(fRoot, `\\?`) {
			fRoot = fRoot[3:]
		}
		fRoot = strings.Replace(fRoot, ":", "", -1)
	}
	root := filepath.Join(config.CacheDir, "vfs", f.Name(), fRoot)
	fs.Debugf(nil, "vfs cache root is %q", root)

	// Open the cache directory itself as an Fs (shadows the remote f).
	f, err := fs.NewFs(root)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create cache remote")
	}

	c := &cache{
		f:    f,
		opt:  opt,
		root: root,
		item: make(map[string]*cacheItem),
	}

	// Background cleaner; exits when ctx is cancelled.
	go c.cleaner(ctx)

	return c, nil
}
|
||||
|
||||
// findParent returns the parent directory of name, or "" for the root
func findParent(name string) string {
	switch parent := path.Dir(name); parent {
	case ".", "/":
		return ""
	default:
		return parent
	}
}
|
||||
|
||||
// clean returns the cleaned version of name for use in the index map
func clean(name string) string {
	cleaned := path.Clean(strings.Trim(name, "/"))
	// path.Clean turns the empty path into "." - the root is stored as "".
	if cleaned == "." || cleaned == "/" {
		return ""
	}
	return cleaned
}
|
||||
|
||||
// toOSPath turns a remote relative name into an OS path in the cache
// by joining it onto the cache root with OS-native separators.
func (c *cache) toOSPath(name string) string {
	return filepath.Join(c.root, filepath.FromSlash(name))
}
|
||||
|
||||
// mkdir makes the directory for name in the cache and returns an os
|
||||
// path for the file
|
||||
func (c *cache) mkdir(name string) (string, error) {
|
||||
parent := findParent(name)
|
||||
leaf := path.Base(name)
|
||||
parentPath := c.toOSPath(parent)
|
||||
err := os.MkdirAll(parentPath, 0700)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "make cache directory failed")
|
||||
}
|
||||
c.cacheDir(parent)
|
||||
return filepath.Join(parentPath, leaf), nil
|
||||
}
|
||||
|
||||
// _get gets name from the cache or creates a new one
|
||||
//
|
||||
// It returns the item and found as to whether this item was found in
|
||||
// the cache (or just created).
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
//
|
||||
// must be called with itemMu held
|
||||
func (c *cache) _get(isFile bool, name string) (item *cacheItem, found bool) {
|
||||
item = c.item[name]
|
||||
found = item != nil
|
||||
if !found {
|
||||
item = newCacheItem(isFile)
|
||||
c.item[name] = item
|
||||
}
|
||||
return item, found
|
||||
}
|
||||
|
||||
// opens returns the number of opens that are on the file
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) opens(name string) int {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
defer c.itemMu.Unlock()
|
||||
item := c.item[name]
|
||||
if item == nil {
|
||||
return 0
|
||||
}
|
||||
return item.opens
|
||||
}
|
||||
|
||||
// get gets name from the cache or creates a new one
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) get(name string) *cacheItem {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
item, _ := c._get(true, name)
|
||||
c.itemMu.Unlock()
|
||||
return item
|
||||
}
|
||||
|
||||
// updateTime sets the atime of the name to that passed in if it is
|
||||
// newer than the existing or there isn't an existing time.
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) updateTime(name string, when time.Time) {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
item, found := c._get(true, name)
|
||||
if !found || when.Sub(item.atime) > 0 {
|
||||
fs.Debugf(name, "updateTime: setting atime to %v", when)
|
||||
item.atime = when
|
||||
}
|
||||
c.itemMu.Unlock()
|
||||
}
|
||||
|
||||
// _open marks name as open, must be called with the lock held
//
// It increments the open count and refreshes the atime of name and of
// every parent directory up to the root (the root is stored as ""),
// creating cache items on the way.
//
// name should be a remote path not an osPath
func (c *cache) _open(isFile bool, name string) {
	for {
		item, _ := c._get(isFile, name)
		item.opens++
		item.atime = time.Now()
		if name == "" {
			break
		}
		// Everything above the leaf is a directory.
		isFile = false
		name = findParent(name)
	}
}
|
||||
|
||||
// open marks name as open
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) open(name string) {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
c._open(true, name)
|
||||
c.itemMu.Unlock()
|
||||
}
|
||||
|
||||
// cacheDir marks a directory and its parents as being in the cache
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) cacheDir(name string) {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
defer c.itemMu.Unlock()
|
||||
for {
|
||||
item := c.item[name]
|
||||
if item != nil {
|
||||
break
|
||||
}
|
||||
c.item[name] = newCacheItem(false)
|
||||
if name == "" {
|
||||
break
|
||||
}
|
||||
name = findParent(name)
|
||||
}
|
||||
}
|
||||
|
||||
// _close marks name as closed - must be called with the lock held
//
// It decrements the open count and refreshes the atime of name and of
// every parent directory up to the root. A negative open count is
// logged as a double close but not otherwise corrected.
func (c *cache) _close(isFile bool, name string) {
	for {
		item, _ := c._get(isFile, name)
		item.opens--
		item.atime = time.Now()
		if item.opens < 0 {
			fs.Errorf(name, "cache: double close")
		}
		if name == "" {
			break
		}
		// Everything above the leaf is a directory.
		isFile = false
		name = findParent(name)
	}
}
|
||||
|
||||
// close marks name as closed
|
||||
//
|
||||
// name should be a remote path not an osPath
|
||||
func (c *cache) close(name string) {
|
||||
name = clean(name)
|
||||
c.itemMu.Lock()
|
||||
c._close(true, name)
|
||||
c.itemMu.Unlock()
|
||||
}
|
||||
|
||||
// remove should be called if name is deleted
|
||||
func (c *cache) remove(name string) {
|
||||
osPath := c.toOSPath(name)
|
||||
err := os.Remove(osPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
fs.Errorf(name, "Failed to remove from cache: %v", err)
|
||||
} else {
|
||||
fs.Debugf(name, "Removed from cache")
|
||||
}
|
||||
}
|
||||
|
||||
// removeDir should be called if dir is deleted and returns true if
// the directory is gone.
//
// Returns true when the directory was removed or was already absent.
func (c *cache) removeDir(dir string) bool {
	osPath := c.toOSPath(dir)
	err := os.Remove(osPath)
	if err == nil || os.IsNotExist(err) {
		if err == nil {
			fs.Debugf(dir, "Removed empty directory")
		}
		return true
	}
	// NOTE(review): os.IsExist appears to be used here to suppress
	// logging for "directory not empty" failures - confirm this
	// mapping holds on all supported platforms.
	if !os.IsExist(err) {
		fs.Errorf(dir, "Failed to remove cached dir: %v", err)
	}
	return false
}
|
||||
|
||||
// cleanUp empties the cache of everything by deleting the whole cache
// root directory tree from disk.
func (c *cache) cleanUp() error {
	return os.RemoveAll(c.root)
}
|
||||
|
||||
// walk walks the cache calling the function
//
// fn receives the OS path, the file info and the remote-style name
// (slash separated, relative to the cache root, "" for the root
// itself). Any error from fn or from the walk itself is returned.
func (c *cache) walk(fn func(osPath string, fi os.FileInfo, name string) error) error {
	return filepath.Walk(c.root, func(osPath string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Find path relative to the cache root
		name, err := filepath.Rel(c.root, osPath)
		if err != nil {
			return errors.Wrap(err, "filepath.Rel failed in walk")
		}
		if name == "." {
			name = ""
		}
		// And convert into slashes
		name = filepath.ToSlash(name)

		return fn(osPath, fi, name)
	})
}
|
||||
|
||||
// updateAtimes walks the cache updating any atimes it finds
|
||||
func (c *cache) updateAtimes() error {
|
||||
return c.walk(func(osPath string, fi os.FileInfo, name string) error {
|
||||
if !fi.IsDir() {
|
||||
// Update the atime with that of the file
|
||||
atime := times.Get(fi).AccessTime()
|
||||
c.updateTime(name, atime)
|
||||
} else {
|
||||
c.cacheDir(name)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// purgeOld gets rid of any files that are over age
//
// Delegates to _purgeOld with the real remove functions; tests supply
// their own.
func (c *cache) purgeOld(maxAge time.Duration) {
	c._purgeOld(maxAge, c.remove, c.removeDir)
}
|
||||
|
||||
// _purgeOld removes closed files whose atime is older than maxAge and
// then any unused directories, using the supplied remove/removeDir
// functions so tests can observe what would be deleted.
func (c *cache) _purgeOld(maxAge time.Duration, remove func(name string), removeDir func(name string) bool) {
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	cutoff := time.Now().Add(-maxAge)
	// Pass 1: remove stale closed files.
	// Deleting from a map during range is safe in Go.
	for name, item := range c.item {
		if item.isFile && item.opens == 0 {
			// If not locked and access time too long ago - delete the file
			dt := item.atime.Sub(cutoff)
			// fs.Debugf(name, "atime=%v cutoff=%v, dt=%v", item.atime, cutoff, dt)
			if dt < 0 {
				remove(name)
				// Remove the entry
				delete(c.item, name)
			}
		}
	}
	// Pass 2: now find any empty directories
	var dirs []string
	for name, item := range c.item {
		if !item.isFile && item.opens == 0 {
			dirs = append(dirs, name)
		}
	}
	// remove empty directories in reverse alphabetical order so
	// children are removed before their parents
	sort.Strings(dirs)
	for i := len(dirs) - 1; i >= 0; i-- {
		dir := dirs[i]
		// Remove the entry only if the directory really went away
		if removeDir(dir) {
			delete(c.item, dir)
		}
	}
}
|
||||
|
||||
// clean empties the cache of stuff if it can
|
||||
func (c *cache) clean() {
|
||||
// Cache may be empty so end
|
||||
_, err := os.Stat(c.root)
|
||||
if os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Cleaning the cache")
|
||||
|
||||
// first walk the FS to update the atimes
|
||||
err = c.updateAtimes()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error traversing cache %q: %v", c.root, err)
|
||||
}
|
||||
|
||||
// Now remove any files that are over age and any empty
|
||||
// directories
|
||||
c.purgeOld(c.opt.CacheMaxAge)
|
||||
}
|
||||
|
||||
// cleaner calls clean at regular intervals
//
// doesn't return until context is cancelled
func (c *cache) cleaner(ctx context.Context) {
	// A non-positive poll interval disables cleaning entirely.
	if c.opt.CachePollInterval <= 0 {
		fs.Debugf(nil, "Cache cleaning thread disabled because poll interval <= 0")
		return
	}
	// Start cleaning the cache immediately
	c.clean()
	// Then every interval specified
	timer := time.NewTicker(c.opt.CachePollInterval)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			c.clean()
		case <-ctx.Done():
			fs.Debugf(nil, "cache cleaner exiting")
			return
		}
	}
}
|
||||
462
.rclone_repo/vfs/cache_test.go
Executable file
462
.rclone_repo/vfs/cache_test.go
Executable file
@@ -0,0 +1,462 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/djherbis/times"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check CacheMode satisfies the pflag.Value interface at compile time.
var _ pflag.Value = (*CacheMode)(nil)
||||
|
||||
// TestCacheModeString checks known and out-of-range CacheMode names.
func TestCacheModeString(t *testing.T) {
	assert.Equal(t, "off", CacheModeOff.String())
	assert.Equal(t, "full", CacheModeFull.String())
	assert.Equal(t, "CacheMode(17)", CacheMode(17).String())
}
|
||||
|
||||
// TestCacheModeSet checks parsing valid, invalid and empty mode names.
func TestCacheModeSet(t *testing.T) {
	var m CacheMode

	err := m.Set("full")
	assert.NoError(t, err)
	assert.Equal(t, CacheModeFull, m)

	err = m.Set("potato")
	assert.Error(t, err, "Unknown cache mode level")

	err = m.Set("")
	assert.Error(t, err, "Unknown cache mode level")
}
|
||||
|
||||
// TestCacheModeType checks the pflag type name of CacheMode.
func TestCacheModeType(t *testing.T) {
	var m CacheMode
	assert.Equal(t, "string", m.Type())
}
|
||||
|
||||
// itemAsString converts c.item to a sorted string slice for easy
// comparison in tests.
func itemAsString(c *cache) []string {
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	var out []string
	for name, item := range c.item {
		out = append(out, fmt.Sprintf("name=%q isFile=%v opens=%d", name, item.isFile, item.opens))
	}
	// map order is random - sort for determinism
	sort.Strings(out)
	return out
}
|
||||
|
||||
// TestCacheNew exercises the whole cache lifecycle: creation, mkdir,
// get, updateTime, open, updateAtimes, purgeOld, close and cleanUp.
func TestCacheNew(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Disable the cache cleaner as it interferes with these tests
	opt := DefaultOpt
	opt.CachePollInterval = 0
	c, err := newCache(ctx, r.Fremote, &opt)
	require.NoError(t, err)

	assert.Contains(t, c.root, "vfs")
	assert.Contains(t, c.f.Root(), filepath.Base(r.Fremote.Root()))
	assert.Equal(t, []string(nil), itemAsString(c))

	// mkdir
	p, err := c.mkdir("potato")
	require.NoError(t, err)
	assert.Equal(t, "potato", filepath.Base(p))
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
	}, itemAsString(c))

	fi, err := os.Stat(filepath.Dir(p))
	require.NoError(t, err)
	assert.True(t, fi.IsDir())

	// get
	item := c.get("potato")
	item2 := c.get("potato")
	assert.Equal(t, item, item2)
	assert.WithinDuration(t, time.Now(), item.atime, time.Second)

	// updateTime
	//.. a time in the past must not move atime backwards
	t1 := time.Now().Add(-60 * time.Minute)
	c.updateTime("potato", t1)
	item = c.get("potato")
	assert.NotEqual(t, t1, item.atime)
	assert.Equal(t, 0, item.opens)
	//.. a time in the future must update atime
	t2 := time.Now().Add(60 * time.Minute)
	c.updateTime("potato", t2)
	item = c.get("potato")
	assert.Equal(t, t2, item.atime)
	assert.Equal(t, 0, item.opens)

	// open - the leading slash should be cleaned away
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="potato" isFile=true opens=0`,
	}, itemAsString(c))
	c.open("/potato")
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	item = c.get("potato")
	assert.WithinDuration(t, time.Now(), item.atime, time.Second)
	assert.Equal(t, 1, item.opens)

	// write the file
	err = ioutil.WriteFile(p, []byte("hello"), 0600)
	require.NoError(t, err)

	// read its atime
	fi, err = os.Stat(p)
	assert.NoError(t, err)
	atime := times.Get(fi).AccessTime()

	// updateAtimes - should pick up the on-disk atime
	item = c.get("potato")
	item.atime = time.Now().Add(-24 * time.Hour)
	err = c.updateAtimes()
	require.NoError(t, err)
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	item = c.get("potato")
	assert.Equal(t, atime, item.atime)

	// updateAtimes - not in the cache
	oldItem := item
	c.itemMu.Lock()
	delete(c.item, "potato") // remove from cache
	c.itemMu.Unlock()
	err = c.updateAtimes()
	require.NoError(t, err)
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=0`,
	}, itemAsString(c))
	item = c.get("potato")
	assert.Equal(t, atime, item.atime)
	c.itemMu.Lock()
	c.item["potato"] = oldItem // restore to cache
	c.itemMu.Unlock()

	// try purging with file open - must not delete it
	c.purgeOld(10 * time.Second)
	_, err = os.Stat(p)
	assert.NoError(t, err)

	// close - the trailing slash should be cleaned away
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	c.updateTime("potato", t2)
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	c.close("potato/")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="potato" isFile=true opens=0`,
	}, itemAsString(c))
	item = c.get("potato")
	assert.WithinDuration(t, time.Now(), item.atime, time.Second)
	assert.Equal(t, 0, item.opens)

	// try purging with file closed
	c.purgeOld(10 * time.Second)
	// ...nothing should happen as the file is too new
	_, err = os.Stat(p)
	assert.NoError(t, err)

	//.. purge again with -ve age - now it must go
	c.purgeOld(-10 * time.Second)
	_, err = os.Stat(p)
	assert.True(t, os.IsNotExist(err))

	// clean - have tested the internals already
	c.clean()

	// cleanup
	err = c.cleanUp()
	require.NoError(t, err)
	_, err = os.Stat(c.root)
	assert.True(t, os.IsNotExist(err))
}
|
||||
|
||||
// TestCacheOpens checks that open/close maintain the open counts of a
// file and all of its parent directories, including path cleaning of
// doubled slashes.
func TestCacheOpens(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := newCache(ctx, r.Fremote, &DefaultOpt)
	require.NoError(t, err)

	assert.Equal(t, []string(nil), itemAsString(c))
	c.open("potato")
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	c.open("potato")
	assert.Equal(t, []string{
		`name="" isFile=false opens=2`,
		`name="potato" isFile=true opens=2`,
	}, itemAsString(c))
	c.close("potato")
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	c.close("potato")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="potato" isFile=true opens=0`,
	}, itemAsString(c))

	// Deeply nested opens - each ancestor counts its open children.
	c.open("potato")
	c.open("a//b/c/d/one")
	c.open("a/b/c/d/e/two")
	c.open("a/b/c/d/e/f/three")
	assert.Equal(t, []string{
		`name="" isFile=false opens=4`,
		`name="a" isFile=false opens=3`,
		`name="a/b" isFile=false opens=3`,
		`name="a/b/c" isFile=false opens=3`,
		`name="a/b/c/d" isFile=false opens=3`,
		`name="a/b/c/d/e" isFile=false opens=2`,
		`name="a/b/c/d/e/f" isFile=false opens=1`,
		`name="a/b/c/d/e/f/three" isFile=true opens=1`,
		`name="a/b/c/d/e/two" isFile=true opens=1`,
		`name="a/b/c/d/one" isFile=true opens=1`,
		`name="potato" isFile=true opens=1`,
	}, itemAsString(c))
	c.close("potato")
	c.close("a/b/c/d/one")
	c.close("a/b/c/d/e/two")
	c.close("a/b/c//d/e/f/three")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="a" isFile=false opens=0`,
		`name="a/b" isFile=false opens=0`,
		`name="a/b/c" isFile=false opens=0`,
		`name="a/b/c/d" isFile=false opens=0`,
		`name="a/b/c/d/e" isFile=false opens=0`,
		`name="a/b/c/d/e/f" isFile=false opens=0`,
		`name="a/b/c/d/e/f/three" isFile=true opens=0`,
		`name="a/b/c/d/e/two" isFile=true opens=0`,
		`name="a/b/c/d/one" isFile=true opens=0`,
		`name="potato" isFile=true opens=0`,
	}, itemAsString(c))
}
|
||||
|
||||
// TestCacheOpenMkdir tests the open, mkdir, purge, close, purge
// sequence - the directory must survive purging while open and be
// removed once closed.
func TestCacheOpenMkdir(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Disable the cache cleaner as it interferes with these tests
	opt := DefaultOpt
	opt.CachePollInterval = 0
	c, err := newCache(ctx, r.Fremote, &opt)
	require.NoError(t, err)

	// open
	c.open("sub/potato")

	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="sub" isFile=false opens=1`,
		`name="sub/potato" isFile=true opens=1`,
	}, itemAsString(c))

	// mkdir
	p, err := c.mkdir("sub/potato")
	require.NoError(t, err)
	assert.Equal(t, "potato", filepath.Base(p))
	assert.Equal(t, []string{
		`name="" isFile=false opens=1`,
		`name="sub" isFile=false opens=1`,
		`name="sub/potato" isFile=true opens=1`,
	}, itemAsString(c))

	// test directory exists
	fi, err := os.Stat(filepath.Dir(p))
	require.NoError(t, err)
	assert.True(t, fi.IsDir())

	// clean the cache - open entries must survive
	c.purgeOld(-10 * time.Second)

	// test directory still exists
	fi, err = os.Stat(filepath.Dir(p))
	require.NoError(t, err)
	assert.True(t, fi.IsDir())

	// close
	c.close("sub/potato")

	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="sub" isFile=false opens=0`,
		`name="sub/potato" isFile=true opens=0`,
	}, itemAsString(c))

	// clean the cache - now everything can go
	c.purgeOld(-10 * time.Second)

	assert.Equal(t, []string(nil), itemAsString(c))

	// test directory does not exist
	fi, err = os.Stat(filepath.Dir(p))
	require.True(t, os.IsNotExist(err))
}
|
||||
|
||||
// TestCacheCacheDir checks cacheDir registers a directory and all of
// its missing ancestors.
func TestCacheCacheDir(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := newCache(ctx, r.Fremote, &DefaultOpt)
	require.NoError(t, err)

	assert.Equal(t, []string(nil), itemAsString(c))

	c.cacheDir("dir")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="dir" isFile=false opens=0`,
	}, itemAsString(c))

	c.cacheDir("dir/sub")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="dir" isFile=false opens=0`,
		`name="dir/sub" isFile=false opens=0`,
	}, itemAsString(c))

	// An intermediate missing directory is created too.
	c.cacheDir("dir/sub2/subsub2")
	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="dir" isFile=false opens=0`,
		`name="dir/sub" isFile=false opens=0`,
		`name="dir/sub2" isFile=false opens=0`,
		`name="dir/sub2/subsub2" isFile=false opens=0`,
	}, itemAsString(c))
}
|
||||
|
||||
// TestCachePurgeOld drives _purgeOld with stub remove functions and
// checks which files and directories would be removed for open and
// closed entries at various ages.
func TestCachePurgeOld(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c, err := newCache(ctx, r.Fremote, &DefaultOpt)
	require.NoError(t, err)

	// Test funcs - record what would be removed instead of touching disk
	var removed []string
	removedDir := true
	removeFile := func(name string) {
		removed = append(removed, name)
	}
	removeDir := func(name string) bool {
		if removedDir {
			removed = append(removed, name+"/")
		}
		return removedDir
	}

	// Empty cache - nothing to purge.
	removed = nil
	c._purgeOld(-10*time.Second, removeFile, removeDir)
	assert.Equal(t, []string(nil), removed)

	c.open("sub/dir2/potato2")
	c.open("sub/dir/potato")
	c.close("sub/dir2/potato2")
	c.open("sub/dir/potato")

	assert.Equal(t, []string{
		`name="" isFile=false opens=2`,
		`name="sub" isFile=false opens=2`,
		`name="sub/dir" isFile=false opens=2`,
		`name="sub/dir/potato" isFile=true opens=2`,
		`name="sub/dir2" isFile=false opens=0`,
		`name="sub/dir2/potato2" isFile=true opens=0`,
	}, itemAsString(c))

	// Only the closed file and its now-unused directory go.
	removed = nil
	removedDir = true
	c._purgeOld(-10*time.Second, removeFile, removeDir)
	assert.Equal(t, []string{
		"sub/dir2/potato2",
		"sub/dir2/",
	}, removed)

	c.close("sub/dir/potato")

	// Still open once - must survive.
	removed = nil
	removedDir = true
	c._purgeOld(-10*time.Second, removeFile, removeDir)
	assert.Equal(t, []string(nil), removed)

	c.close("sub/dir/potato")

	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="sub" isFile=false opens=0`,
		`name="sub/dir" isFile=false opens=0`,
		`name="sub/dir/potato" isFile=true opens=0`,
	}, itemAsString(c))

	// Closed but not old enough, and removeDir failing - nothing goes.
	removed = nil
	removedDir = false
	c._purgeOld(10*time.Second, removeFile, removeDir)
	assert.Equal(t, []string(nil), removed)

	assert.Equal(t, []string{
		`name="" isFile=false opens=0`,
		`name="sub" isFile=false opens=0`,
		`name="sub/dir" isFile=false opens=0`,
		`name="sub/dir/potato" isFile=true opens=0`,
	}, itemAsString(c))

	// Everything closed and old - all removed, children before parents.
	removed = nil
	removedDir = true
	c._purgeOld(-10*time.Second, removeFile, removeDir)
	assert.Equal(t, []string{
		"sub/dir/potato",
		"sub/dir/",
		"sub/",
		"/",
	}, removed)

	assert.Equal(t, []string(nil), itemAsString(c))
}
|
||||
576
.rclone_repo/vfs/dir.go
Executable file
576
.rclone_repo/vfs/dir.go
Executable file
@@ -0,0 +1,576 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/list"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Dir represents a directory entry in the VFS tree.
type Dir struct {
	vfs     *VFS
	inode   uint64 // inode number
	f       fs.Fs
	parent  *Dir // parent, nil for root
	path    string
	modTime time.Time
	entry   fs.Directory
	mu      sync.Mutex      // protects the following
	read    time.Time       // time directory entry last read
	items   map[string]Node // directory entries - can be empty but not nil
}
|
||||
|
||||
func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
|
||||
return &Dir{
|
||||
vfs: vfs,
|
||||
f: f,
|
||||
parent: parent,
|
||||
entry: fsDir,
|
||||
path: fsDir.Remote(),
|
||||
modTime: fsDir.ModTime(),
|
||||
inode: newInode(),
|
||||
items: make(map[string]Node),
|
||||
}
|
||||
}
|
||||
|
||||
// String converts it to printable form - the path with a trailing
// slash, or "<nil *Dir>" for a nil receiver.
func (d *Dir) String() string {
	if d == nil {
		return "<nil *Dir>"
	}
	return d.path + "/"
}
|
||||
|
||||
// IsFile returns false for Dir - satisfies Node interface
func (d *Dir) IsFile() bool {
	return false
}
|
||||
|
||||
// IsDir returns true for Dir - satisfies Node interface
func (d *Dir) IsDir() bool {
	return true
}
|
||||
|
||||
// Mode bits of the directory - satisfies Node interface.
// Taken from the VFS options, not from the remote.
func (d *Dir) Mode() (mode os.FileMode) {
	return d.vfs.Opt.DirPerms
}
|
||||
|
||||
// Name (base) of the directory - satisfies Node interface
|
||||
func (d *Dir) Name() (name string) {
|
||||
name = path.Base(d.path)
|
||||
if name == "." {
|
||||
name = "/"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// Path of the directory - satisfies Node interface
func (d *Dir) Path() (name string) {
	return d.path
}
|
||||
|
||||
// Sys returns underlying data source (can be nil) - satisfies Node interface
func (d *Dir) Sys() interface{} {
	return nil
}
|
||||
|
||||
// Inode returns the inode number - satisfies Node interface
func (d *Dir) Inode() uint64 {
	return d.inode
}
|
||||
|
||||
// Node returns the Node associated with this - satisfies Noder interface
func (d *Dir) Node() Node {
	return d
}
|
||||
|
||||
// ForgetAll ensures the directory and all its children are purged
// from the cache by forgetting the empty relative path.
func (d *Dir) ForgetAll() {
	d.ForgetPath("", fs.EntryDirectory)
}
|
||||
|
||||
// ForgetPath clears the cache for itself and all subdirectories if
// they match the given path. The path is specified relative from the
// directory it is called from.
// It is not possible to traverse the directory tree upwards, i.e.
// you cannot clear the cache for the Dir's ancestors or siblings.
func (d *Dir) ForgetPath(relativePath string, entryType fs.EntryType) {
	// if we are requested to forget a file, we use its parent
	absPath := path.Join(d.path, relativePath)
	if entryType != fs.EntryDirectory {
		absPath = path.Dir(absPath)
	}
	// the root is represented as ""
	if absPath == "." || absPath == "/" {
		absPath = ""
	}

	// reset the read time and drop the cached entries of every
	// matching directory
	d.walk(absPath, func(dir *Dir) {
		fs.Debugf(dir.path, "forgetting directory cache")
		dir.read = time.Time{}
		dir.items = make(map[string]Node)
	})
}
|
||||
|
||||
// walk runs a function on all cached directories whose path matches
// the given absolute one. It will be called on a directory's children
// first. It will not apply the function to parent nodes, regardless
// of the given path.
func (d *Dir) walk(absPath string, fun func(*Dir)) {
	d.mu.Lock()
	defer d.mu.Unlock()
	// Recurse into child directories first (each child takes its own
	// lock; the parent's lock is held while doing so).
	for _, node := range d.items {
		if dir, ok := node.(*Dir); ok {
			dir.walk(absPath, fun)
		}
	}

	// Apply fun to this directory if it is the target, a descendant
	// of the target, or the target is the root ("").
	if d.path == absPath || absPath == "" || strings.HasPrefix(d.path, absPath+"/") {
		fun(d)
	}
}
|
||||
|
||||
// rename should be called after the directory is renamed
//
// Reset the directory to new state, discarding all the objects and
// reading everything again
func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) {
	// Drop all cached children first.
	d.ForgetAll()
	d.parent = newParent
	d.entry = fsDir
	d.path = fsDir.Remote()
	d.modTime = fsDir.ModTime()
	// Zero read time forces a re-read on the next access.
	d.read = time.Time{}
}
|
||||
|
||||
// addObject adds a new object or directory to the directory
|
||||
//
|
||||
// note that we add new objects rather than updating old ones
|
||||
func (d *Dir) addObject(node Node) {
|
||||
d.mu.Lock()
|
||||
d.items[node.Name()] = node
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
// delObject removes an object from the directory
|
||||
func (d *Dir) delObject(leaf string) {
|
||||
d.mu.Lock()
|
||||
delete(d.items, leaf)
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
// read the directory and sets d.items - must be called with the lock held
|
||||
func (d *Dir) _readDir() error {
|
||||
when := time.Now()
|
||||
if d.read.IsZero() {
|
||||
// fs.Debugf(d.path, "Reading directory")
|
||||
} else {
|
||||
age := when.Sub(d.read)
|
||||
if age < d.vfs.Opt.DirCacheTime {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(d.path, "Re-reading directory (%v old)", age)
|
||||
}
|
||||
entries, err := list.DirSorted(d.f, false, d.path)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// We treat directory not found as empty because we
|
||||
// create directories on the fly
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = d._readDirFromEntries(entries, nil, time.Time{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.read = when
|
||||
return nil
|
||||
}
|
||||
|
||||
// update d.items for each dir in the DirTree below this one and
// set the last read time - must be called with the lock held
func (d *Dir) _readDirFromDirTree(dirTree walk.DirTree, when time.Time) error {
	// dirTree[d.path] is the list of entries directly inside this
	// directory; subdirectories are recursed into by _readDirFromEntries.
	return d._readDirFromEntries(dirTree[d.path], dirTree, when)
}
|
||||
|
||||
// update d.items and if dirTree is not nil update each dir in the DirTree below this one and
// set the last read time - must be called with the lock held
func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree walk.DirTree, when time.Time) error {
	var err error
	// Cache the items by name
	found := make(map[string]struct{})
	for _, entry := range entries {
		name := path.Base(entry.Remote())
		if name == "." || name == ".." {
			// skip the self/parent pseudo entries
			continue
		}
		node := d.items[name]
		found[name] = struct{}{}
		switch item := entry.(type) {
		case fs.Object:
			obj := item
			// Reuse old file value if it exists
			if file, ok := node.(*File); node != nil && ok {
				file.setObjectNoUpdate(obj)
			} else {
				node = newFile(d, obj, name)
			}
		case fs.Directory:
			// Reuse old dir value if it exists
			if node == nil || !node.IsDir() {
				node = newDir(d.vfs, d.f, d, item)
			}
			if dirTree != nil {
				dir := node.(*Dir)
				// Lock the child while filling it from the tree; on
				// failure mark it unread so it will be re-fetched later.
				dir.mu.Lock()
				err = dir._readDirFromDirTree(dirTree, when)
				if err != nil {
					dir.read = time.Time{}
				} else {
					dir.read = when
				}
				dir.mu.Unlock()
				if err != nil {
					return err
				}
			}
		default:
			err = errors.Errorf("unknown type %T", item)
			fs.Errorf(d, "readDir error: %v", err)
			return err
		}
		d.items[name] = node
	}
	// delete unused entries - anything cached that the listing no
	// longer contains
	for name := range d.items {
		if _, ok := found[name]; !ok {
			delete(d.items, name)
		}
	}
	return nil
}
|
||||
|
||||
// readDirTree forces a refresh of the complete directory tree
|
||||
func (d *Dir) readDirTree() error {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
when := time.Now()
|
||||
d.read = time.Time{}
|
||||
fs.Debugf(d.path, "Reading directory tree")
|
||||
dt, err := walk.NewDirTree(d.f, d.path, false, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = d._readDirFromDirTree(dt, when)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Debugf(d.path, "Reading directory tree done in %s", time.Since(when))
|
||||
d.read = when
|
||||
return nil
|
||||
}
|
||||
|
||||
// readDir forces a refresh of the directory by zeroing the last read
// time before delegating to _readDir.
func (d *Dir) readDir() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.read = time.Time{}
	return d._readDir()
}
|
||||
|
||||
// stat a single item in the directory
|
||||
//
|
||||
// returns ENOENT if not found.
|
||||
func (d *Dir) stat(leaf string) (Node, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err := d._readDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
item, ok := d.items[leaf]
|
||||
if !ok {
|
||||
return nil, ENOENT
|
||||
}
|
||||
return item, nil
|
||||
}
|
||||
|
||||
// Check to see if a directory is empty
|
||||
func (d *Dir) isEmpty() (bool, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err := d._readDir()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(d.items) == 0, nil
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the directory
func (d *Dir) ModTime() time.Time {
	// fs.Debugf(d.path, "Dir.ModTime %v", d.modTime)
	return d.modTime
}
|
||||
|
||||
// Size of the directory - directories always report a size of 0.
func (d *Dir) Size() int64 {
	return 0
}
|
||||
|
||||
// SetModTime sets the modTime for this dir
|
||||
func (d *Dir) SetModTime(modTime time.Time) error {
|
||||
if d.vfs.Opt.ReadOnly {
|
||||
return EROFS
|
||||
}
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.modTime = modTime
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat looks up a specific entry in the receiver.
|
||||
//
|
||||
// Stat should return a Node corresponding to the entry. If the
|
||||
// name does not exist in the directory, Stat should return ENOENT.
|
||||
//
|
||||
// Stat need not to handle the names "." and "..".
|
||||
func (d *Dir) Stat(name string) (node Node, err error) {
|
||||
// fs.Debugf(path, "Dir.Stat")
|
||||
node, err = d.stat(name)
|
||||
if err != nil {
|
||||
if err != ENOENT {
|
||||
fs.Errorf(d, "Dir.Stat error: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(path, "Dir.Stat OK")
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// ReadDirAll reads the contents of the directory sorted
|
||||
func (d *Dir) ReadDirAll() (items Nodes, err error) {
|
||||
// fs.Debugf(d.path, "Dir.ReadDirAll")
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
err = d._readDir()
|
||||
if err != nil {
|
||||
fs.Debugf(d.path, "Dir.ReadDirAll error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
for _, item := range d.items {
|
||||
items = append(items, item)
|
||||
}
|
||||
sort.Sort(items)
|
||||
// fs.Debugf(d.path, "Dir.ReadDirAll OK with %d entries", len(items))
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// accessModeMask masks off the read/write access mode bits from open flags
const accessModeMask = (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)
|
||||
|
||||
// Open the directory according to the flags provided
|
||||
func (d *Dir) Open(flags int) (fd Handle, err error) {
|
||||
rdwrMode := flags & accessModeMask
|
||||
if rdwrMode != os.O_RDONLY {
|
||||
fs.Errorf(d, "Can only open directories read only")
|
||||
return nil, EPERM
|
||||
}
|
||||
return newDirHandle(d), nil
|
||||
}
|
||||
|
||||
// Create makes a new file node with the given name. Returns EROFS if
// the VFS is read only.
//
// NOTE(review): flags is currently unused here - presumably the open
// mode is applied when the file handle is opened; confirm with callers.
func (d *Dir) Create(name string, flags int) (*File, error) {
	// fs.Debugf(path, "Dir.Create")
	if d.vfs.Opt.ReadOnly {
		return nil, EROFS
	}
	// This gets added to the directory when the file is opened for write
	return newFile(d, nil, name), nil
}
|
||||
|
||||
// Mkdir creates a new directory
|
||||
func (d *Dir) Mkdir(name string) (*Dir, error) {
|
||||
if d.vfs.Opt.ReadOnly {
|
||||
return nil, EROFS
|
||||
}
|
||||
path := path.Join(d.path, name)
|
||||
// fs.Debugf(path, "Dir.Mkdir")
|
||||
err := d.f.Mkdir(path)
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.Mkdir failed to create directory: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
fsDir := fs.NewDir(path, time.Now())
|
||||
dir := newDir(d.vfs, d.f, d, fsDir)
|
||||
d.addObject(dir)
|
||||
// fs.Debugf(path, "Dir.Mkdir OK")
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
// Remove the directory
|
||||
func (d *Dir) Remove() error {
|
||||
if d.vfs.Opt.ReadOnly {
|
||||
return EROFS
|
||||
}
|
||||
// Check directory is empty first
|
||||
empty, err := d.isEmpty()
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.Remove dir error: %v", err)
|
||||
return err
|
||||
}
|
||||
if !empty {
|
||||
fs.Errorf(d, "Dir.Remove not empty")
|
||||
return ENOTEMPTY
|
||||
}
|
||||
// remove directory
|
||||
err = d.f.Rmdir(d.path)
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.Remove failed to remove directory: %v", err)
|
||||
return err
|
||||
}
|
||||
// Remove the item from the parent directory listing
|
||||
if d.parent != nil {
|
||||
d.parent.delObject(d.Name())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveAll removes the directory and any contents recursively
|
||||
func (d *Dir) RemoveAll() error {
|
||||
if d.vfs.Opt.ReadOnly {
|
||||
return EROFS
|
||||
}
|
||||
// Remove contents of the directory
|
||||
nodes, err := d.ReadDirAll()
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.RemoveAll failed to read directory: %v", err)
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
err = node.RemoveAll()
|
||||
if err != nil {
|
||||
fs.Errorf(node.Path(), "Dir.RemoveAll failed to remove: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return d.Remove()
|
||||
}
|
||||
|
||||
// DirEntry returns the underlying fs.DirEntry for this directory.
func (d *Dir) DirEntry() (entry fs.DirEntry) {
	return d.entry
}
|
||||
|
||||
// RemoveName removes the entry with the given name from the receiver,
|
||||
// which must be a directory. The entry to be removed may correspond
|
||||
// to a file (unlink) or to a directory (rmdir).
|
||||
func (d *Dir) RemoveName(name string) error {
|
||||
if d.vfs.Opt.ReadOnly {
|
||||
return EROFS
|
||||
}
|
||||
// fs.Debugf(path, "Dir.Remove")
|
||||
node, err := d.stat(name)
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.Remove error: %v", err)
|
||||
return err
|
||||
}
|
||||
return node.Remove()
|
||||
}
|
||||
|
||||
// Rename renames the entry oldName in d to newName in destDir. The
// entry may be a file or a directory. Returns EROFS when the VFS is
// read only.
func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
	if d.vfs.Opt.ReadOnly {
		return EROFS
	}
	oldPath := path.Join(d.path, oldName)
	newPath := path.Join(destDir.path, newName)
	// fs.Debugf(oldPath, "Dir.Rename to %q", newPath)
	oldNode, err := d.stat(oldName)
	if err != nil {
		fs.Errorf(oldPath, "Dir.Rename error: %v", err)
		return err
	}
	// Dispatch on the underlying entry type of the node being renamed.
	switch x := oldNode.DirEntry().(type) {
	case nil:
		// No backing entry yet (e.g. a file created but not uploaded):
		// only a vfs.File knows how to rename itself in this state.
		if oldFile, ok := oldNode.(*File); ok {
			if err = oldFile.rename(destDir, newName); err != nil {
				fs.Errorf(oldPath, "Dir.Rename error: %v", err)
				return err
			}
		} else {
			fs.Errorf(oldPath, "Dir.Rename can't rename open file that is not a vfs.File")
			return EPERM
		}
	case fs.Object:
		// A regular file backed by a remote object.
		if oldFile, ok := oldNode.(*File); ok {
			if err = oldFile.rename(destDir, newName); err != nil {
				fs.Errorf(oldPath, "Dir.Rename error: %v", err)
				return err
			}
		} else {
			err := errors.Errorf("Fs %q can't rename file that is not a vfs.File", d.f)
			fs.Errorf(oldPath, "Dir.Rename error: %v", err)
			return err
		}
	case fs.Directory:
		// Directory rename requires the backend to support DirMove.
		doDirMove := d.f.Features().DirMove
		if doDirMove == nil {
			err := errors.Errorf("Fs %q can't rename directories (no DirMove)", d.f)
			fs.Errorf(oldPath, "Dir.Rename error: %v", err)
			return err
		}
		srcRemote := x.Remote()
		dstRemote := newPath
		err = doDirMove(d.f, srcRemote, dstRemote)
		if err != nil {
			fs.Errorf(oldPath, "Dir.Rename error: %v", err)
			return err
		}
		newDir := fs.NewDirCopy(x).SetRemote(newPath)
		// Update the node with the new details
		if oldNode != nil {
			if oldDir, ok := oldNode.(*Dir); ok {
				fs.Debugf(x, "Updating dir with %v %p", newDir, oldDir)
				oldDir.rename(destDir, newDir)
			}
		}
	default:
		// NOTE(review): the message below says "Dir.ReadDirAll" but this
		// is Dir.Rename - looks like a copy/paste slip; confirm and fix.
		err = errors.Errorf("unknown type %T", oldNode)
		fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
		return err
	}

	// Show moved - delete from old dir and add to new
	d.delObject(oldName)
	destDir.addObject(oldNode)

	// fs.Debugf(newPath, "Dir.Rename renamed from %q", oldPath)
	return nil
}
|
||||
|
||||
// Sync the directory
//
// Note that we don't do anything except return OK
func (d *Dir) Sync() error {
	return nil
}
|
||||
|
||||
// VFS returns the instance of the VFS this directory belongs to.
func (d *Dir) VFS() *VFS {
	return d.vfs
}
|
||||
|
||||
// Truncate changes the size of the named file. Truncating a directory
// is not supported, so this always returns ENOSYS.
func (d *Dir) Truncate(size int64) error {
	return ENOSYS
}
|
||||
107
.rclone_repo/vfs/dir_handle.go
Executable file
107
.rclone_repo/vfs/dir_handle.go
Executable file
@@ -0,0 +1,107 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// DirHandle represents an open directory
type DirHandle struct {
	baseHandle
	d   *Dir          // the directory this handle reads
	fis []os.FileInfo // where Readdir got to; nil until first Readdir
}
|
||||
|
||||
// newDirHandle opens a directory for read
func newDirHandle(d *Dir) *DirHandle {
	// fis is left nil so the first Readdir triggers a listing
	return &DirHandle{
		d: d,
	}
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *DirHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *DirHandle>"
|
||||
}
|
||||
if fh.d == nil {
|
||||
return "<nil *DirHandle.d>"
|
||||
}
|
||||
return fh.d.String() + " (r)"
|
||||
}
|
||||
|
||||
// Stat returns info about the current directory
func (fh *DirHandle) Stat() (fi os.FileInfo, err error) {
	// the Dir itself satisfies os.FileInfo
	return fh.d, nil
}
|
||||
|
||||
// Node returns the Node associated with this - satisfies Noder interface
func (fh *DirHandle) Node() Node {
	return fh.d
}
|
||||
|
||||
// Readdir reads the contents of the directory associated with file and returns
|
||||
// a slice of up to n FileInfo values, as would be returned by Lstat, in
|
||||
// directory order. Subsequent calls on the same file will yield further
|
||||
// FileInfos.
|
||||
//
|
||||
// If n > 0, Readdir returns at most n FileInfo structures. In this case, if
|
||||
// Readdir returns an empty slice, it will return a non-nil error explaining
|
||||
// why. At the end of a directory, the error is io.EOF.
|
||||
//
|
||||
// If n <= 0, Readdir returns all the FileInfo from the directory in a single
|
||||
// slice. In this case, if Readdir succeeds (reads all the way to the end of
|
||||
// the directory), it returns the slice and a nil error. If it encounters an
|
||||
// error before the end of the directory, Readdir returns the FileInfo read
|
||||
// until that point and a non-nil error.
|
||||
func (fh *DirHandle) Readdir(n int) (fis []os.FileInfo, err error) {
|
||||
if fh.fis == nil {
|
||||
nodes, err := fh.d.ReadDirAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fh.fis = []os.FileInfo{}
|
||||
for _, node := range nodes {
|
||||
fh.fis = append(fh.fis, node)
|
||||
}
|
||||
}
|
||||
nn := len(fh.fis)
|
||||
if n > 0 {
|
||||
if nn == 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
if nn > n {
|
||||
nn = n
|
||||
}
|
||||
}
|
||||
fis, fh.fis = fh.fis[:nn], fh.fis[nn:]
|
||||
return fis, nil
|
||||
}
|
||||
|
||||
// Readdirnames reads and returns a slice of names from the directory f.
//
// If n > 0, Readdirnames returns at most n names. In this case, if
// Readdirnames returns an empty slice, it will return a non-nil error
// explaining why. At the end of a directory, the error is io.EOF.
//
// If n <= 0, Readdirnames returns all the names from the directory in a single
// slice. In this case, if Readdirnames succeeds (reads all the way to the end
// of the directory), it returns the slice and a nil error. If it encounters an
// error before the end of the directory, Readdirnames returns the names read
// until that point and a non-nil error.
func (fh *DirHandle) Readdirnames(n int) (names []string, err error) {
	// delegate the chunking/EOF behaviour to Readdir
	nodes, err := fh.Readdir(n)
	if err != nil {
		return nil, err
	}
	for _, node := range nodes {
		names = append(names, node.Name())
	}
	return names, nil
}
|
||||
|
||||
// Close closes the handle, discarding the Readdir position so a later
// Readdir on the same handle would re-read the directory.
func (fh *DirHandle) Close() (err error) {
	fh.fis = nil
	return nil
}
|
||||
112
.rclone_repo/vfs/dir_handle_test.go
Executable file
112
.rclone_repo/vfs/dir_handle_test.go
Executable file
@@ -0,0 +1,112 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDirHandleMethods(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, dir, _ := dirCreate(t, r)
|
||||
|
||||
h, err := dir.Open(os.O_RDONLY)
|
||||
require.NoError(t, err)
|
||||
fh, ok := h.(*DirHandle)
|
||||
assert.True(t, ok)
|
||||
|
||||
// String
|
||||
assert.Equal(t, "dir/ (r)", fh.String())
|
||||
assert.Equal(t, "<nil *DirHandle>", (*DirHandle)(nil).String())
|
||||
assert.Equal(t, "<nil *DirHandle.d>", newDirHandle(nil).String())
|
||||
|
||||
// Stat
|
||||
fi, err := fh.Stat()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, dir, fi)
|
||||
|
||||
// Node
|
||||
assert.Equal(t, dir, fh.Node())
|
||||
|
||||
// Close
|
||||
require.NoError(t, h.Close())
|
||||
assert.Equal(t, []os.FileInfo(nil), fh.fis)
|
||||
}
|
||||
|
||||
// TestDirHandleReaddir checks Readdir both in a single chunk (n <= 0)
// and in multiple chunks (n > 0), including the io.EOF at the end.
func TestDirHandleReaddir(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs := New(r.Fremote, nil)

	file1 := r.WriteObject("dir/file1", "file1 contents", t1)
	file2 := r.WriteObject("dir/file2", "file2- contents", t2)
	file3 := r.WriteObject("dir/subdir/file3", "file3-- contents", t3)
	fstest.CheckItems(t, r.Fremote, file1, file2, file3)

	node, err := vfs.Stat("dir")
	require.NoError(t, err)
	dir := node.(*Dir)

	// Read in one chunk
	fh, err := dir.Open(os.O_RDONLY)
	require.NoError(t, err)

	fis, err := fh.Readdir(-1)
	require.NoError(t, err)
	require.Equal(t, 3, len(fis))
	assert.Equal(t, "file1", fis[0].Name())
	assert.Equal(t, "file2", fis[1].Name())
	assert.Equal(t, "subdir", fis[2].Name())
	assert.False(t, fis[0].IsDir())
	assert.False(t, fis[1].IsDir())
	assert.True(t, fis[2].IsDir())

	require.NoError(t, fh.Close())

	// Read in multiple chunks
	fh, err = dir.Open(os.O_RDONLY)
	require.NoError(t, err)

	fis, err = fh.Readdir(2)
	require.NoError(t, err)
	require.Equal(t, 2, len(fis))
	assert.Equal(t, "file1", fis[0].Name())
	assert.Equal(t, "file2", fis[1].Name())
	assert.False(t, fis[0].IsDir())
	assert.False(t, fis[1].IsDir())

	fis, err = fh.Readdir(2)
	require.NoError(t, err)
	require.Equal(t, 1, len(fis))
	assert.Equal(t, "subdir", fis[0].Name())
	assert.True(t, fis[0].IsDir())

	// the directory is exhausted, so a further read must return io.EOF
	fis, err = fh.Readdir(2)
	assert.Equal(t, io.EOF, err)
	require.Equal(t, 0, len(fis))

	require.NoError(t, fh.Close())

}
|
||||
|
||||
// TestDirHandleReaddirnames smoke tests Readdirnames - the heavy
// lifting is covered by TestDirHandleReaddir.
func TestDirHandleReaddirnames(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, dir, _ := dirCreate(t, r)

	fh, err := dir.Open(os.O_RDONLY)
	require.NoError(t, err)

	// Smoke test only since heavy lifting done in Readdir
	fis, err := fh.Readdirnames(-1)
	require.NoError(t, err)
	require.Equal(t, 1, len(fis))
	assert.Equal(t, "file1", fis[0])

	require.NoError(t, fh.Close())
}
|
||||
462
.rclone_repo/vfs/dir_test.go
Executable file
462
.rclone_repo/vfs/dir_test.go
Executable file
@@ -0,0 +1,462 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// dirCreate sets up a VFS over a fresh remote containing one file
// ("dir/file1") and returns the VFS, the "dir" node and the test item.
func dirCreate(t *testing.T, r *fstest.Run) (*VFS, *Dir, fstest.Item) {
	vfs := New(r.Fremote, nil)

	file1 := r.WriteObject("dir/file1", "file1 contents", t1)
	fstest.CheckItems(t, r.Fremote, file1)

	node, err := vfs.Stat("dir")
	require.NoError(t, err)
	require.True(t, node.IsDir())

	return vfs, node.(*Dir), file1
}
|
||||
|
||||
// TestDirMethods exercises the simple accessor methods of Dir.
func TestDirMethods(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, _ := dirCreate(t, r)

	// String
	assert.Equal(t, "dir/", dir.String())
	assert.Equal(t, "<nil *Dir>", (*Dir)(nil).String())

	// IsDir
	assert.Equal(t, true, dir.IsDir())

	// IsFile
	assert.Equal(t, false, dir.IsFile())

	// Mode
	assert.Equal(t, vfs.Opt.DirPerms, dir.Mode())

	// Name
	assert.Equal(t, "dir", dir.Name())

	// Path
	assert.Equal(t, "dir", dir.Path())

	// Sys
	assert.Equal(t, nil, dir.Sys())

	// Inode
	assert.NotEqual(t, uint64(0), dir.Inode())

	// Node
	assert.Equal(t, dir, dir.Node())

	// ModTime - the very wide tolerance (~100 years) only checks the
	// time is plausible, not its exact value
	assert.WithinDuration(t, t1, dir.ModTime(), 100*365*24*60*60*time.Second)

	// Size
	assert.Equal(t, int64(0), dir.Size())

	// Sync
	assert.NoError(t, dir.Sync())

	// DirEntry
	assert.Equal(t, dir.entry, dir.DirEntry())

	// VFS
	assert.Equal(t, vfs, dir.VFS())
}
|
||||
|
||||
// TestDirForgetAll checks that ForgetAll empties a directory's cache
// without touching its parent, and that forgetting the root empties
// everything.
func TestDirForgetAll(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, file1 := dirCreate(t, r)

	// Make sure / and dir are in cache
	_, err := vfs.Stat(file1.Path)
	require.NoError(t, err)

	root, err := vfs.Root()
	require.NoError(t, err)

	assert.Equal(t, 1, len(root.items))
	assert.Equal(t, 1, len(dir.items))

	dir.ForgetAll()
	// only dir is purged; the root still lists dir itself
	assert.Equal(t, 1, len(root.items))
	assert.Equal(t, 0, len(dir.items))

	root.ForgetAll()
	assert.Equal(t, 0, len(root.items))
	assert.Equal(t, 0, len(dir.items))
}
|
||||
|
||||
// TestDirForgetPath checks that ForgetPath purges only the matching
// subtree and that a path not in the cache is a harmless no-op.
func TestDirForgetPath(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, file1 := dirCreate(t, r)

	// Make sure / and dir are in cache
	_, err := vfs.Stat(file1.Path)
	require.NoError(t, err)

	root, err := vfs.Root()
	require.NoError(t, err)

	assert.Equal(t, 1, len(root.items))
	assert.Equal(t, 1, len(dir.items))

	root.ForgetPath("dir", fs.EntryDirectory)
	assert.Equal(t, 1, len(root.items))
	assert.Equal(t, 0, len(dir.items))

	// forgetting a path that is not cached changes nothing
	root.ForgetPath("not/in/cache", fs.EntryDirectory)
	assert.Equal(t, 1, len(root.items))
	assert.Equal(t, 0, len(dir.items))
}
|
||||
|
||||
// TestDirWalk checks Dir.walk: which cached directories are visited
// for various absolute paths, and that children are visited before
// their parent.
func TestDirWalk(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, _, file1 := dirCreate(t, r)

	file2 := r.WriteObject("fil/a/b/c", "super long file", t1)
	fstest.CheckItems(t, r.Fremote, file1, file2)

	root, err := vfs.Root()
	require.NoError(t, err)

	// Forget the cache since we put another object in
	root.ForgetAll()

	// Read the directories in
	_, err = vfs.Stat("dir")
	require.NoError(t, err)
	_, err = vfs.Stat("fil/a/b")
	require.NoError(t, err)
	fil, err := vfs.Stat("fil")
	require.NoError(t, err)

	var result []string
	fn := func(d *Dir) {
		result = append(result, d.path)
	}

	result = nil
	root.walk("", fn)
	sort.Strings(result) // sort as there is a map traversal involved
	assert.Equal(t, []string{"", "dir", "fil", "fil/a", "fil/a/b"}, result)

	result = nil
	root.walk("dir", fn)
	assert.Equal(t, []string{"dir"}, result)

	result = nil
	root.walk("not found", fn)
	assert.Equal(t, []string(nil), result)

	// children first, then parents - hence the reversed order
	result = nil
	root.walk("fil", fn)
	assert.Equal(t, []string{"fil/a/b", "fil/a", "fil"}, result)

	result = nil
	fil.(*Dir).walk("fil", fn)
	assert.Equal(t, []string{"fil/a/b", "fil/a", "fil"}, result)

	result = nil
	root.walk("fil/a", fn)
	assert.Equal(t, []string{"fil/a/b", "fil/a"}, result)

	result = nil
	fil.(*Dir).walk("fil/a", fn)
	assert.Equal(t, []string{"fil/a/b", "fil/a"}, result)

	result = nil
	root.walk("fil/a", fn)
	assert.Equal(t, []string{"fil/a/b", "fil/a"}, result)

	result = nil
	root.walk("fil/a/b", fn)
	assert.Equal(t, []string{"fil/a/b"}, result)
}
|
||||
|
||||
// TestDirSetModTime checks SetModTime updates the cached time and
// returns EROFS on a read-only VFS.
func TestDirSetModTime(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, _ := dirCreate(t, r)

	err := dir.SetModTime(t1)
	require.NoError(t, err)
	assert.WithinDuration(t, t1, dir.ModTime(), time.Second)

	err = dir.SetModTime(t2)
	require.NoError(t, err)
	assert.WithinDuration(t, t2, dir.ModTime(), time.Second)

	// read-only VFS must refuse the update
	vfs.Opt.ReadOnly = true
	err = dir.SetModTime(t2)
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
func TestDirStat(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, dir, _ := dirCreate(t, r)
|
||||
|
||||
node, err := dir.Stat("file1")
|
||||
require.NoError(t, err)
|
||||
_, ok := node.(*File)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, int64(14), node.Size())
|
||||
assert.Equal(t, "file1", node.Name())
|
||||
|
||||
node, err = dir.Stat("not found")
|
||||
assert.Equal(t, ENOENT, err)
|
||||
}
|
||||
|
||||
// This lists dir and checks the listing is as expected - each entry is
// rendered as "name,size,isDir" and compared against want.
func checkListing(t *testing.T, dir *Dir, want []string) {
	var got []string
	nodes, err := dir.ReadDirAll()
	require.NoError(t, err)
	for _, node := range nodes {
		got = append(got, fmt.Sprintf("%s,%d,%v", node.Name(), node.Size(), node.IsDir()))
	}
	assert.Equal(t, want, got)
}
|
||||
|
||||
// TestDirReadDirAll checks ReadDirAll listings for a directory, the
// root and a subdirectory.
func TestDirReadDirAll(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs := New(r.Fremote, nil)

	file1 := r.WriteObject("dir/file1", "file1 contents", t1)
	file2 := r.WriteObject("dir/file2", "file2- contents", t2)
	file3 := r.WriteObject("dir/subdir/file3", "file3-- contents", t3)
	fstest.CheckItems(t, r.Fremote, file1, file2, file3)

	node, err := vfs.Stat("dir")
	require.NoError(t, err)
	dir := node.(*Dir)

	checkListing(t, dir, []string{"file1,14,false", "file2,15,false", "subdir,0,true"})

	node, err = vfs.Stat("")
	require.NoError(t, err)
	dir = node.(*Dir)

	checkListing(t, dir, []string{"dir,0,true"})

	node, err = vfs.Stat("dir/subdir")
	require.NoError(t, err)
	dir = node.(*Dir)

	checkListing(t, dir, []string{"file3,16,false"})
}
|
||||
|
||||
func TestDirOpen(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, dir, _ := dirCreate(t, r)
|
||||
|
||||
fd, err := dir.Open(os.O_RDONLY)
|
||||
require.NoError(t, err)
|
||||
_, ok := fd.(*DirHandle)
|
||||
assert.True(t, ok)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = dir.Open(os.O_WRONLY)
|
||||
assert.Equal(t, EPERM, err)
|
||||
}
|
||||
|
||||
// TestDirCreate checks that Create makes a file node, that the file
// appears in the VFS after being written and closed, and that a
// read-only VFS refuses creation.
func TestDirCreate(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, _ := dirCreate(t, r)

	file, err := dir.Create("potato", os.O_WRONLY|os.O_CREATE)
	require.NoError(t, err)
	assert.Equal(t, int64(0), file.Size())

	fd, err := file.Open(os.O_WRONLY | os.O_CREATE)
	require.NoError(t, err)

	// FIXME Note that this fails with the current implementation
	// until the file has been opened.

	// file2, err := vfs.Stat("dir/potato")
	// require.NoError(t, err)
	// assert.Equal(t, file, file2)

	n, err := fd.Write([]byte("hello"))
	require.NoError(t, err)
	assert.Equal(t, 5, n)

	require.NoError(t, fd.Close())

	// after close the file is visible with the written size
	file2, err := vfs.Stat("dir/potato")
	require.NoError(t, err)
	assert.Equal(t, int64(5), file2.Size())

	vfs.Opt.ReadOnly = true
	_, err = dir.Create("sausage", os.O_WRONLY|os.O_CREATE)
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
// TestDirMkdir checks directory creation, collision with an existing
// file, and the read-only refusal.
func TestDirMkdir(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, file1 := dirCreate(t, r)

	// creating a directory over an existing file must fail
	_, err := dir.Mkdir("file1")
	assert.Error(t, err)

	sub, err := dir.Mkdir("sub")
	assert.NoError(t, err)

	// check the vfs
	checkListing(t, dir, []string{"file1,14,false", "sub,0,true"})
	checkListing(t, sub, []string(nil))

	// check the underlying r.Fremote
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"dir", "dir/sub"}, r.Fremote.Precision())

	vfs.Opt.ReadOnly = true
	_, err = dir.Mkdir("sausage")
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
// TestDirRemove checks that a non-empty directory refuses removal with
// ENOTEMPTY, that an emptied directory removes cleanly, and that a
// read-only VFS refuses removal.
func TestDirRemove(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, _ := dirCreate(t, r)

	// check directory is there
	node, err := vfs.Stat("dir")
	require.NoError(t, err)
	assert.True(t, node.IsDir())

	err = dir.Remove()
	assert.Equal(t, ENOTEMPTY, err)

	// Delete the sub file
	node, err = vfs.Stat("dir/file1")
	require.NoError(t, err)
	err = node.Remove()
	require.NoError(t, err)

	// Remove the now empty directory
	err = dir.Remove()
	require.NoError(t, err)

	// check directory is not there
	node, err = vfs.Stat("dir")
	assert.Equal(t, ENOENT, err)

	// check the vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string(nil))

	// check the underlying r.Fremote
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, r.Fremote.Precision())

	// read only check
	vfs.Opt.ReadOnly = true
	err = dir.Remove()
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
// TestDirRemoveAll checks recursive removal of a directory and its
// contents, and the read-only refusal.
func TestDirRemoveAll(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, _ := dirCreate(t, r)

	// Remove the directory and contents
	err := dir.RemoveAll()
	require.NoError(t, err)

	// check the vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string(nil))

	// check the underlying r.Fremote
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, r.Fremote.Precision())

	// read only check
	vfs.Opt.ReadOnly = true
	err = dir.RemoveAll()
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
func TestDirRemoveName(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs, dir, _ := dirCreate(t, r)
|
||||
|
||||
err := dir.RemoveName("file1")
|
||||
require.NoError(t, err)
|
||||
checkListing(t, dir, []string(nil))
|
||||
root, err := vfs.Root()
|
||||
require.NoError(t, err)
|
||||
checkListing(t, root, []string{"dir,0,true"})
|
||||
|
||||
// check the underlying r.Fremote
|
||||
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"dir"}, r.Fremote.Precision())
|
||||
|
||||
// read only check
|
||||
vfs.Opt.ReadOnly = true
|
||||
err = dir.RemoveName("potato")
|
||||
assert.Equal(t, EROFS, err)
|
||||
}
|
||||
|
||||
// TestDirRename checks Dir.Rename for directories and files: missing
// sources return ENOENT, successful renames update both the VFS tree
// and the remote, and read-only mode returns EROFS.
func TestDirRename(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, dir, file1 := dirCreate(t, r)

	root, err := vfs.Root()
	require.NoError(t, err)

	// renaming a non-existent entry must fail
	err = dir.Rename("not found", "tuba", dir)
	assert.Equal(t, ENOENT, err)

	// Rename a directory
	err = root.Rename("dir", "dir2", root)
	assert.NoError(t, err)
	checkListing(t, root, []string{"dir2,0,true"})
	checkListing(t, dir, []string{"file1,14,false"})

	// check the underlying r.Fremote
	file1.Path = "dir2/file1"
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"dir2"}, r.Fremote.Precision())

	// refetch dir (the old *Dir node is stale after the rename)
	node, err := vfs.Stat("dir2")
	assert.NoError(t, err)
	dir = node.(*Dir)

	// Rename a file
	err = dir.Rename("file1", "file2", root)
	assert.NoError(t, err)
	checkListing(t, root, []string{"dir2,0,true", "file2,14,false"})
	checkListing(t, dir, []string(nil))

	// check the underlying r.Fremote
	file1.Path = "file2"
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"dir2"}, r.Fremote.Precision())

	// read only check
	vfs.Opt.ReadOnly = true
	err = dir.Rename("potato", "tuba", dir)
	assert.Equal(t, EROFS, err)
}
|
||||
49
.rclone_repo/vfs/errors.go
Executable file
49
.rclone_repo/vfs/errors.go
Executable file
@@ -0,0 +1,49 @@
|
||||
// Cross platform errors
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Error describes low level errors in a cross platform way.
type Error byte

// NB if changing errors translateError in cmd/mount/fs.go, cmd/cmount/fs.go

// Low level errors
const (
	OK Error = iota
	ENOTEMPTY
	ESPIPE
	EBADF
	EROFS
	ENOSYS
)

// Errors which have exact counterparts in os
var (
	ENOENT = os.ErrNotExist
	EEXIST = os.ErrExist
	EPERM  = os.ErrPermission
	EINVAL = os.ErrInvalid
	// ECLOSED see errors_{old,new}.go
)

// errorNames maps each low level Error to its message text.
var errorNames = []string{
	OK:        "Success",
	ENOTEMPTY: "Directory not empty",
	ESPIPE:    "Illegal seek",
	EBADF:     "Bad file descriptor",
	EROFS:     "Read only file system",
	ENOSYS:    "Function not implemented",
}

// Error renders the error as a string - satisfies the error interface.
// Values outside the known range get a generic numbered message.
func (e Error) Error() string {
	if idx := int(e); idx < len(errorNames) {
		return errorNames[idx]
	}
	return fmt.Sprintf("Low level error %d", e)
}
|
||||
10
.rclone_repo/vfs/errors_new.go
Executable file
10
.rclone_repo/vfs/errors_new.go
Executable file
@@ -0,0 +1,10 @@
|
||||
// Errors for go1.8+
|
||||
|
||||
//+build go1.8
|
||||
|
||||
package vfs
|
||||
|
||||
import "os"
|
||||
|
||||
// ECLOSED is returned when a handle is closed twice
|
||||
var ECLOSED = os.ErrClosed
|
||||
10
.rclone_repo/vfs/errors_old.go
Executable file
10
.rclone_repo/vfs/errors_old.go
Executable file
@@ -0,0 +1,10 @@
|
||||
// Errors for pre go1.8
|
||||
|
||||
//+build !go1.8
|
||||
|
||||
package vfs
|
||||
|
||||
import "errors"
|
||||
|
||||
// ECLOSED is returned when a handle is closed twice
|
||||
var ECLOSED = errors.New("file already closed")
|
||||
13
.rclone_repo/vfs/errors_test.go
Executable file
13
.rclone_repo/vfs/errors_test.go
Executable file
@@ -0,0 +1,13 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestErrorError checks the string rendering of known low level
// errors and the fallback message for an out of range value.
func TestErrorError(t *testing.T) {
	assert.Equal(t, "Success", OK.Error())
	assert.Equal(t, "Function not implemented", ENOSYS.Error())
	assert.Equal(t, "Low level error 99", Error(99).Error())
}
|
||||
610
.rclone_repo/vfs/file.go
Executable file
610
.rclone_repo/vfs/file.go
Executable file
@@ -0,0 +1,610 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/log"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// File represents a file
type File struct {
	inode uint64 // inode number
	size  int64  // size of file - read and written with atomic int64 - must be 64 bit aligned
	d     *Dir   // parent directory - read only

	mu                sync.Mutex   // protects the following
	o                 fs.Object    // NB o may be nil if file is being written
	leaf              string       // leaf name of the object
	rwOpenCount       int          // number of open files on this handle
	writers           []Handle     // writers for this file
	nwriters          int32        // len(writers) which is read/updated with atomic
	readWriters       int          // how many RWFileHandle are open for writing
	readWriterClosing bool         // is a RWFileHandle currently closing?
	modified          bool         // has the cache file been modified by a RWFileHandle?
	pendingModTime    time.Time    // will be applied once o becomes available, i.e. after file was written
	pendingRenameFun  func() error // will be run/renamed after all writers close

	muRW sync.Mutex // synchronizes RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
}
|
||||
|
||||
// newFile creates a new File
|
||||
func newFile(d *Dir, o fs.Object, leaf string) *File {
|
||||
return &File{
|
||||
d: d,
|
||||
o: o,
|
||||
leaf: leaf,
|
||||
inode: newInode(),
|
||||
}
|
||||
}
|
||||
|
||||
// String converts it to printable
func (f *File) String() string {
	if f == nil {
		return "<nil *File>"
	}
	return f.Path()
}

// IsFile returns true for File - satisfies Node interface
func (f *File) IsFile() bool {
	return true
}

// IsDir returns false for File - satisfies Node interface
func (f *File) IsDir() bool {
	return false
}

// Mode bits of the file - satisfies Node interface
func (f *File) Mode() (mode os.FileMode) {
	return f.d.vfs.Opt.FilePerms
}

// Name (base) of the file - satisfies Node interface
func (f *File) Name() (name string) {
	return f.leaf
}

// Path returns the full path of the file
func (f *File) Path() string {
	return path.Join(f.d.path, f.leaf)
}

// Sys returns underlying data source (can be nil) - satisfies Node interface
func (f *File) Sys() interface{} {
	return nil
}

// Inode returns the inode number - satisfies Node interface
func (f *File) Inode() uint64 {
	return f.inode
}

// Node returns the Node associated with this - satisfies Noder interface
func (f *File) Node() Node {
	return f
}
|
||||
|
||||
// applyPendingRename runs a previously set rename operation if there are no
// more remaining writers. Call without lock held.
func (f *File) applyPendingRename() {
	fun := f.pendingRenameFun
	if fun == nil || f.writingInProgress() {
		// nothing queued, or writers still active - the rename will be
		// retried when the last writer finishes closing
		return
	}
	fs.Debugf(f.o, "Running delayed rename now")
	if err := fun(); err != nil {
		fs.Errorf(f.Path(), "delayed File.Rename error: %v", err)
	}
}
|
||||
|
||||
// rename attempts to immediately rename a file if there are no open writers.
// Otherwise it will queue the rename operation on the remote until no writers
// remain.
func (f *File) rename(destDir *Dir, newName string) error {
	// FIXME: could Copy then Delete if Move not available
	// - though care needed if case insensitive...
	doMove := f.d.f.Features().Move
	if doMove == nil {
		err := errors.Errorf("Fs %q can't rename files (no Move)", f.d.f)
		fs.Errorf(f.Path(), "Dir.Rename error: %v", err)
		return err
	}

	// renameCall performs the rename on the remote then updates this
	// node's object, directory and leaf under the lock
	renameCall := func() error {
		newPath := path.Join(destDir.path, newName)
		newObject, err := doMove(f.o, newPath)
		if err != nil {
			fs.Errorf(f.Path(), "File.Rename error: %v", err)
			return err
		}
		// Update the node with the new details
		fs.Debugf(f.o, "Updating file with %v %p", newObject, f)
		// f.rename(destDir, newObject)
		f.mu.Lock()
		f.o = newObject
		f.d = destDir
		f.leaf = path.Base(newObject.Remote())
		f.pendingRenameFun = nil
		f.mu.Unlock()
		return nil
	}

	// if the file is open for write, rename it locally now and defer
	// the remote rename until the writers have closed
	if f.writingInProgress() {
		fs.Debugf(f.o, "File is currently open, delaying rename %p", f)
		f.mu.Lock()
		f.d = destDir
		f.leaf = newName
		f.pendingRenameFun = renameCall
		f.mu.Unlock()
		return nil
	}

	return renameCall()
}
|
||||
|
||||
// addWriter adds a write handle to the file
|
||||
func (f *File) addWriter(h Handle) {
|
||||
f.mu.Lock()
|
||||
f.writers = append(f.writers, h)
|
||||
atomic.AddInt32(&f.nwriters, 1)
|
||||
if _, ok := h.(*RWFileHandle); ok {
|
||||
f.readWriters++
|
||||
}
|
||||
f.mu.Unlock()
|
||||
}
|
||||
|
||||
// delWriter removes a write handle from the file
|
||||
func (f *File) delWriter(h Handle, modifiedCacheFile bool) (lastWriterAndModified bool) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
var found = -1
|
||||
for i := range f.writers {
|
||||
if f.writers[i] == h {
|
||||
found = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if found >= 0 {
|
||||
f.writers = append(f.writers[:found], f.writers[found+1:]...)
|
||||
atomic.AddInt32(&f.nwriters, -1)
|
||||
} else {
|
||||
fs.Debugf(f.o, "File.delWriter couldn't find handle")
|
||||
}
|
||||
if _, ok := h.(*RWFileHandle); ok {
|
||||
f.readWriters--
|
||||
}
|
||||
f.readWriterClosing = true
|
||||
if modifiedCacheFile {
|
||||
f.modified = true
|
||||
}
|
||||
lastWriterAndModified = len(f.writers) == 0 && f.modified
|
||||
if lastWriterAndModified {
|
||||
f.modified = false
|
||||
}
|
||||
defer f.applyPendingRename()
|
||||
return
|
||||
}
|
||||
|
||||
// addRWOpen should be called by ReadWriteHandle when they have
// actually opened the file for read or write.
func (f *File) addRWOpen() {
	f.mu.Lock()
	f.rwOpenCount++
	f.mu.Unlock()
}

// delRWOpen should be called by ReadWriteHandle when they have closed
// an actually opened file for read or write.
func (f *File) delRWOpen() {
	f.mu.Lock()
	f.rwOpenCount--
	f.mu.Unlock()
}

// rwOpens returns how many active open ReadWriteHandles there are.
// Note that file handles which are in pending open state aren't
// counted.
func (f *File) rwOpens() int {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.rwOpenCount
}
|
||||
|
||||
// finishWriterClose resets the readWriterClosing flag and then runs
// any rename that was delayed while the writer was closing.
func (f *File) finishWriterClose() {
	f.mu.Lock()
	f.readWriterClosing = false
	f.mu.Unlock()
	f.applyPendingRename()
}

// activeWriters returns the number of writers on the file
//
// Note that we don't take the mutex here. If we do then we can get a
// deadlock.
func (f *File) activeWriters() int {
	return int(atomic.LoadInt32(&f.nwriters))
}
|
||||
|
||||
// ModTime returns the modified time of the file
//
// if NoModTime is set then it returns the mod time of the directory
func (f *File) ModTime() (modTime time.Time) {
	f.mu.Lock()
	defer f.mu.Unlock()

	if !f.d.vfs.Opt.NoModTime {
		// if o is nil it isn't valid yet or there are writers, so use
		// the pending mod time if one has been set
		if f.o == nil || len(f.writers) != 0 || f.readWriterClosing {
			if !f.pendingModTime.IsZero() {
				return f.pendingModTime
			}
		} else {
			return f.o.ModTime()
		}
	}

	// fall back to the mod time of the parent directory
	return f.d.modTime
}
|
||||
|
||||
// nonNegative clamps i to zero: negative values become 0, all other
// values are returned unchanged.
func nonNegative(i int64) int64 {
	if i < 0 {
		return 0
	}
	return i
}
|
||||
|
||||
// Size of the file
func (f *File) Size() int64 {
	f.mu.Lock()
	defer f.mu.Unlock()

	// if o is nil it isn't valid yet or there are writers, so return the size so far
	if f.writingInProgress() {
		return atomic.LoadInt64(&f.size)
	}
	// o is known to be non-nil here (writingInProgress covers o == nil)
	return nonNegative(f.o.Size())
}
|
||||
|
||||
// SetModTime sets the modtime for the file
//
// If there are no writers the time is applied to the remote object
// immediately, otherwise it is stored and applied when the object
// becomes available (see setObject).  Returns EROFS when read-only.
func (f *File) SetModTime(modTime time.Time) error {
	if f.d.vfs.Opt.ReadOnly {
		return EROFS
	}
	f.mu.Lock()
	defer f.mu.Unlock()

	f.pendingModTime = modTime

	// Only update the ModTime when there are no writers, setObject will do it
	if !f.writingInProgress() {
		return f.applyPendingModTime()
	}

	// queue up for later, hoping f.o becomes available
	return nil
}
|
||||
|
||||
// applyPendingModTime applies the stored pending modtime (if any) to
// the remote object, always clearing it afterwards.  Backends which
// can't set modtimes are deliberately not treated as an error.
//
// call with the mutex held
func (f *File) applyPendingModTime() error {
	defer func() { f.pendingModTime = time.Time{} }()

	if f.pendingModTime.IsZero() {
		// nothing queued
		return nil
	}

	if f.o == nil {
		return errors.New("Cannot apply ModTime, file object is not available")
	}

	err := f.o.SetModTime(f.pendingModTime)
	switch err {
	case nil:
		fs.Debugf(f.o, "File.applyPendingModTime OK")
	case fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
		// do nothing, in order to not break "touch somefile" if it exists already
	default:
		fs.Errorf(f, "File.applyPendingModTime error: %v", err)
		return err
	}

	return nil
}
|
||||
|
||||
// writingInProgress returns true if there are any open writers, a
// writer is currently closing, or the object hasn't been created yet
// (f.o == nil).
//
// NOTE(review): reads f.o/f.writers without taking f.mu - callers
// appear to either hold the lock already or tolerate a racy answer;
// confirm before relying on it for synchronization.
func (f *File) writingInProgress() bool {
	return f.o == nil || len(f.writers) != 0 || f.readWriterClosing
}

// Update the size while writing
func (f *File) setSize(n int64) {
	atomic.StoreInt64(&f.size, n)
}

// Update the object when written and add it to the directory
func (f *File) setObject(o fs.Object) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.o = o
	// apply any modtime queued while the file was being written;
	// errors are logged inside applyPendingModTime
	_ = f.applyPendingModTime()
	f.d.addObject(f)
}

// Update the object but don't update the directory cache - for use by
// the directory cache
func (f *File) setObjectNoUpdate(o fs.Object) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.o = o
}
|
||||
|
||||
// Get the current fs.Object - may be nil
func (f *File) getObject() fs.Object {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.o
}

// exists returns whether the file exists already
func (f *File) exists() bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.o != nil
}
|
||||
|
||||
// Wait for f.o to become non nil for a short time (up to 50 polls of
// 100ms, i.e. ~5s) returning it or an error. Use when opening a read
// handle.
//
// Call without the mutex held
func (f *File) waitForValidObject() (o fs.Object, err error) {
	for i := 0; i < 50; i++ {
		f.mu.Lock()
		o = f.o
		nwriters := len(f.writers)
		wclosing := f.readWriterClosing
		f.mu.Unlock()
		if o != nil {
			return o, nil
		}
		if nwriters == 0 && !wclosing {
			// no object and nobody writing one - the writer must have failed
			return nil, errors.New("can't open file - writer failed")
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil, ENOENT
}
|
||||
|
||||
// openRead open the file for read
func (f *File) openRead() (fh *ReadFileHandle, err error) {
	// if o is nil it isn't valid yet - wait for the writer to finish
	_, err = f.waitForValidObject()
	if err != nil {
		return nil, err
	}
	// fs.Debugf(o, "File.openRead")

	fh, err = newReadFileHandle(f)
	if err != nil {
		fs.Errorf(f, "File.openRead failed: %v", err)
		return nil, err
	}
	return fh, nil
}

// openWrite open the file for write.  Returns EROFS when read-only.
func (f *File) openWrite(flags int) (fh *WriteFileHandle, err error) {
	if f.d.vfs.Opt.ReadOnly {
		return nil, EROFS
	}
	// fs.Debugf(o, "File.openWrite")

	fh, err = newWriteFileHandle(f.d, f, f.Path(), flags)
	if err != nil {
		fs.Errorf(f, "File.openWrite failed: %v", err)
		return nil, err
	}
	return fh, nil
}

// openRW open the file for read and write using a temporary file
//
// It uses the open flags passed in.  Returns EROFS when the VFS is
// read-only and the flags request write access.
func (f *File) openRW(flags int) (fh *RWFileHandle, err error) {
	// FIXME chunked
	if flags&accessModeMask != os.O_RDONLY && f.d.vfs.Opt.ReadOnly {
		return nil, EROFS
	}
	// fs.Debugf(o, "File.openRW")

	fh, err = newRWFileHandle(f.d, f, f.Path(), flags)
	if err != nil {
		fs.Errorf(f, "File.openRW failed: %v", err)
		return nil, err
	}
	return fh, nil
}
|
||||
|
||||
// Sync the file
//
// Note that we don't do anything except return OK
func (f *File) Sync() error {
	return nil
}

// Remove the file from the remote, the directory listing and the
// cache.  Returns EROFS when read-only.
func (f *File) Remove() error {
	f.muRW.Lock()
	defer f.muRW.Unlock()
	if f.d.vfs.Opt.ReadOnly {
		return EROFS
	}
	if f.o != nil {
		err := f.o.Remove()
		if err != nil {
			fs.Errorf(f, "File.Remove file error: %v", err)
			return err
		}
	}
	// Remove the item from the directory listing
	f.d.delObject(f.Name())
	// Remove the object from the cache
	if f.d.vfs.Opt.CacheMode >= CacheModeMinimal {
		f.d.vfs.cache.remove(f.Path())
	}
	return nil
}

// RemoveAll the file - same as remove for files
func (f *File) RemoveAll() error {
	return f.Remove()
}

// DirEntry returns the underlying fs.DirEntry - may be nil
func (f *File) DirEntry() (entry fs.DirEntry) {
	return f.o
}

// Dir returns the directory this file is in
func (f *File) Dir() *Dir {
	return f.d
}

// VFS returns the instance of the VFS
func (f *File) VFS() *VFS {
	return f.d.vfs
}
|
||||
|
||||
// Open a file according to the flags provided
//
// O_RDONLY open the file read-only.
// O_WRONLY open the file write-only.
// O_RDWR   open the file read-write.
//
// O_APPEND append data to the file when writing.
// O_CREATE create a new file if none exists.
// O_EXCL   used with O_CREATE, file must not exist
// O_SYNC   open for synchronous I/O.
// O_TRUNC  if possible, truncate file when opened
//
// We ignore O_SYNC and O_EXCL
func (f *File) Open(flags int) (fd Handle, err error) {
	defer log.Trace(f, "flags=%s", decodeOpenFlags(flags))("fd=%v, err=%v", &fd, &err)
	var (
		write    bool // if set need write support
		read     bool // if set need read support
		rdwrMode = flags & accessModeMask
	)

	// http://pubs.opengroup.org/onlinepubs/7908799/xsh/open.html
	// The result of using O_TRUNC with O_RDONLY is undefined.
	// Linux seems to truncate the file, but we prefer to return EINVAL
	if rdwrMode == os.O_RDONLY && flags&os.O_TRUNC != 0 {
		return nil, EINVAL
	}

	// Figure out the read/write intents
	switch {
	case rdwrMode == os.O_RDONLY:
		read = true
	case rdwrMode == os.O_WRONLY:
		write = true
	case rdwrMode == os.O_RDWR:
		read = true
		write = true
	default:
		fs.Errorf(f, "Can't figure out how to open with flags: 0x%X", flags)
		return nil, EPERM
	}

	// If append is set then set read to force openRW
	if flags&os.O_APPEND != 0 {
		read = true
	}

	// If truncate is set then set write to force openRW
	if flags&os.O_TRUNC != 0 {
		write = true
	}

	// FIXME discover if file is in cache or not?

	// Open the correct sort of handle
	CacheMode := f.d.vfs.Opt.CacheMode
	if CacheMode >= CacheModeMinimal && f.d.vfs.cache.opens(f.Path()) > 0 {
		// the file is already open in the cache - keep using the cache copy
		fd, err = f.openRW(flags)
	} else if read && write {
		if CacheMode >= CacheModeMinimal {
			fd, err = f.openRW(flags)
		} else {
			// Open write only and hope the user doesn't
			// want to read. If they do they will get an
			// EPERM plus an Error log.
			fd, err = f.openWrite(flags)
		}
	} else if write {
		if CacheMode >= CacheModeWrites {
			fd, err = f.openRW(flags)
		} else {
			fd, err = f.openWrite(flags)
		}
	} else if read {
		if CacheMode >= CacheModeFull {
			fd, err = f.openRW(flags)
		} else {
			fd, err = f.openRead()
		}
	} else {
		fs.Errorf(f, "Can't figure out how to open with flags: 0x%X", flags)
		return nil, EPERM
	}
	return fd, err
}
|
||||
|
||||
// Truncate changes the size of the named file.
func (f *File) Truncate(size int64) (err error) {
	// make a copy of fh.writers with the lock held then unlock so
	// we can call other file methods.
	f.mu.Lock()
	writers := make([]Handle, len(f.writers))
	copy(writers, f.writers)
	f.mu.Unlock()

	// FIXME: handle closing writer

	// If have writers then call truncate for each writer
	if len(writers) != 0 {
		fs.Debugf(f.o, "Truncating %d file handles", len(writers))
		for _, h := range writers {
			// keep going on error but report the last one seen
			truncateErr := h.Truncate(size)
			if truncateErr != nil {
				err = truncateErr
			}
		}
		return err
	}
	fs.Debugf(f.o, "Truncating file")

	// Otherwise if no writers then truncate the file by opening
	// the file and truncating it.
	flags := os.O_WRONLY
	if size == 0 {
		// truncating to zero can be done purely with the open flags
		flags |= os.O_TRUNC
	}
	fh, err := f.Open(flags)
	if err != nil {
		return err
	}
	defer fs.CheckClose(fh, &err)
	if size != 0 {
		return fh.Truncate(size)
	}
	return nil
}
|
||||
184
.rclone_repo/vfs/file_test.go
Executable file
184
.rclone_repo/vfs/file_test.go
Executable file
@@ -0,0 +1,184 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// fileCreate makes a VFS over the test remote containing "dir/file1"
// and returns the VFS, the *File node for it and the fstest.Item.
func fileCreate(t *testing.T, r *fstest.Run) (*VFS, *File, fstest.Item) {
	vfs := New(r.Fremote, nil)

	file1 := r.WriteObject("dir/file1", "file1 contents", t1)
	fstest.CheckItems(t, r.Fremote, file1)

	node, err := vfs.Stat("dir/file1")
	require.NoError(t, err)
	require.True(t, node.IsFile())

	return vfs, node.(*File), file1
}
|
||||
|
||||
// TestFileMethods exercises the simple accessor methods of File.
func TestFileMethods(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, file, _ := fileCreate(t, r)

	// String
	assert.Equal(t, "dir/file1", file.String())
	assert.Equal(t, "<nil *File>", (*File)(nil).String())

	// IsDir
	assert.Equal(t, false, file.IsDir())

	// IsFile
	assert.Equal(t, true, file.IsFile())

	// Mode
	assert.Equal(t, vfs.Opt.FilePerms, file.Mode())

	// Name
	assert.Equal(t, "file1", file.Name())

	// Path
	assert.Equal(t, "dir/file1", file.Path())

	// Sys
	assert.Equal(t, nil, file.Sys())

	// Inode
	assert.NotEqual(t, uint64(0), file.Inode())

	// Node
	assert.Equal(t, file, file.Node())

	// ModTime
	assert.WithinDuration(t, t1, file.ModTime(), r.Fremote.Precision())

	// Size
	assert.Equal(t, int64(14), file.Size())

	// Sync
	assert.NoError(t, file.Sync())

	// DirEntry
	assert.Equal(t, file.o, file.DirEntry())

	// Dir
	assert.Equal(t, file.d, file.Dir())

	// VFS
	assert.Equal(t, vfs, file.VFS())
}
|
||||
|
||||
// TestFileSetModTime checks SetModTime is applied to the remote and
// refused with EROFS when the VFS is read-only.
func TestFileSetModTime(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, file, file1 := fileCreate(t, r)

	err := file.SetModTime(t2)
	require.NoError(t, err)

	file1.ModTime = t2
	fstest.CheckItems(t, r.Fremote, file1)

	// read only check
	vfs.Opt.ReadOnly = true
	err = file.SetModTime(t2)
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
// TestFileOpenRead checks a read handle returns the file contents.
func TestFileOpenRead(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, file, _ := fileCreate(t, r)

	fd, err := file.openRead()
	require.NoError(t, err)

	contents, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
	assert.Equal(t, "file1 contents", string(contents))

	require.NoError(t, fd.Close())
}

// TestFileOpenWrite checks a write handle replaces the file contents
// on close, and that opening for write is refused when read-only.
func TestFileOpenWrite(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, file, _ := fileCreate(t, r)

	fd, err := file.openWrite(os.O_WRONLY | os.O_TRUNC)
	require.NoError(t, err)

	newContents := []byte("this is some new contents")
	n, err := fd.Write(newContents)
	require.NoError(t, err)
	assert.Equal(t, len(newContents), n)
	require.NoError(t, fd.Close())

	assert.Equal(t, int64(25), file.Size())

	// read only check
	vfs.Opt.ReadOnly = true
	_, err = file.openWrite(os.O_WRONLY | os.O_TRUNC)
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
// TestFileRemove checks Remove deletes the file from the remote and
// returns EROFS when the VFS is read-only.
func TestFileRemove(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, file, _ := fileCreate(t, r)

	err := file.Remove()
	require.NoError(t, err)

	fstest.CheckItems(t, r.Fremote)

	// read only check
	vfs.Opt.ReadOnly = true
	err = file.Remove()
	assert.Equal(t, EROFS, err)
}

// TestFileRemoveAll checks RemoveAll behaves like Remove for files.
func TestFileRemoveAll(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, file, _ := fileCreate(t, r)

	err := file.RemoveAll()
	require.NoError(t, err)

	fstest.CheckItems(t, r.Fremote)

	// read only check
	vfs.Opt.ReadOnly = true
	err = file.RemoveAll()
	assert.Equal(t, EROFS, err)
}
|
||||
|
||||
func TestFileOpen(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, file, _ := fileCreate(t, r)
|
||||
|
||||
fd, err := file.Open(os.O_RDONLY)
|
||||
require.NoError(t, err)
|
||||
_, ok := fd.(*ReadFileHandle)
|
||||
assert.True(t, ok)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = file.Open(os.O_WRONLY)
|
||||
assert.NoError(t, err)
|
||||
_, ok = fd.(*WriteFileHandle)
|
||||
assert.True(t, ok)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = file.Open(os.O_RDWR)
|
||||
assert.NoError(t, err)
|
||||
_, ok = fd.(*WriteFileHandle)
|
||||
assert.True(t, ok)
|
||||
|
||||
fd, err = file.Open(3)
|
||||
assert.Equal(t, EPERM, err)
|
||||
}
|
||||
135
.rclone_repo/vfs/help.go
Executable file
135
.rclone_repo/vfs/help.go
Executable file
@@ -0,0 +1,135 @@
|
||||
package vfs
|
||||
|
||||
// Help contains text describing file and directory caching to add to
// the command help.
// (Fixes: duplicated word "not", "a upper limit" -> "an upper limit",
// "buffered to disks" -> "buffered to disk".)
var Help = `
### Directory Cache

Using the ` + "`--dir-cache-time`" + ` flag, you can set how long a
directory should be considered up to date and not refreshed from the
backend. Changes made locally in the mount may appear immediately or
invalidate the cache. However, changes done on the remote will only
be picked up once the cache expires.

Alternatively, you can send a ` + "`SIGHUP`" + ` signal to rclone for
it to flush all directory caches, regardless of how old they are.
Assuming only one rclone instance is running, you can reset the cache
like this:

    kill -SIGHUP $(pidof rclone)

If you configure rclone with a [remote control](/rc) then you can use
rclone rc to flush the whole directory cache:

    rclone rc vfs/forget

Or individual files or directories:

    rclone rc vfs/forget file=path/to/file dir=path/to/dir

### File Buffering

The ` + "`--buffer-size`" + ` flag determines the amount of memory,
that will be used to buffer data in advance.

Each open file descriptor will try to keep the specified amount of
data in memory at all times. The buffered data is bound to one file
descriptor and won't be shared between multiple open file descriptors
of the same file.

This flag is an upper limit for the used memory per file descriptor.
The buffer will only use memory for data that is downloaded but not
yet read. If the buffer is empty, only a small amount of memory
will be used.
The maximum memory used by rclone for buffering can be up to
` + "`--buffer-size * open files`" + `.

### File Caching

**NB** File caching is **EXPERIMENTAL** - use with care!

These flags control the VFS file caching options. The VFS layer is
used by rclone mount to make a cloud storage system work more like a
normal file system.

You'll need to enable VFS caching if you want, for example, to read
and write simultaneously to a file. See below for more details.

Note that the VFS cache works in addition to the cache backend and you
may find that you need one or the other or both.

    --cache-dir string                   Directory rclone will use for caching.
    --vfs-cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
    --vfs-cache-mode string              Cache mode off|minimal|writes|full (default "off")
    --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)

If run with ` + "`-vv`" + ` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
can be controlled with ` + "`--cache-dir`" + ` or setting the appropriate
environment variable.

The cache has 4 different modes selected by ` + "`--vfs-cache-mode`" + `.
The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
closed so if rclone is quit or dies with open files then these won't
get written back to the remote. However they will still be in the on
disk cache.

#### --vfs-cache-mode off

In this mode the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

  * Files can't be opened for both read AND write
  * Files opened for write can't be seeked
  * Existing files opened for write must have O_TRUNC set
  * Files open for read with O_TRUNC will be opened write only
  * Files open for write only will behave as if O_TRUNC was supplied
  * Open modes O_APPEND, O_TRUNC are ignored
  * If an upload fails it can't be retried

#### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible

  * Files opened for write only can't be seeked
  * Existing files opened for write must have O_TRUNC set
  * Files opened for write only will ignore O_APPEND, O_TRUNC
  * If an upload fails it can't be retried

#### --vfs-cache-mode writes

In this mode files opened for read only are still read directly from
the remote, write only and read/write files are buffered to disk
first.

This mode should support all normal file system operations.

If an upload fails it will be retried up to --low-level-retries times.

#### --vfs-cache-mode full

In this mode all reads and writes are buffered to and from disk. When
a file is opened for read it will be downloaded in its entirety first.

This may be appropriate for your needs, or you may prefer to look at
the cache backend which does a much more sophisticated job of caching,
including caching directory hierarchies and chunks of files.

In this mode, unlike the others, when a file is written to the disk,
it will be kept on the disk after it is written to the remote. It
will be purged on a schedule according to ` + "`--vfs-cache-max-age`" + `.

This mode should support all normal file system operations.

If an upload or download fails it will be retried up to
--low-level-retries times.
`
|
||||
228
.rclone_repo/vfs/make_open_tests.go
Executable file
228
.rclone_repo/vfs/make_open_tests.go
Executable file
@@ -0,0 +1,228 @@
|
||||
// This makes the open test suite
|
||||
//
|
||||
// Run with go run make_open_tests.go | gofmt > open_test.go
|
||||
//
|
||||
//+build none
|
||||
|
||||
// FIXME include read too?
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Interprets err into a vfs error
|
||||
func whichError(err error) string {
|
||||
switch err {
|
||||
case nil:
|
||||
return "nil"
|
||||
case io.EOF:
|
||||
return "io.EOF"
|
||||
}
|
||||
s := err.Error()
|
||||
switch {
|
||||
case strings.Contains(s, "no such file or directory"):
|
||||
return "ENOENT"
|
||||
case strings.Contains(s, "bad file descriptor"):
|
||||
return "EBADF"
|
||||
case strings.Contains(s, "file exists"):
|
||||
return "EEXIST"
|
||||
}
|
||||
log.Fatalf("Unknown error: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
// test Opening, reading and writing the file handle with the flags given
|
||||
func test(fileName string, flags int, mode string) {
|
||||
// first try with file not existing
|
||||
_, err := os.Stat(fileName)
|
||||
if !os.IsNotExist(err) {
|
||||
log.Fatalf("File must not exist")
|
||||
}
|
||||
f, openNonExistentErr := os.OpenFile(fileName, flags, 0666)
|
||||
|
||||
var readNonExistentErr error
|
||||
var writeNonExistentErr error
|
||||
if openNonExistentErr == nil {
|
||||
// read some bytes
|
||||
buf := []byte{0, 0}
|
||||
_, readNonExistentErr = f.Read(buf)
|
||||
|
||||
// write some bytes
|
||||
_, writeNonExistentErr = f.Write([]byte("hello"))
|
||||
|
||||
// close
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to close: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// write the file
|
||||
f, err = os.Create(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create: %v", err)
|
||||
}
|
||||
n, err := f.Write([]byte("hello"))
|
||||
if n != 5 || err != nil {
|
||||
log.Fatalf("failed to write n=%d: %v", n, err)
|
||||
}
|
||||
// close
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to close: %v", err)
|
||||
}
|
||||
|
||||
// then open file and try with file existing
|
||||
|
||||
f, openExistingErr := os.OpenFile(fileName, flags, 0666)
|
||||
var readExistingErr error
|
||||
var writeExistingErr error
|
||||
if openExistingErr == nil {
|
||||
// read some bytes
|
||||
buf := []byte{0, 0}
|
||||
_, readExistingErr = f.Read(buf)
|
||||
|
||||
// write some bytes
|
||||
_, writeExistingErr = f.Write([]byte("HEL"))
|
||||
|
||||
// close
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to close: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// read the file
|
||||
f, err = os.Open(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to open: %v", err)
|
||||
}
|
||||
var buf = make([]byte, 64)
|
||||
n, err = f.Read(buf)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatalf("failed to read n=%d: %v", n, err)
|
||||
}
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to close: %v", err)
|
||||
}
|
||||
contents := string(buf[:n])
|
||||
|
||||
// remove file
|
||||
err = os.Remove(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to remove: %v", err)
|
||||
}
|
||||
|
||||
// output the struct
|
||||
fmt.Printf(`{
|
||||
flags: %s,
|
||||
what: %q,
|
||||
openNonExistentErr: %s,
|
||||
readNonExistentErr: %s,
|
||||
writeNonExistentErr: %s,
|
||||
openExistingErr: %s,
|
||||
readExistingErr: %s,
|
||||
writeExistingErr: %s,
|
||||
contents: %q,
|
||||
},`,
|
||||
mode,
|
||||
mode,
|
||||
whichError(openNonExistentErr),
|
||||
whichError(readNonExistentErr),
|
||||
whichError(writeNonExistentErr),
|
||||
whichError(openExistingErr),
|
||||
whichError(readExistingErr),
|
||||
whichError(writeExistingErr),
|
||||
contents)
|
||||
}
|
||||
|
||||
func main() {
|
||||
fmt.Printf(`// data generated by go run make_open_tests.go | gofmt > open_test.go
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"io"
|
||||
)
|
||||
|
||||
// openTest describes a test of OpenFile
|
||||
type openTest struct{
|
||||
flags int
|
||||
what string
|
||||
openNonExistentErr error
|
||||
readNonExistentErr error
|
||||
writeNonExistentErr error
|
||||
openExistingErr error
|
||||
readExistingErr error
|
||||
writeExistingErr error
|
||||
contents string
|
||||
}
|
||||
|
||||
// openTests is a suite of tests for OpenFile with all possible
|
||||
// combination of flags. This obeys Unix semantics even on Windows.
|
||||
var openTests = []openTest{
|
||||
`)
|
||||
f, err := ioutil.TempFile("", "open-test")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fileName := f.Name()
|
||||
_ = f.Close()
|
||||
err = os.Remove(fileName)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to remove: %v", err)
|
||||
}
|
||||
for _, rwMode := range []int{os.O_RDONLY, os.O_WRONLY, os.O_RDWR} {
|
||||
flags0 := rwMode
|
||||
parts0 := []string{"os.O_RDONLY", "os.O_WRONLY", "os.O_RDWR"}[rwMode : rwMode+1]
|
||||
for _, appendMode := range []int{0, os.O_APPEND} {
|
||||
flags1 := flags0 | appendMode
|
||||
parts1 := parts0
|
||||
if appendMode != 0 {
|
||||
parts1 = append(parts1, "os.O_APPEND")
|
||||
}
|
||||
for _, createMode := range []int{0, os.O_CREATE} {
|
||||
flags2 := flags1 | createMode
|
||||
parts2 := parts1
|
||||
if createMode != 0 {
|
||||
parts2 = append(parts2, "os.O_CREATE")
|
||||
}
|
||||
for _, exclMode := range []int{0, os.O_EXCL} {
|
||||
flags3 := flags2 | exclMode
|
||||
parts3 := parts2
|
||||
if exclMode != 0 {
|
||||
parts3 = append(parts2, "os.O_EXCL")
|
||||
}
|
||||
for _, syncMode := range []int{0, os.O_SYNC} {
|
||||
flags4 := flags3 | syncMode
|
||||
parts4 := parts3
|
||||
if syncMode != 0 {
|
||||
parts4 = append(parts4, "os.O_SYNC")
|
||||
}
|
||||
for _, truncMode := range []int{0, os.O_TRUNC} {
|
||||
flags5 := flags4 | truncMode
|
||||
parts5 := parts4
|
||||
if truncMode != 0 {
|
||||
parts5 = append(parts5, "os.O_TRUNC")
|
||||
}
|
||||
textMode := strings.Join(parts5, "|")
|
||||
flags := flags5
|
||||
|
||||
test(fileName, flags, textMode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n}\n")
|
||||
}
|
||||
987
.rclone_repo/vfs/open_test.go
Executable file
987
.rclone_repo/vfs/open_test.go
Executable file
@@ -0,0 +1,987 @@
|
||||
// data generated by go run make_open_tests.go | gofmt > open_test.go
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// openTest describes a test of OpenFile
|
||||
type openTest struct {
|
||||
flags int
|
||||
what string
|
||||
openNonExistentErr error
|
||||
readNonExistentErr error
|
||||
writeNonExistentErr error
|
||||
openExistingErr error
|
||||
readExistingErr error
|
||||
writeExistingErr error
|
||||
contents string
|
||||
}
|
||||
|
||||
// openTests is a suite of tests for OpenFile with all possible
|
||||
// combination of flags. This obeys Unix semantics even on Windows.
|
||||
var openTests = []openTest{
|
||||
{
|
||||
flags: os.O_RDONLY,
|
||||
what: "os.O_RDONLY",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_EXCL,
|
||||
what: "os.O_RDONLY|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE,
|
||||
what: "os.O_RDONLY|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND,
|
||||
what: "os.O_RDONLY|os.O_APPEND",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_EXCL,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: EBADF,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: EBADF,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: EINVAL,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EINVAL,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY,
|
||||
what: "os.O_WRONLY",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_EXCL,
|
||||
what: "os.O_WRONLY|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE,
|
||||
what: "os.O_WRONLY|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HELlo",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND,
|
||||
what: "os.O_WRONLY|os.O_APPEND",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_EXCL,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: EBADF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_WRONLY | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: EBADF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR,
|
||||
what: "os.O_RDWR",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_EXCL,
|
||||
what: "os.O_RDWR|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE,
|
||||
what: "os.O_RDWR|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "heHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND,
|
||||
what: "os.O_RDWR|os.O_APPEND",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_EXCL,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_EXCL",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: ENOENT,
|
||||
readNonExistentErr: nil,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "helloHEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: nil,
|
||||
readExistingErr: io.EOF,
|
||||
writeExistingErr: nil,
|
||||
contents: "HEL",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_EXCL,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_EXCL",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
}, {
|
||||
flags: os.O_RDWR | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC,
|
||||
what: "os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_EXCL|os.O_SYNC|os.O_TRUNC",
|
||||
openNonExistentErr: nil,
|
||||
readNonExistentErr: io.EOF,
|
||||
writeNonExistentErr: nil,
|
||||
openExistingErr: EEXIST,
|
||||
readExistingErr: nil,
|
||||
writeExistingErr: nil,
|
||||
contents: "hello",
|
||||
},
|
||||
}
|
||||
171
.rclone_repo/vfs/rc.go
Executable file
171
.rclone_repo/vfs/rc.go
Executable file
@@ -0,0 +1,171 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Add remote control for the VFS
|
||||
func (vfs *VFS) addRC() {
|
||||
rc.Add(rc.Call{
|
||||
Path: "vfs/forget",
|
||||
Fn: func(in rc.Params) (out rc.Params, err error) {
|
||||
root, err := vfs.Root()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
forgotten := []string{}
|
||||
if len(in) == 0 {
|
||||
root.ForgetAll()
|
||||
} else {
|
||||
for k, v := range in {
|
||||
path, ok := v.(string)
|
||||
if !ok {
|
||||
return out, errors.Errorf("value must be string %q=%v", k, v)
|
||||
}
|
||||
path = strings.Trim(path, "/")
|
||||
if strings.HasPrefix(k, "file") {
|
||||
root.ForgetPath(path, fs.EntryObject)
|
||||
} else if strings.HasPrefix(k, "dir") {
|
||||
root.ForgetPath(path, fs.EntryDirectory)
|
||||
} else {
|
||||
return out, errors.Errorf("unknown key %q", k)
|
||||
}
|
||||
forgotten = append(forgotten, path)
|
||||
}
|
||||
}
|
||||
out = rc.Params{
|
||||
"forgotten": forgotten,
|
||||
}
|
||||
return out, nil
|
||||
},
|
||||
Title: "Forget files or directories in the directory cache.",
|
||||
Help: `
|
||||
This forgets the paths in the directory cache causing them to be
|
||||
re-read from the remote when needed.
|
||||
|
||||
If no paths are passed in then it will forget all the paths in the
|
||||
directory cache.
|
||||
|
||||
rclone rc vfs/forget
|
||||
|
||||
Otherwise pass files or dirs in as file=path or dir=path. Any
|
||||
parameter key starting with file will forget that file and any
|
||||
starting with dir will forget that dir, eg
|
||||
|
||||
rclone rc vfs/forget file=hello file2=goodbye dir=home/junk
|
||||
|
||||
`,
|
||||
})
|
||||
rc.Add(rc.Call{
|
||||
Path: "vfs/refresh",
|
||||
Fn: func(in rc.Params) (out rc.Params, err error) {
|
||||
root, err := vfs.Root()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
getDir := func(path string) (*Dir, error) {
|
||||
path = strings.Trim(path, "/")
|
||||
segments := strings.Split(path, "/")
|
||||
var node Node = root
|
||||
for _, s := range segments {
|
||||
if dir, ok := node.(*Dir); ok {
|
||||
node, err = dir.stat(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if dir, ok := node.(*Dir); ok {
|
||||
return dir, nil
|
||||
}
|
||||
return nil, EINVAL
|
||||
}
|
||||
|
||||
recursive := false
|
||||
{
|
||||
const k = "recursive"
|
||||
|
||||
if v, ok := in[k]; ok {
|
||||
s, ok := v.(string)
|
||||
if !ok {
|
||||
return out, errors.Errorf("value must be string %q=%v", k, v)
|
||||
}
|
||||
recursive, err = strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return out, errors.Errorf("invalid value %q=%v", k, v)
|
||||
}
|
||||
delete(in, k)
|
||||
}
|
||||
}
|
||||
|
||||
result := map[string]string{}
|
||||
if len(in) == 0 {
|
||||
if recursive {
|
||||
err = root.readDirTree()
|
||||
} else {
|
||||
err = root.readDir()
|
||||
}
|
||||
if err != nil {
|
||||
result[""] = err.Error()
|
||||
} else {
|
||||
result[""] = "OK"
|
||||
}
|
||||
} else {
|
||||
for k, v := range in {
|
||||
path, ok := v.(string)
|
||||
if !ok {
|
||||
return out, errors.Errorf("value must be string %q=%v", k, v)
|
||||
}
|
||||
if strings.HasPrefix(k, "dir") {
|
||||
dir, err := getDir(path)
|
||||
if err != nil {
|
||||
result[path] = err.Error()
|
||||
} else {
|
||||
if recursive {
|
||||
err = dir.readDirTree()
|
||||
} else {
|
||||
err = dir.readDir()
|
||||
}
|
||||
if err != nil {
|
||||
result[path] = err.Error()
|
||||
} else {
|
||||
result[path] = "OK"
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
return out, errors.Errorf("unknown key %q", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
out = rc.Params{
|
||||
"result": result,
|
||||
}
|
||||
return out, nil
|
||||
},
|
||||
Title: "Refresh the directory cache.",
|
||||
Help: `
|
||||
This reads the directories for the specified paths and freshens the
|
||||
directory cache.
|
||||
|
||||
If no paths are passed in then it will refresh the root directory.
|
||||
|
||||
rclone rc vfs/refresh
|
||||
|
||||
Otherwise pass directories in as dir=path. Any parameter key
|
||||
starting with dir will refresh that directory, eg
|
||||
|
||||
rclone rc vfs/refresh dir=home/junk dir2=data/misc
|
||||
|
||||
If the parameter recursive=true is given the whole directory tree
|
||||
will get refreshed. This refresh will use --fast-list if enabled.
|
||||
|
||||
`,
|
||||
})
|
||||
}
|
||||
427
.rclone_repo/vfs/read.go
Executable file
427
.rclone_repo/vfs/read.go
Executable file
@@ -0,0 +1,427 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/chunkedreader"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ReadFileHandle is an open for read file handle on a File
|
||||
type ReadFileHandle struct {
|
||||
baseHandle
|
||||
mu sync.Mutex
|
||||
closed bool // set if handle has been closed
|
||||
r *accounting.Account
|
||||
readCalled bool // set if read has been called
|
||||
size int64 // size of the object
|
||||
offset int64 // offset of read of o
|
||||
roffset int64 // offset of Read() calls
|
||||
noSeek bool
|
||||
file *File
|
||||
hash *hash.MultiHasher
|
||||
opened bool
|
||||
remote string
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var (
|
||||
_ io.Reader = (*ReadFileHandle)(nil)
|
||||
_ io.ReaderAt = (*ReadFileHandle)(nil)
|
||||
_ io.Seeker = (*ReadFileHandle)(nil)
|
||||
_ io.Closer = (*ReadFileHandle)(nil)
|
||||
)
|
||||
|
||||
func newReadFileHandle(f *File) (*ReadFileHandle, error) {
|
||||
var mhash *hash.MultiHasher
|
||||
var err error
|
||||
o := f.getObject()
|
||||
if !f.d.vfs.Opt.NoChecksum {
|
||||
mhash, err = hash.NewMultiHasherTypes(o.Fs().Hashes())
|
||||
if err != nil {
|
||||
fs.Errorf(o.Fs(), "newReadFileHandle hash error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
fh := &ReadFileHandle{
|
||||
remote: o.Remote(),
|
||||
noSeek: f.d.vfs.Opt.NoSeek,
|
||||
file: f,
|
||||
hash: mhash,
|
||||
size: nonNegative(o.Size()),
|
||||
}
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// openPending opens the file if there is a pending open
|
||||
// call with the lock held
|
||||
func (fh *ReadFileHandle) openPending() (err error) {
|
||||
if fh.opened {
|
||||
return nil
|
||||
}
|
||||
o := fh.file.getObject()
|
||||
r, err := chunkedreader.New(o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit)).Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fh.r = accounting.NewAccount(r, o).WithBuffer() // account the transfer
|
||||
fh.opened = true
|
||||
accounting.Stats.Transferring(o.Remote())
|
||||
return nil
|
||||
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *ReadFileHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *ReadFileHandle>"
|
||||
}
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.file == nil {
|
||||
return "<nil *ReadFileHandle.file>"
|
||||
}
|
||||
return fh.file.String() + " (r)"
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (fh *ReadFileHandle) Node() Node {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.file
|
||||
}
|
||||
|
||||
// seek to a new offset
|
||||
//
|
||||
// if reopen is true, then we won't attempt to use an io.Seeker interface
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *ReadFileHandle) seek(offset int64, reopen bool) (err error) {
|
||||
if fh.noSeek {
|
||||
return ESPIPE
|
||||
}
|
||||
fh.hash = nil
|
||||
if !reopen {
|
||||
ar := fh.r.GetAsyncReader()
|
||||
// try to fullfill the seek with buffer discard
|
||||
if ar != nil && ar.SkipBytes(int(offset-fh.offset)) {
|
||||
fh.offset = offset
|
||||
return nil
|
||||
}
|
||||
}
|
||||
fh.r.StopBuffering() // stop the background reading first
|
||||
oldReader := fh.r.GetReader()
|
||||
r, ok := oldReader.(*chunkedreader.ChunkedReader)
|
||||
if !ok {
|
||||
fs.Logf(fh.remote, "ReadFileHandle.Read expected reader to be a ChunkedReader, got %T", oldReader)
|
||||
reopen = true
|
||||
}
|
||||
if !reopen {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d (fs.RangeSeeker)", fh.offset, offset)
|
||||
_, err = r.RangeSeek(offset, io.SeekStart, -1)
|
||||
if err != nil {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Read fs.RangeSeeker failed: %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
|
||||
// close old one
|
||||
err = oldReader.Close()
|
||||
if err != nil {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Read seek close old failed: %v", err)
|
||||
}
|
||||
// re-open with a seek
|
||||
o := fh.file.getObject()
|
||||
r = chunkedreader.New(o, int64(fh.file.d.vfs.Opt.ChunkSize), int64(fh.file.d.vfs.Opt.ChunkSizeLimit))
|
||||
_, err := r.Seek(offset, 0)
|
||||
if err != nil {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
|
||||
return err
|
||||
}
|
||||
r, err = r.Open()
|
||||
if err != nil {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Read seek failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
fh.r.UpdateReader(r)
|
||||
fh.offset = offset
|
||||
return nil
|
||||
}
|
||||
|
||||
// Seek the file - returns ESPIPE if seeking isn't possible
|
||||
func (fh *ReadFileHandle) Seek(offset int64, whence int) (n int64, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.noSeek {
|
||||
return 0, ESPIPE
|
||||
}
|
||||
size := fh.size
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
fh.roffset = 0
|
||||
case io.SeekEnd:
|
||||
fh.roffset = size
|
||||
}
|
||||
fh.roffset += offset
|
||||
// we don't check the offset - the next Read will
|
||||
return fh.roffset, nil
|
||||
}
|
||||
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the
|
||||
// underlying input source. It returns the number of bytes read (0 <=
|
||||
// n <= len(p)) and any error encountered.
|
||||
//
|
||||
// When ReadAt returns n < len(p), it returns a non-nil error
|
||||
// explaining why more bytes were not returned. In this respect,
|
||||
// ReadAt is stricter than Read.
|
||||
//
|
||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
||||
// space during the call. If some data is available but not len(p)
|
||||
// bytes, ReadAt blocks until either all the data is available or an
|
||||
// error occurs. In this respect ReadAt is different from Read.
|
||||
//
|
||||
// If the n = len(p) bytes returned by ReadAt are at the end of the
|
||||
// input source, ReadAt may return either err == EOF or err == nil.
|
||||
//
|
||||
// If ReadAt is reading from an input source with a seek offset,
|
||||
// ReadAt should not affect nor be affected by the underlying seek
|
||||
// offset.
|
||||
//
|
||||
// Clients of ReadAt can execute parallel ReadAt calls on the same
|
||||
// input source.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (fh *ReadFileHandle) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.readAt(p, off)
|
||||
}
|
||||
|
||||
// Implementation of ReadAt - call with lock held
|
||||
func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
|
||||
err = fh.openPending() // FIXME pending open could be more efficient in the presense of seek (and retries)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// fs.Debugf(fh.remote, "ReadFileHandle.Read size %d offset %d", reqSize, off)
|
||||
if fh.closed {
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", EBADF)
|
||||
return 0, ECLOSED
|
||||
}
|
||||
doSeek := off != fh.offset
|
||||
if doSeek && fh.noSeek {
|
||||
return 0, ESPIPE
|
||||
}
|
||||
var newOffset int64
|
||||
retries := 0
|
||||
reqSize := len(p)
|
||||
doReopen := false
|
||||
for {
|
||||
if doSeek {
|
||||
// Are we attempting to seek beyond the end of the
|
||||
// file - if so just return EOF leaving the underlying
|
||||
// file in an unchanged state.
|
||||
if off >= fh.size {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", off, fh.size)
|
||||
return 0, io.EOF
|
||||
}
|
||||
// Otherwise do the seek
|
||||
err = fh.seek(off, doReopen)
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
if err == nil {
|
||||
if reqSize > 0 {
|
||||
fh.readCalled = true
|
||||
}
|
||||
n, err = io.ReadFull(fh.r, p)
|
||||
newOffset = fh.offset + int64(n)
|
||||
// if err == nil && rand.Intn(10) == 0 {
|
||||
// err = errors.New("random error")
|
||||
// }
|
||||
if err == nil {
|
||||
break
|
||||
} else if (err == io.ErrUnexpectedEOF || err == io.EOF) && newOffset == fh.size {
|
||||
// Have read to end of file - reset error
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
if retries >= fs.Config.LowLevelRetries {
|
||||
break
|
||||
}
|
||||
retries++
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
|
||||
doSeek = true
|
||||
doReopen = true
|
||||
}
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", err)
|
||||
} else {
|
||||
fh.offset = newOffset
|
||||
// fs.Debugf(fh.remote, "ReadFileHandle.Read OK")
|
||||
|
||||
if fh.hash != nil {
|
||||
_, err = fh.hash.Write(p[:n])
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Read HashError: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// If we have no error and we didn't fill the buffer, must be EOF
|
||||
if n != len(p) {
|
||||
err = io.EOF
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (fh *ReadFileHandle) checkHash() error {
|
||||
if fh.hash == nil || !fh.readCalled || fh.offset < fh.size {
|
||||
return nil
|
||||
}
|
||||
|
||||
o := fh.file.getObject()
|
||||
for hashType, dstSum := range fh.hash.Sums() {
|
||||
srcSum, err := o.Hash(hashType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !hash.Equals(dstSum, srcSum) {
|
||||
return errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, dstSum, srcSum)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads up to len(p) bytes into p. It returns the number of bytes read (0
|
||||
// <= n <= len(p)) and any error encountered. Even if Read returns n < len(p),
|
||||
// it may use all of p as scratch space during the call. If some data is
|
||||
// available but not len(p) bytes, Read conventionally returns what is
|
||||
// available instead of waiting for more.
|
||||
//
|
||||
// When Read encounters an error or end-of-file condition after successfully
|
||||
// reading n > 0 bytes, it returns the number of bytes read. It may return the
|
||||
// (non-nil) error from the same call or return the error (and n == 0) from a
|
||||
// subsequent call. An instance of this general case is that a Reader returning
|
||||
// a non-zero number of bytes at the end of the input stream may return either
|
||||
// err == EOF or err == nil. The next Read should return 0, EOF.
|
||||
//
|
||||
// Callers should always process the n > 0 bytes returned before considering
|
||||
// the error err. Doing so correctly handles I/O errors that happen after
|
||||
// reading some bytes and also both of the allowed EOF behaviors.
|
||||
//
|
||||
// Implementations of Read are discouraged from returning a zero byte count
|
||||
// with a nil error, except when len(p) == 0. Callers should treat a return of
|
||||
// 0 and nil as indicating that nothing happened; in particular it does not
|
||||
// indicate EOF.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (fh *ReadFileHandle) Read(p []byte) (n int, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.roffset >= fh.size {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err = fh.readAt(p, fh.roffset)
|
||||
fh.roffset += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// close the file handle returning EBADF if it has been
|
||||
// closed already.
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *ReadFileHandle) close() error {
|
||||
if fh.closed {
|
||||
return ECLOSED
|
||||
}
|
||||
fh.closed = true
|
||||
|
||||
if fh.opened {
|
||||
accounting.Stats.DoneTransferring(fh.remote, true)
|
||||
// Close first so that we have hashes
|
||||
err := fh.r.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Now check the hash
|
||||
err = fh.checkHash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the file
|
||||
func (fh *ReadFileHandle) Close() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.close()
|
||||
}
|
||||
|
||||
// Flush is called each time the file or directory is closed.
|
||||
// Because there can be multiple file descriptors referring to a
|
||||
// single opened file, Flush can be called multiple times.
|
||||
func (fh *ReadFileHandle) Flush() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
// fs.Debugf(fh.remote, "ReadFileHandle.Flush")
|
||||
|
||||
if err := fh.checkHash(); err != nil {
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Flush error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// fs.Debugf(fh.remote, "ReadFileHandle.Flush OK")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *ReadFileHandle) Release() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Release nothing to do")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(fh.remote, "ReadFileHandle.Release closing")
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "ReadFileHandle.Release error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.remote, "ReadFileHandle.Release OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying file
|
||||
func (fh *ReadFileHandle) Size() int64 {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.size
|
||||
}
|
||||
|
||||
// Stat returns info about the file
|
||||
func (fh *ReadFileHandle) Stat() (os.FileInfo, error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.file, nil
|
||||
}
|
||||
238
.rclone_repo/vfs/read_test.go
Executable file
238
.rclone_repo/vfs/read_test.go
Executable file
@@ -0,0 +1,238 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Open a file for write
|
||||
func readHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *ReadFileHandle) {
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
file1 := r.WriteObject("dir/file1", "0123456789abcdef", t1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777)
|
||||
require.NoError(t, err)
|
||||
fh, ok := h.(*ReadFileHandle)
|
||||
require.True(t, ok)
|
||||
|
||||
return vfs, fh
|
||||
}
|
||||
|
||||
// read data from the string
|
||||
func readString(t *testing.T, fh *ReadFileHandle, n int) string {
|
||||
buf := make([]byte, n)
|
||||
n, err := fh.Read(buf)
|
||||
if err != io.EOF {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
return string(buf[:n])
|
||||
}
|
||||
|
||||
func TestReadFileHandleMethods(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, fh := readHandleCreate(t, r)
|
||||
|
||||
// String
|
||||
assert.Equal(t, "dir/file1 (r)", fh.String())
|
||||
assert.Equal(t, "<nil *ReadFileHandle>", (*ReadFileHandle)(nil).String())
|
||||
assert.Equal(t, "<nil *ReadFileHandle.file>", new(ReadFileHandle).String())
|
||||
|
||||
// Node
|
||||
node := fh.Node()
|
||||
assert.Equal(t, "file1", node.Name())
|
||||
|
||||
// Size
|
||||
assert.Equal(t, int64(16), fh.Size())
|
||||
|
||||
// Read 1
|
||||
assert.Equal(t, "0", readString(t, fh, 1))
|
||||
|
||||
// Read remainder
|
||||
assert.Equal(t, "123456789abcdef", readString(t, fh, 256))
|
||||
|
||||
// Read EOF
|
||||
buf := make([]byte, 16)
|
||||
_, err := fh.Read(buf)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
|
||||
// Stat
|
||||
var fi os.FileInfo
|
||||
fi, err = fh.Stat()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(16), fi.Size())
|
||||
assert.Equal(t, "file1", fi.Name())
|
||||
|
||||
// Close
|
||||
assert.False(t, fh.closed)
|
||||
assert.Equal(t, nil, fh.Close())
|
||||
assert.True(t, fh.closed)
|
||||
|
||||
// Close again
|
||||
assert.Equal(t, ECLOSED, fh.Close())
|
||||
}
|
||||
|
||||
func TestReadFileHandleSeek(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
_, fh := readHandleCreate(t, r)
|
||||
|
||||
assert.Equal(t, "0", readString(t, fh, 1))
|
||||
|
||||
// 0 means relative to the origin of the file,
|
||||
n, err := fh.Seek(5, io.SeekStart)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(5), n)
|
||||
assert.Equal(t, "5", readString(t, fh, 1))
|
||||
|
||||
// 1 means relative to the current offset
|
||||
n, err = fh.Seek(-3, io.SeekCurrent)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(3), n)
|
||||
assert.Equal(t, "3", readString(t, fh, 1))
|
||||
|
||||
// 2 means relative to the end.
|
||||
n, err = fh.Seek(-3, io.SeekEnd)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(13), n)
|
||||
assert.Equal(t, "d", readString(t, fh, 1))
|
||||
|
||||
// Seek off the end
|
||||
_, err = fh.Seek(100, io.SeekStart)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Get the error on read
|
||||
buf := make([]byte, 16)
|
||||
l, err := fh.Read(buf)
|
||||
assert.Equal(t, io.EOF, err)
|
||||
assert.Equal(t, 0, l)
|
||||
|
||||
// Check if noSeek is set we get an error
|
||||
fh.noSeek = true
|
||||
_, err = fh.Seek(0, io.SeekStart)
|
||||
assert.Equal(t, ESPIPE, err)
|
||||
|
||||
// Close
|
||||
assert.Equal(t, nil, fh.Close())
|
||||
}
|
||||
|
||||
// TestReadFileHandleReadAt checks ReadAt at various offsets on a
// 16 byte file ("0123456789abcdef"), including reads off the end,
// the noSeek restriction and reading after Close.
func TestReadFileHandleReadAt(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, fh := readHandleCreate(t, r)

	// read from start
	buf := make([]byte, 1)
	n, err := fh.ReadAt(buf, 0)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "0", string(buf[:n]))

	// seek forwards
	n, err = fh.ReadAt(buf, 5)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "5", string(buf[:n]))

	// seek backwards
	n, err = fh.ReadAt(buf, 1)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "1", string(buf[:n]))

	// read exactly to the end
	buf = make([]byte, 6)
	n, err = fh.ReadAt(buf, 10)
	require.NoError(t, err)
	assert.Equal(t, 6, n)
	assert.Equal(t, "abcdef", string(buf[:n]))

	// read off the end - expect the available bytes plus io.EOF
	buf = make([]byte, 256)
	n, err = fh.ReadAt(buf, 10)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 6, n)
	assert.Equal(t, "abcdef", string(buf[:n]))

	// read starting off the end
	n, err = fh.ReadAt(buf, 100)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// check if noSeek is set we get an error (ReadAt implies seeking)
	fh.noSeek = true
	_, err = fh.ReadAt(buf, 100)
	assert.Equal(t, ESPIPE, err)

	// Properly close the file
	assert.NoError(t, fh.Close())

	// check reading on closed file
	fh.noSeek = true
	_, err = fh.ReadAt(buf, 100)
	assert.Equal(t, ECLOSED, err)
}
|
||||
|
||||
// TestReadFileHandleFlush checks that Flush on a read handle never
// closes the handle, whether or not Read has been called.
func TestReadFileHandleFlush(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, fh := readHandleCreate(t, r)

	// Check Flush does nothing if read not called
	err := fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Read data
	buf := make([]byte, 256)
	n, err := fh.Read(buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 16, n)

	// Check Flush does nothing if read called
	err = fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Check flush does nothing if called again
	err = fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Properly close the file
	assert.NoError(t, fh.Close())
}
|
||||
|
||||
// TestReadFileHandleRelease checks that Release closes a read handle
// once data has been read, and is a no-op before any read and when
// called a second time.
func TestReadFileHandleRelease(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, fh := readHandleCreate(t, r)

	// Check Release does nothing if file not read from
	err := fh.Release()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Read data
	buf := make([]byte, 256)
	n, err := fh.Read(buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 16, n)

	// Check Release closes file
	err = fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check Release does nothing if called again
	err = fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)
}
|
||||
538
.rclone_repo/vfs/read_write.go
Executable file
538
.rclone_repo/vfs/read_write.go
Executable file
@@ -0,0 +1,538 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/log"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// RWFileHandle is a handle that can be open for read and write.
//
// It will be open to a temporary file which, when closed, will be
// transferred to the remote.
type RWFileHandle struct {
	*os.File                // the cached copy - nil until openPending has run
	mu          sync.Mutex  // protects the fields below
	closed      bool        // set if handle has been closed
	remote      string      // remote path of the file
	file        *File       // the file object this handle refers to
	d           *Dir        // directory the file lives in
	opened      bool        // set once the cache file has been opened
	flags       int         // open flags
	osPath      string      // path to the file in the cache
	writeCalled bool        // if any Write() methods have been called
	changed     bool        // file contents was changed in any other way
}

// Check interfaces are satisfied at compile time
var (
	_ io.Reader   = (*RWFileHandle)(nil)
	_ io.ReaderAt = (*RWFileHandle)(nil)
	_ io.Writer   = (*RWFileHandle)(nil)
	_ io.WriterAt = (*RWFileHandle)(nil)
	_ io.Seeker   = (*RWFileHandle)(nil)
	_ io.Closer   = (*RWFileHandle)(nil)
)
|
||||
|
||||
// newRWFileHandle creates a read/write handle on the file, backed by
// a file in the cache directory.
//
// It registers the file as open in the cache, creates the cache
// directory and, for O_TRUNC or newly created files, opens the cache
// file immediately so the truncation/creation is visible right away.
func newRWFileHandle(d *Dir, f *File, remote string, flags int) (fh *RWFileHandle, err error) {
	// if O_CREATE and O_EXCL are set and if path already exists, then return EEXIST
	if flags&(os.O_CREATE|os.O_EXCL) == os.O_CREATE|os.O_EXCL && f.exists() {
		return nil, EEXIST
	}

	fh = &RWFileHandle{
		file:   f,
		d:      d,
		remote: remote,
		flags:  flags,
	}

	// mark the file as open in the cache - must be done before the mkdir
	fh.d.vfs.cache.open(fh.remote)

	// Make a place for the file
	fh.osPath, err = d.vfs.cache.mkdir(remote)
	if err != nil {
		// undo the cache open above on failure
		fh.d.vfs.cache.close(fh.remote)
		return nil, errors.Wrap(err, "open RW handle failed to make cache directory")
	}

	rdwrMode := fh.flags & accessModeMask
	if rdwrMode != os.O_RDONLY {
		fh.file.addWriter(fh)
	}

	// truncate or create files immediately to prepare the cache
	if fh.flags&os.O_TRUNC != 0 || fh.flags&(os.O_CREATE) != 0 && !f.exists() {
		if err := fh.openPending(false); err != nil {
			fh.file.delWriter(fh, false)
			return nil, err
		}
	}

	return fh, nil
}
|
||||
|
||||
// copy an object to or from the remote while accounting for it
|
||||
func copyObj(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
|
||||
if operations.NeedTransfer(dst, src) {
|
||||
accounting.Stats.Transferring(src.Remote())
|
||||
newDst, err = operations.Copy(f, dst, remote, src)
|
||||
accounting.Stats.DoneTransferring(src.Remote(), err == nil)
|
||||
} else {
|
||||
newDst = dst
|
||||
}
|
||||
return newDst, err
|
||||
}
|
||||
|
||||
// openPending opens the file if there is a pending open
//
// If truncate is true (or O_TRUNC is set) the cached contents are
// discarded; otherwise the remote object is fetched into the cache
// first so existing data can be read.
//
// call with the lock held
func (fh *RWFileHandle) openPending(truncate bool) (err error) {
	if fh.opened {
		return nil
	}

	fh.file.muRW.Lock()
	defer fh.file.muRW.Unlock()

	o := fh.file.getObject()

	var fd *os.File
	cacheFileOpenFlags := fh.flags
	// if not truncating the file, need to read it first
	if fh.flags&os.O_TRUNC == 0 && !truncate {
		// If the remote object exists AND its cached file exists locally AND there are no
		// other RW handles with it open, then attempt to update it.
		if o != nil && fh.file.rwOpens() == 0 {
			cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote)
			if err == nil && cacheObj != nil {
				_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o)
				if err != nil {
					return errors.Wrap(err, "open RW handle failed to update cached file")
				}
			}
		}

		// try to open an existing cache file (strip O_CREATE so a
		// missing cache file is reported as IsNotExist)
		fd, err = os.OpenFile(fh.osPath, cacheFileOpenFlags&^os.O_CREATE, 0600)
		if os.IsNotExist(err) {
			// cache file does not exist, so need to fetch it if we have an object to fetch
			// it from
			if o != nil {
				_, err = copyObj(fh.d.vfs.cache.f, nil, fh.remote, o)
				if err != nil {
					cause := errors.Cause(err)
					if cause != fs.ErrorObjectNotFound && cause != fs.ErrorDirNotFound {
						// return any non NotFound errors
						return errors.Wrap(err, "open RW handle failed to cache file")
					}
					// continue here with err=fs.Error{Object,Dir}NotFound
				}
			}
			// if err == nil, then we have cached the file successfully, otherwise err is
			// indicating some kind of non existent file/directory either
			// os.IsNotExist(err) or fs.Error{Object,Dir}NotFound
			if err != nil {
				if fh.flags&os.O_CREATE != 0 {
					// if the object wasn't found AND O_CREATE is set then
					// ignore error as we are about to create the file
					fh.file.setSize(0)
					fh.changed = true
				} else {
					return errors.Wrap(err, "open RW handle failed to cache file")
				}
			}
		} else if err != nil {
			return errors.Wrap(err, "cache open file failed")
		} else {
			fs.Debugf(fh.logPrefix(), "Opened existing cached copy with flags=%s", decodeOpenFlags(fh.flags))
		}
	} else {
		// Set the size to 0 since we are truncating and flag we need to write it back
		fh.file.setSize(0)
		fh.changed = true
		if fh.flags&os.O_CREATE == 0 && fh.file.exists() {
			// create an empty file if it exists on the source
			err = ioutil.WriteFile(fh.osPath, []byte{}, 0600)
			if err != nil {
				return errors.Wrap(err, "cache open failed to create zero length file")
			}
		}
		// Windows doesn't seem to deal well with O_TRUNC and
		// certain access modes so truncate the file if it
		// exists in these cases.
		if runtime.GOOS == "windows" && fh.flags&os.O_APPEND != 0 {
			cacheFileOpenFlags &^= os.O_TRUNC
			_, err = os.Stat(fh.osPath)
			if err == nil {
				err = os.Truncate(fh.osPath, 0)
				if err != nil {
					return errors.Wrap(err, "cache open failed to truncate")
				}
			}
		}
	}

	// fd is still nil on the truncate path (and the Windows special
	// case above) - open the cache file now
	if fd == nil {
		fs.Debugf(fh.logPrefix(), "Opening cached copy with flags=%s", decodeOpenFlags(fh.flags))
		fd, err = os.OpenFile(fh.osPath, cacheFileOpenFlags, 0600)
		if err != nil {
			return errors.Wrap(err, "cache open file failed")
		}
	}
	fh.File = fd
	fh.opened = true
	fh.file.addRWOpen()
	fh.d.addObject(fh.file) // make sure the directory has this object in it now
	return nil
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *RWFileHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *RWFileHandle>"
|
||||
}
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.file == nil {
|
||||
return "<nil *RWFileHandle.file>"
|
||||
}
|
||||
return fh.file.String() + " (rw)"
|
||||
}
|
||||
|
||||
// Node returns the Node associated with this - satisfies Noder interface
func (fh *RWFileHandle) Node() Node {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	return fh.file
}
|
||||
|
||||
// Returns whether the file needs to be written back.
|
||||
//
|
||||
// If write hasn't been called and the file hasn't been changed in any other
|
||||
// way we haven't modified it so we don't need to transfer it
|
||||
//
|
||||
// Must be called with fh.mu held
|
||||
func (fh *RWFileHandle) modified() bool {
|
||||
if !fh.writeCalled && !fh.changed {
|
||||
fs.Debugf(fh.logPrefix(), "not modified so not transferring")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// close the file handle returning ECLOSED if it has been
// closed already.
//
// Must be called with fh.mu held
//
// Note that we leave the file around in the cache on error conditions
// to give the user a chance to recover it.
func (fh *RWFileHandle) close() (err error) {
	defer log.Trace(fh.logPrefix(), "")("err=%v", &err)
	fh.file.muRW.Lock()
	defer fh.file.muRW.Unlock()

	if fh.closed {
		return ECLOSED
	}
	fh.closed = true
	// always release the RW open count and the cache reference,
	// whatever happens below
	defer func() {
		if fh.opened {
			fh.file.delRWOpen()
		}
		fh.d.vfs.cache.close(fh.remote)
	}()
	rdwrMode := fh.flags & accessModeMask
	writer := rdwrMode != os.O_RDONLY

	// If read only then return
	if !fh.opened && rdwrMode == os.O_RDONLY {
		return nil
	}

	isCopied := false
	if writer {
		// tell the file we are done writing; isCopied is true if the
		// contents need transferring to the remote
		isCopied = fh.file.delWriter(fh, fh.modified())
		defer fh.file.finishWriterClose()
	}

	// If we aren't creating or truncating the file then
	// we haven't modified it so don't need to transfer it
	if fh.flags&(os.O_CREATE|os.O_TRUNC) != 0 {
		if err := fh.openPending(false); err != nil {
			return err
		}
	}

	// update the file size from the cache file before closing it
	if writer && fh.opened {
		fi, err := fh.File.Stat() // note: err deliberately shadowed - a stat failure is non-fatal
		if err != nil {
			fs.Errorf(fh.logPrefix(), "Failed to stat cache file: %v", err)
		} else {
			fh.file.setSize(fi.Size())
		}
	}

	// Close the underlying file
	if fh.opened {
		err = fh.File.Close()
		if err != nil {
			err = errors.Wrap(err, "failed to close cache file")
			return err
		}
	}

	if isCopied {
		// Transfer the temp file to the remote
		cacheObj, err := fh.d.vfs.cache.f.NewObject(fh.remote)
		if err != nil {
			err = errors.Wrap(err, "failed to find cache file")
			fs.Errorf(fh.logPrefix(), "%v", err)
			return err
		}

		o, err := copyObj(fh.d.vfs.f, fh.file.getObject(), fh.remote, cacheObj)
		if err != nil {
			err = errors.Wrap(err, "failed to transfer file from cache to remote")
			fs.Errorf(fh.logPrefix(), "%v", err)
			return err
		}
		fh.file.setObject(o)
		fs.Debugf(o, "transferred to remote")
	}

	return nil
}
|
||||
|
||||
// Close closes the file, transferring it to the remote if it was
// modified (see close), and returns ECLOSED if already closed.
func (fh *RWFileHandle) Close() error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	return fh.close()
}
|
||||
|
||||
// Flush is called each time the file or directory is closed.
|
||||
// Because there can be multiple file descriptors referring to a
|
||||
// single opened file, Flush can be called multiple times.
|
||||
func (fh *RWFileHandle) Flush() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.logPrefix(), "RWFileHandle.Flush nothing to do")
|
||||
return nil
|
||||
}
|
||||
// fs.Debugf(fh.logPrefix(), "RWFileHandle.Flush")
|
||||
if !fh.opened {
|
||||
fs.Debugf(fh.logPrefix(), "RWFileHandle.Flush ignoring flush on unopened handle")
|
||||
return nil
|
||||
}
|
||||
|
||||
// If Write hasn't been called then ignore the Flush - Release
|
||||
// will pick it up
|
||||
if !fh.writeCalled {
|
||||
fs.Debugf(fh.logPrefix(), "RWFileHandle.Flush ignoring flush on unwritten handle")
|
||||
return nil
|
||||
}
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.logPrefix(), "RWFileHandle.Flush error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.logPrefix(), "RWFileHandle.Flush OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *RWFileHandle) Release() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.logPrefix(), "RWFileHandle.Release nothing to do")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(fh.logPrefix(), "RWFileHandle.Release closing")
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.logPrefix(), "RWFileHandle.Release error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.logPrefix(), "RWFileHandle.Release OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying file
|
||||
func (fh *RWFileHandle) Size() int64 {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if !fh.opened {
|
||||
return fh.file.Size()
|
||||
}
|
||||
fi, err := fh.File.Stat()
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return fi.Size()
|
||||
}
|
||||
|
||||
// Stat returns info about the file - the *File satisfies os.FileInfo.
func (fh *RWFileHandle) Stat() (os.FileInfo, error) {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	return fh.file, nil
}
|
||||
|
||||
// readFn is a general purpose read function - pass in a closure to do
|
||||
// the actual read
|
||||
func (fh *RWFileHandle) readFn(read func() (int, error)) (n int, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
return 0, ECLOSED
|
||||
}
|
||||
if fh.flags&accessModeMask == os.O_WRONLY {
|
||||
return 0, EBADF
|
||||
}
|
||||
if err = fh.openPending(false); err != nil {
|
||||
return n, err
|
||||
}
|
||||
return read()
|
||||
}
|
||||
|
||||
// Read bytes from the file
|
||||
func (fh *RWFileHandle) Read(b []byte) (n int, err error) {
|
||||
return fh.readFn(func() (int, error) {
|
||||
return fh.File.Read(b)
|
||||
})
|
||||
}
|
||||
|
||||
// ReadAt bytes from the file at off
|
||||
func (fh *RWFileHandle) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return fh.readFn(func() (int, error) {
|
||||
return fh.File.ReadAt(b, off)
|
||||
})
|
||||
}
|
||||
|
||||
// Seek to new file position
|
||||
func (fh *RWFileHandle) Seek(offset int64, whence int) (ret int64, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
return 0, ECLOSED
|
||||
}
|
||||
if !fh.opened && offset == 0 && whence != 2 {
|
||||
return 0, nil
|
||||
}
|
||||
if err = fh.openPending(false); err != nil {
|
||||
return ret, err
|
||||
}
|
||||
return fh.File.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// writeFn general purpose write call
|
||||
//
|
||||
// Pass a closure to do the actual write
|
||||
func (fh *RWFileHandle) writeFn(write func() error) (err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
return ECLOSED
|
||||
}
|
||||
if fh.flags&accessModeMask == os.O_RDONLY {
|
||||
return EBADF
|
||||
}
|
||||
if err = fh.openPending(false); err != nil {
|
||||
return err
|
||||
}
|
||||
fh.writeCalled = true
|
||||
err = write()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fi, err := fh.File.Stat()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat cache file")
|
||||
}
|
||||
fh.file.setSize(fi.Size())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write bytes to the file
|
||||
func (fh *RWFileHandle) Write(b []byte) (n int, err error) {
|
||||
err = fh.writeFn(func() error {
|
||||
n, err = fh.File.Write(b)
|
||||
return err
|
||||
})
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteAt bytes to the file at off
|
||||
func (fh *RWFileHandle) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
err = fh.writeFn(func() error {
|
||||
n, err = fh.File.WriteAt(b, off)
|
||||
return err
|
||||
})
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteString a string to the file
|
||||
func (fh *RWFileHandle) WriteString(s string) (n int, err error) {
|
||||
err = fh.writeFn(func() error {
|
||||
n, err = fh.File.WriteString(s)
|
||||
return err
|
||||
})
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Truncate file to given size.
//
// Marks the handle as changed so the contents are transferred back to
// the remote on close.
func (fh *RWFileHandle) Truncate(size int64) (err error) {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	if fh.closed {
		return ECLOSED
	}
	// a truncate to 0 doesn't need the old contents fetched first
	if err = fh.openPending(size == 0); err != nil {
		return err
	}
	fh.changed = true
	fh.file.setSize(size)
	return fh.File.Truncate(size)
}
|
||||
|
||||
// Sync commits the current contents of the file to stable storage. Typically,
|
||||
// this means flushing the file system's in-memory copy of recently written
|
||||
// data to disk.
|
||||
func (fh *RWFileHandle) Sync() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
return ECLOSED
|
||||
}
|
||||
if !fh.opened {
|
||||
return nil
|
||||
}
|
||||
if fh.flags&accessModeMask == os.O_RDONLY {
|
||||
return nil
|
||||
}
|
||||
return fh.File.Sync()
|
||||
}
|
||||
|
||||
// logPrefix returns the label used for log messages for this handle:
// the remote path plus the handle's address to disambiguate handles
// on the same file.
func (fh *RWFileHandle) logPrefix() string {
	return fmt.Sprintf("%s(%p)", fh.remote, fh)
}
|
||||
595
.rclone_repo/vfs/read_write_test.go
Executable file
595
.rclone_repo/vfs/read_write_test.go
Executable file
@@ -0,0 +1,595 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// cleanup cleans up the VFS cache, shuts the VFS down and finalises
// the test run. Use with defer after creating a VFS in a test.
func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) {
	assert.NoError(t, vfs.CleanUp())
	vfs.Shutdown()
	r.Finalise()
}
|
||||
|
||||
// Open an existing file for read only, returning the VFS and the
// RWFileHandle (CacheModeFull makes OpenFile return a RW handle).
func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
	opt := DefaultOpt
	opt.CacheMode = CacheModeFull
	vfs := New(r.Fremote, &opt)

	file1 := r.WriteObject("dir/file1", "0123456789abcdef", t1)
	fstest.CheckItems(t, r.Fremote, file1)

	h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777)
	require.NoError(t, err)
	fh, ok := h.(*RWFileHandle)
	require.True(t, ok)

	return vfs, fh
}
|
||||
|
||||
// Open a new file for write only, returning the VFS and the
// RWFileHandle (CacheModeFull makes OpenFile return a RW handle).
func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
	opt := DefaultOpt
	opt.CacheMode = CacheModeFull
	vfs := New(r.Fremote, &opt)

	h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
	require.NoError(t, err)
	fh, ok := h.(*RWFileHandle)
	require.True(t, ok)

	return vfs, fh
}
|
||||
|
||||
// rwReadString reads up to n bytes from fh and returns them as a
// string - io.EOF is tolerated, any other error fails the test.
func rwReadString(t *testing.T, fh *RWFileHandle, n int) string {
	buf := make([]byte, n)
	n, err := fh.Read(buf)
	if err != io.EOF {
		assert.NoError(t, err)
	}
	return string(buf[:n])
}
|
||||
|
||||
// TestRWFileHandleMethodsRead exercises the read-side methods of a
// RWFileHandle (String, Node, Size, Read, Sync, Stat, Close) and
// checks the rwOpens count tracks the lazy open.
func TestRWFileHandleMethodsRead(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateReadOnly(t, r)
	defer cleanup(t, r, vfs)

	// String
	assert.Equal(t, "dir/file1 (rw)", fh.String())
	assert.Equal(t, "<nil *RWFileHandle>", (*RWFileHandle)(nil).String())
	assert.Equal(t, "<nil *RWFileHandle.file>", new(RWFileHandle).String())

	// Node
	node := fh.Node()
	assert.Equal(t, "file1", node.Name())

	// Size
	assert.Equal(t, int64(16), fh.Size())

	// No opens yet - the cache file is opened lazily
	assert.Equal(t, 0, fh.file.rwOpens())

	// Read 1
	assert.Equal(t, "0", rwReadString(t, fh, 1))

	// Open after the read
	assert.Equal(t, 1, fh.file.rwOpens())

	// Read remainder
	assert.Equal(t, "123456789abcdef", rwReadString(t, fh, 256))

	// Read EOF
	buf := make([]byte, 16)
	_, err := fh.Read(buf)
	assert.Equal(t, io.EOF, err)

	// Sync
	err = fh.Sync()
	assert.NoError(t, err)

	// Stat
	var fi os.FileInfo
	fi, err = fh.Stat()
	assert.NoError(t, err)
	assert.Equal(t, int64(16), fi.Size())
	assert.Equal(t, "file1", fi.Name())

	// Close
	assert.False(t, fh.closed)
	assert.Equal(t, nil, fh.Close())
	assert.True(t, fh.closed)

	// No opens again
	assert.Equal(t, 0, fh.file.rwOpens())

	// Close again
	assert.Equal(t, ECLOSED, fh.Close())
}
|
||||
|
||||
// TestRWFileHandleSeek checks Seek with all three whence values,
// that null seeks don't force the cache file open, and reading after
// seeking off the end returns io.EOF.
func TestRWFileHandleSeek(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateReadOnly(t, r)
	defer cleanup(t, r, vfs)

	assert.Equal(t, fh.opened, false)

	// Check null seeks don't open the file
	n, err := fh.Seek(0, io.SeekStart)
	assert.NoError(t, err)
	assert.Equal(t, int64(0), n)
	assert.Equal(t, fh.opened, false)
	n, err = fh.Seek(0, io.SeekCurrent)
	assert.NoError(t, err)
	assert.Equal(t, int64(0), n)
	assert.Equal(t, fh.opened, false)

	assert.Equal(t, "0", rwReadString(t, fh, 1))

	// 0 means relative to the origin of the file,
	n, err = fh.Seek(5, io.SeekStart)
	assert.NoError(t, err)
	assert.Equal(t, int64(5), n)
	assert.Equal(t, "5", rwReadString(t, fh, 1))

	// 1 means relative to the current offset
	n, err = fh.Seek(-3, io.SeekCurrent)
	assert.NoError(t, err)
	assert.Equal(t, int64(3), n)
	assert.Equal(t, "3", rwReadString(t, fh, 1))

	// 2 means relative to the end.
	n, err = fh.Seek(-3, io.SeekEnd)
	assert.NoError(t, err)
	assert.Equal(t, int64(13), n)
	assert.Equal(t, "d", rwReadString(t, fh, 1))

	// Seek off the end
	_, err = fh.Seek(100, io.SeekStart)
	assert.NoError(t, err)

	// Get the error on read
	buf := make([]byte, 16)
	l, err := fh.Read(buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, l)

	// Close
	assert.Equal(t, nil, fh.Close())
}
|
||||
|
||||
// TestRWFileHandleReadAt checks ReadAt at various offsets on a RW
// handle, including reads off the end and reading after Close.
func TestRWFileHandleReadAt(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateReadOnly(t, r)
	defer cleanup(t, r, vfs)

	// read from start
	buf := make([]byte, 1)
	n, err := fh.ReadAt(buf, 0)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "0", string(buf[:n]))

	// seek forwards
	n, err = fh.ReadAt(buf, 5)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "5", string(buf[:n]))

	// seek backwards
	n, err = fh.ReadAt(buf, 1)
	require.NoError(t, err)
	assert.Equal(t, 1, n)
	assert.Equal(t, "1", string(buf[:n]))

	// read exactly to the end
	buf = make([]byte, 6)
	n, err = fh.ReadAt(buf, 10)
	require.NoError(t, err)
	assert.Equal(t, 6, n)
	assert.Equal(t, "abcdef", string(buf[:n]))

	// read off the end - expect the available bytes plus io.EOF
	buf = make([]byte, 256)
	n, err = fh.ReadAt(buf, 10)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 6, n)
	assert.Equal(t, "abcdef", string(buf[:n]))

	// read starting off the end
	n, err = fh.ReadAt(buf, 100)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// Properly close the file
	assert.NoError(t, fh.Close())

	// check reading on closed file
	_, err = fh.ReadAt(buf, 100)
	assert.Equal(t, ECLOSED, err)
}
|
||||
|
||||
// TestRWFileHandleFlushRead checks that Flush on a read-only RW
// handle never closes the handle, before or after reading.
func TestRWFileHandleFlushRead(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateReadOnly(t, r)
	defer cleanup(t, r, vfs)

	// Check Flush does nothing if read not called
	err := fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Read data
	buf := make([]byte, 256)
	n, err := fh.Read(buf)
	assert.True(t, err == io.EOF || err == nil)
	assert.Equal(t, 16, n)

	// Check Flush does nothing if read called
	err = fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Check flush does nothing if called again
	err = fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)

	// Properly close the file
	assert.NoError(t, fh.Close())
}
|
||||
|
||||
// TestRWFileHandleReleaseRead checks that Release closes a read-only
// RW handle and that a second Release is a no-op.
func TestRWFileHandleReleaseRead(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateReadOnly(t, r)
	defer cleanup(t, r, vfs)

	// Read data
	buf := make([]byte, 256)
	n, err := fh.Read(buf)
	assert.True(t, err == io.EOF || err == nil)
	assert.Equal(t, 16, n)

	// Check Release closes file
	err = fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check Release does nothing if called again
	err = fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)
}
|
||||
|
||||
/// ------------------------------------------------------------
|
||||
|
||||
// TestRWFileHandleMethodsWrite exercises the write-side methods
// (Write, WriteString, Sync, Stat, Truncate, Close) and checks the
// file ends up on the remote with the expected contents.
func TestRWFileHandleMethodsWrite(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateWriteOnly(t, r)
	defer cleanup(t, r, vfs)

	// 1 opens since we opened with O_CREATE and the file didn't
	// exist in the cache
	assert.Equal(t, 1, fh.file.rwOpens())

	// String
	assert.Equal(t, "file1 (rw)", fh.String())
	assert.Equal(t, "<nil *RWFileHandle>", (*RWFileHandle)(nil).String())
	assert.Equal(t, "<nil *RWFileHandle.file>", new(RWFileHandle).String())

	// Node
	node := fh.Node()
	assert.Equal(t, "file1", node.Name())

	// offset reports the current seek position via a null seek
	offset := func() int64 {
		n, err := fh.Seek(0, io.SeekCurrent)
		require.NoError(t, err)
		return n
	}

	// Offset #1
	assert.Equal(t, int64(0), offset())
	assert.Equal(t, int64(0), node.Size())

	// Size #1
	assert.Equal(t, int64(0), fh.Size())

	// Write
	n, err := fh.Write([]byte("hello"))
	assert.NoError(t, err)
	assert.Equal(t, 5, n)

	// Open after the write
	assert.Equal(t, 1, fh.file.rwOpens())

	// Offset #2
	assert.Equal(t, int64(5), offset())
	assert.Equal(t, int64(5), node.Size())

	// Size #2
	assert.Equal(t, int64(5), fh.Size())

	// WriteString
	n, err = fh.WriteString(" world!")
	assert.NoError(t, err)
	assert.Equal(t, 7, n)

	// Sync
	err = fh.Sync()
	assert.NoError(t, err)

	// Stat
	var fi os.FileInfo
	fi, err = fh.Stat()
	assert.NoError(t, err)
	assert.Equal(t, int64(12), fi.Size())
	assert.Equal(t, "file1", fi.Name())

	// Truncate
	err = fh.Truncate(11)
	assert.NoError(t, err)

	// Close
	assert.NoError(t, fh.Close())

	// No opens again
	assert.Equal(t, 0, fh.file.rwOpens())

	// Check double close
	err = fh.Close()
	assert.Equal(t, ECLOSED, err)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,11,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "hello world", t1)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)
}
|
||||
|
||||
// TestRWFileHandleWriteAt checks WriteAt doesn't move the file
// offset, sets writeCalled, fails after Close, and that the final
// contents reach the remote.
func TestRWFileHandleWriteAt(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateWriteOnly(t, r)
	defer cleanup(t, r, vfs)

	// offset reports the current seek position via a null seek
	offset := func() int64 {
		n, err := fh.Seek(0, io.SeekCurrent)
		require.NoError(t, err)
		return n
	}

	// Preconditions
	assert.Equal(t, int64(0), offset())
	assert.True(t, fh.opened)
	assert.False(t, fh.writeCalled)
	assert.True(t, fh.changed)

	// Write the data
	n, err := fh.WriteAt([]byte("hello**"), 0)
	assert.NoError(t, err)
	assert.Equal(t, 7, n)

	// After write - offset unchanged by WriteAt
	assert.Equal(t, int64(0), offset())
	assert.True(t, fh.writeCalled)

	// Write more data, overwriting the "**"
	n, err = fh.WriteAt([]byte(" world"), 5)
	assert.NoError(t, err)
	assert.Equal(t, 6, n)

	// Close
	assert.NoError(t, fh.Close())

	// Check can't write on closed handle
	n, err = fh.WriteAt([]byte("hello"), 0)
	assert.Equal(t, ECLOSED, err)
	assert.Equal(t, 0, n)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,11,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "hello world", t1)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)
}
|
||||
|
||||
// TestRWFileHandleWriteNoWrite checks that files created with
// O_CREATE but never written still end up as zero length files on
// the remote, whether closed via Close or Flush+Release.
func TestRWFileHandleWriteNoWrite(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateWriteOnly(t, r)
	defer cleanup(t, r, vfs)

	// Close the file without writing to it
	err := fh.Close()
	assert.NoError(t, err)

	// Create a different file (not in the cache)
	h, err := vfs.OpenFile("file2", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	require.NoError(t, err)

	// Close it with Flush and Release
	err = h.Flush()
	assert.NoError(t, err)
	err = h.Release()
	assert.NoError(t, err)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,0,false", "file2,0,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "", t1)
	file2 := fstest.NewItem("file2", "", t1)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{}, fs.ModTimeNotSupported)
}
|
||||
|
||||
// TestRWFileHandleFlushWrite checks that Flush closes a written
// handle and is a no-op when called again.
func TestRWFileHandleFlushWrite(t *testing.T) {
	r := fstest.NewRun(t)
	vfs, fh := rwHandleCreateWriteOnly(t, r)
	defer cleanup(t, r, vfs)

	// Check that the file has been created and is open
	assert.True(t, fh.opened)

	// Write some data
	n, err := fh.Write([]byte("hello"))
	assert.NoError(t, err)
	assert.Equal(t, 5, n)

	// Check Flush closes file if write called
	err = fh.Flush()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check flush does nothing if called again
	err = fh.Flush()
	assert.NoError(t, err)
	assert.True(t, fh.closed)
}
|
||||
|
||||
func TestRWFileHandleReleaseWrite(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
vfs, fh := rwHandleCreateWriteOnly(t, r)
|
||||
defer cleanup(t, r, vfs)
|
||||
|
||||
// Write some data
|
||||
n, err := fh.Write([]byte("hello"))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 5, n)
|
||||
|
||||
// Check Release closes file
|
||||
err = fh.Release()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, fh.closed)
|
||||
|
||||
// Check Release does nothing if called again
|
||||
err = fh.Release()
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, fh.closed)
|
||||
}
|
||||
|
||||
func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) {
|
||||
fileName := "open-test-file"
|
||||
|
||||
// first try with file not existing
|
||||
_, err := vfs.Stat(fileName)
|
||||
require.True(t, os.IsNotExist(err), test.what)
|
||||
|
||||
f, openNonExistentErr := vfs.OpenFile(fileName, test.flags, 0666)
|
||||
|
||||
var readNonExistentErr error
|
||||
var writeNonExistentErr error
|
||||
if openNonExistentErr == nil {
|
||||
// read some bytes
|
||||
buf := []byte{0, 0}
|
||||
_, readNonExistentErr = f.Read(buf)
|
||||
|
||||
// write some bytes
|
||||
_, writeNonExistentErr = f.Write([]byte("hello"))
|
||||
|
||||
// close
|
||||
err = f.Close()
|
||||
require.NoError(t, err, test.what)
|
||||
}
|
||||
|
||||
// write the file
|
||||
f, err = vfs.OpenFile(fileName, os.O_WRONLY|os.O_CREATE, 0777)
|
||||
require.NoError(t, err, test.what)
|
||||
_, err = f.Write([]byte("hello"))
|
||||
require.NoError(t, err, test.what)
|
||||
err = f.Close()
|
||||
require.NoError(t, err, test.what)
|
||||
|
||||
// then open file and try with file existing
|
||||
|
||||
f, openExistingErr := vfs.OpenFile(fileName, test.flags, 0666)
|
||||
var readExistingErr error
|
||||
var writeExistingErr error
|
||||
if openExistingErr == nil {
|
||||
// read some bytes
|
||||
buf := []byte{0, 0}
|
||||
_, readExistingErr = f.Read(buf)
|
||||
|
||||
// write some bytes
|
||||
_, writeExistingErr = f.Write([]byte("HEL"))
|
||||
|
||||
// close
|
||||
err = f.Close()
|
||||
require.NoError(t, err, test.what)
|
||||
}
|
||||
|
||||
// read the file
|
||||
f, err = vfs.OpenFile(fileName, os.O_RDONLY, 0)
|
||||
require.NoError(t, err, test.what)
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
require.NoError(t, err, test.what)
|
||||
err = f.Close()
|
||||
require.NoError(t, err, test.what)
|
||||
contents := string(buf)
|
||||
|
||||
// remove file
|
||||
node, err := vfs.Stat(fileName)
|
||||
require.NoError(t, err, test.what)
|
||||
err = node.Remove()
|
||||
require.NoError(t, err, test.what)
|
||||
|
||||
// check
|
||||
assert.Equal(t, test.openNonExistentErr, openNonExistentErr, "openNonExistentErr: %s: want=%v, got=%v", test.what, test.openNonExistentErr, openNonExistentErr)
|
||||
assert.Equal(t, test.readNonExistentErr, readNonExistentErr, "readNonExistentErr: %s: want=%v, got=%v", test.what, test.readNonExistentErr, readNonExistentErr)
|
||||
assert.Equal(t, test.writeNonExistentErr, writeNonExistentErr, "writeNonExistentErr: %s: want=%v, got=%v", test.what, test.writeNonExistentErr, writeNonExistentErr)
|
||||
assert.Equal(t, test.openExistingErr, openExistingErr, "openExistingErr: %s: want=%v, got=%v", test.what, test.openExistingErr, openExistingErr)
|
||||
assert.Equal(t, test.readExistingErr, readExistingErr, "readExistingErr: %s: want=%v, got=%v", test.what, test.readExistingErr, readExistingErr)
|
||||
assert.Equal(t, test.writeExistingErr, writeExistingErr, "writeExistingErr: %s: want=%v, got=%v", test.what, test.writeExistingErr, writeExistingErr)
|
||||
assert.Equal(t, test.contents, contents, test.what)
|
||||
}
|
||||
|
||||
func TestRWFileHandleOpenTests(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
opt := DefaultOpt
|
||||
opt.CacheMode = CacheModeFull
|
||||
vfs := New(r.Fremote, &opt)
|
||||
defer cleanup(t, r, vfs)
|
||||
|
||||
for _, test := range openTests {
|
||||
testRWFileHandleOpenTest(t, vfs, &test)
|
||||
}
|
||||
}
|
||||
|
||||
// tests mod time on open files
|
||||
func TestRWFileModTimeWithOpenWriters(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs, fh := rwHandleCreateWriteOnly(t, r)
|
||||
|
||||
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
|
||||
|
||||
_, err := fh.Write([]byte{104, 105})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fh.Node().SetModTime(mtime)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fh.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := vfs.Stat("file1")
|
||||
require.NoError(t, err)
|
||||
|
||||
// avoid errors because of timezone differences
|
||||
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
|
||||
}
|
||||
509
.rclone_repo/vfs/vfs.go
Executable file
509
.rclone_repo/vfs/vfs.go
Executable file
@@ -0,0 +1,509 @@
|
||||
// Package vfs provides a virtual filing system layer over rclone's
|
||||
// native objects.
|
||||
//
|
||||
// It attempts to behave in a similar way to Go's filing system
|
||||
// manipulation code in the os package. The same named function
|
||||
// should behave in an identical fashion. The objects also obey Go's
|
||||
// standard interfaces.
|
||||
//
|
||||
// Note that paths don't start or end with /, so the root directory
|
||||
// may be referred to as "". However Stat strips slashes so you can
|
||||
// use paths with slashes in.
|
||||
//
|
||||
// It also includes directory caching
|
||||
//
|
||||
// The vfs package returns Error values to signal precisely which
|
||||
// error conditions have occurred. It may also return general errors
|
||||
// it receives. It tries to use os Error values (eg os.ErrExist)
|
||||
// where possible.
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/log"
|
||||
)
|
||||
|
||||
// DefaultOpt is the default values used for Options.
var DefaultOpt = Options{
	NoModTime:         false,
	NoChecksum:        false,
	NoSeek:            false,
	DirCacheTime:      5 * 60 * time.Second,
	PollInterval:      time.Minute,
	ReadOnly:          false,
	Umask:             0,
	UID:               ^uint32(0), // these values instruct WinFSP-FUSE to use the current user
	GID:               ^uint32(0), // overridden for non windows in mount_unix.go
	DirPerms:          os.FileMode(0777) | os.ModeDir,
	FilePerms:         os.FileMode(0666),
	CacheMode:         CacheModeOff,
	CacheMaxAge:       3600 * time.Second,
	CachePollInterval: 60 * time.Second,
	ChunkSize:         128 * fs.MebiByte,
	ChunkSizeLimit:    -1, // -1 means no limit on doubling the chunk size
}
|
||||
|
||||
// Node represents either a directory (*Dir) or a file (*File)
type Node interface {
	os.FileInfo
	IsFile() bool
	Inode() uint64
	SetModTime(modTime time.Time) error
	Sync() error
	Remove() error
	RemoveAll() error
	DirEntry() fs.DirEntry
	VFS() *VFS
	Open(flags int) (Handle, error)
	Truncate(size int64) error
	Path() string
}

// Check interfaces
//
// Compile-time assertions that both concrete node types satisfy Node.
var (
	_ Node = (*File)(nil)
	_ Node = (*Dir)(nil)
)
|
||||
|
||||
// Nodes is a slice of Node
type Nodes []Node

// Sort functions - Nodes implements sort.Interface ordering by Path.
func (ns Nodes) Len() int           { return len(ns) }
func (ns Nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
func (ns Nodes) Less(i, j int) bool { return ns[i].Path() < ns[j].Path() }
|
||||
|
||||
// Noder represents something which can return a node
type Noder interface {
	fmt.Stringer
	Node() Node
}

// Check interfaces
//
// Compile-time assertions that every node and handle type can report
// its underlying Node.
var (
	_ Noder = (*File)(nil)
	_ Noder = (*Dir)(nil)
	_ Noder = (*ReadFileHandle)(nil)
	_ Noder = (*WriteFileHandle)(nil)
	_ Noder = (*RWFileHandle)(nil)
	_ Noder = (*DirHandle)(nil)
)
|
||||
|
||||
// OsFiler is the methods on *os.File
//
// It lets other implementations stand in where an *os.File would
// otherwise be required - see the interface check against *os.File
// further down.
type OsFiler interface {
	Chdir() error
	Chmod(mode os.FileMode) error
	Chown(uid, gid int) error
	Close() error
	Fd() uintptr
	Name() string
	Read(b []byte) (n int, err error)
	ReadAt(b []byte, off int64) (n int, err error)
	Readdir(n int) ([]os.FileInfo, error)
	Readdirnames(n int) (names []string, err error)
	Seek(offset int64, whence int) (ret int64, err error)
	Stat() (os.FileInfo, error)
	Sync() error
	Truncate(size int64) error
	Write(b []byte) (n int, err error)
	WriteAt(b []byte, off int64) (n int, err error)
	WriteString(s string) (n int, err error)
}
|
||||
|
||||
// Handle is the interface satisfied by open files or directories.
// It is the methods on *os.File, plus a few more useful for FUSE
// filingsystems. Not all of them are supported.
type Handle interface {
	OsFiler
	// Additional methods useful for FUSE filesystems
	Flush() error   // flush pending changes - may be called more than once
	Release() error // release the handle when it is finished with
	Node() Node     // the Node this handle is open on
	// Size() int64
}
|
||||
|
||||
// baseHandle implements all the missing methods
//
// Concrete handle types embed it so they only need to implement the
// methods they actually support; everything else returns ENOSYS (or a
// harmless zero-value default for Fd/Name/Sync/Node).
type baseHandle struct{}

func (h baseHandle) Chdir() error                                         { return ENOSYS }
func (h baseHandle) Chmod(mode os.FileMode) error                         { return ENOSYS }
func (h baseHandle) Chown(uid, gid int) error                             { return ENOSYS }
func (h baseHandle) Close() error                                         { return ENOSYS }
func (h baseHandle) Fd() uintptr                                          { return 0 }
func (h baseHandle) Name() string                                         { return "" }
func (h baseHandle) Read(b []byte) (n int, err error)                     { return 0, ENOSYS }
func (h baseHandle) ReadAt(b []byte, off int64) (n int, err error)        { return 0, ENOSYS }
func (h baseHandle) Readdir(n int) ([]os.FileInfo, error)                 { return nil, ENOSYS }
func (h baseHandle) Readdirnames(n int) (names []string, err error)       { return nil, ENOSYS }
func (h baseHandle) Seek(offset int64, whence int) (ret int64, err error) { return 0, ENOSYS }
func (h baseHandle) Stat() (os.FileInfo, error)                           { return nil, ENOSYS }
func (h baseHandle) Sync() error                                          { return nil }
func (h baseHandle) Truncate(size int64) error                            { return ENOSYS }
func (h baseHandle) Write(b []byte) (n int, err error)                    { return 0, ENOSYS }
func (h baseHandle) WriteAt(b []byte, off int64) (n int, err error)       { return 0, ENOSYS }
func (h baseHandle) WriteString(s string) (n int, err error)              { return 0, ENOSYS }
func (h baseHandle) Flush() (err error)                                   { return ENOSYS }
func (h baseHandle) Release() (err error)                                 { return ENOSYS }
func (h baseHandle) Node() Node                                           { return nil }

//func (h baseHandle) Size() int64 { return 0 }
|
||||
|
||||
// Check interfaces
//
// NOTE(review): RWFileHandle is not asserted against Handle here,
// unlike the other handle types - confirm it is checked elsewhere.
var (
	_ OsFiler = (*os.File)(nil)
	_ Handle  = (*baseHandle)(nil)
	_ Handle  = (*ReadFileHandle)(nil)
	_ Handle  = (*WriteFileHandle)(nil)
	_ Handle  = (*DirHandle)(nil)
)
|
||||
|
||||
// VFS represents the top level filing system
type VFS struct {
	f         fs.Fs              // remote the VFS is serving
	root      *Dir               // the root directory, created in New
	Opt       Options            // options in effect (after umask masking in New)
	cache     *cache             // file cache - nil unless a CacheMode > CacheModeOff is active
	cancel    context.CancelFunc // stops the cache's background context - nil if not running
	usageMu   sync.Mutex         // protects usageTime and usage below
	usageTime time.Time          // time usage was last fetched - see Statfs
	usage     *fs.Usage          // cached result of the About call
}
|
||||
|
||||
// Options is options for creating the vfs
type Options struct {
	NoSeek            bool          // don't allow seeking if set
	NoChecksum        bool          // don't check checksums if set
	ReadOnly          bool          // if set VFS is read only
	NoModTime         bool          // don't read mod times for files
	DirCacheTime      time.Duration // how long to consider directory listing cache valid
	PollInterval      time.Duration // time between polling the remote for changes (0 disables)
	Umask             int           // umask applied to DirPerms and FilePerms in New
	UID               uint32        // uid to report for files
	GID               uint32        // gid to report for files
	DirPerms          os.FileMode   // permissions reported for directories
	FilePerms         os.FileMode   // permissions reported for files
	ChunkSize         fs.SizeSuffix // if > 0 read files in chunks
	ChunkSizeLimit    fs.SizeSuffix // if > ChunkSize double the chunk size after each chunk until reached
	CacheMode         CacheMode     // how much to cache files locally
	CacheMaxAge       time.Duration // max age of objects in the cache
	CachePollInterval time.Duration // interval to poll the cache for stale objects
}
|
||||
|
||||
// New creates a new VFS and root directory. If opt is nil, then
|
||||
// DefaultOpt will be used
|
||||
func New(f fs.Fs, opt *Options) *VFS {
|
||||
fsDir := fs.NewDir("", time.Now())
|
||||
vfs := &VFS{
|
||||
f: f,
|
||||
}
|
||||
|
||||
// Make a copy of the options
|
||||
if opt != nil {
|
||||
vfs.Opt = *opt
|
||||
} else {
|
||||
vfs.Opt = DefaultOpt
|
||||
}
|
||||
|
||||
// Mask the permissions with the umask
|
||||
vfs.Opt.DirPerms &= ^os.FileMode(vfs.Opt.Umask)
|
||||
vfs.Opt.FilePerms &= ^os.FileMode(vfs.Opt.Umask)
|
||||
|
||||
// Make sure directories are returned as directories
|
||||
vfs.Opt.DirPerms |= os.ModeDir
|
||||
|
||||
// Create root directory
|
||||
vfs.root = newDir(vfs, f, nil, fsDir)
|
||||
|
||||
// Start polling if required
|
||||
if vfs.Opt.PollInterval > 0 {
|
||||
if do := vfs.f.Features().ChangeNotify; do != nil {
|
||||
do(vfs.notifyFunc, vfs.Opt.PollInterval)
|
||||
} else {
|
||||
fs.Infof(f, "poll-interval is not supported by this remote")
|
||||
}
|
||||
}
|
||||
|
||||
vfs.SetCacheMode(vfs.Opt.CacheMode)
|
||||
|
||||
// add the remote control
|
||||
vfs.addRC()
|
||||
return vfs
|
||||
}
|
||||
|
||||
// SetCacheMode change the cache mode
|
||||
func (vfs *VFS) SetCacheMode(cacheMode CacheMode) {
|
||||
vfs.Shutdown()
|
||||
vfs.cache = nil
|
||||
if vfs.Opt.CacheMode > CacheModeOff {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cache, err := newCache(ctx, vfs.f, &vfs.Opt) // FIXME pass on context or get from Opt?
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to create vfs cache - disabling: %v", err)
|
||||
vfs.Opt.CacheMode = CacheModeOff
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
vfs.cancel = cancel
|
||||
vfs.cache = cache
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown stops any background go-routines
|
||||
func (vfs *VFS) Shutdown() {
|
||||
if vfs.cancel != nil {
|
||||
vfs.cancel()
|
||||
vfs.cancel = nil
|
||||
}
|
||||
}
|
||||
|
||||
// CleanUp deletes the contents of the on disk cache
|
||||
func (vfs *VFS) CleanUp() error {
|
||||
if vfs.Opt.CacheMode == CacheModeOff {
|
||||
return nil
|
||||
}
|
||||
return vfs.cache.cleanUp()
|
||||
}
|
||||
|
||||
// FlushDirCache empties the directory cache
//
// It does this by forgetting everything from the root directory down.
func (vfs *VFS) FlushDirCache() {
	vfs.root.ForgetAll()
}
|
||||
|
||||
// WaitForWriters sleeps until all writers have finished or
// time.Duration has elapsed
func (vfs *VFS) WaitForWriters(timeout time.Duration) {
	defer log.Trace(nil, "timeout=%v", timeout)("")
	const tickTime = 1 * time.Second
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()
	tick := time.NewTimer(tickTime)
	defer tick.Stop()
	// The tick timer is only armed (via Reset below) once writers are
	// actually found, so stop it immediately after creation.
	tick.Stop()
	for {
		// Count the active writers across every file in the tree.
		writers := 0
		vfs.root.walk("", func(d *Dir) {
			fs.Debugf(d.path, "Looking for writers")
			// NB d.mu is held by walk() here
			for leaf, item := range d.items {
				fs.Debugf(leaf, "reading active writers")
				if file, ok := item.(*File); ok {
					n := file.activeWriters()
					if n != 0 {
						fs.Debugf(file, "active writers %d", n)
					}
					writers += n
				}
			}
		})
		if writers == 0 {
			return
		}
		fs.Debugf(nil, "Still %d writers active, waiting %v", writers, tickTime)
		tick.Reset(tickTime)
		// Wait for the next poll, or give up when the deadline fires.
		select {
		case <-tick.C:
			break
		case <-deadline.C:
			fs.Errorf(nil, "Exiting even though %d writers are active after %v", writers, timeout)
			return
		}
	}
}
|
||||
|
||||
// Root returns the root node
//
// The error is always nil - the root directory is created in New.
func (vfs *VFS) Root() (*Dir, error) {
	// fs.Debugf(vfs.f, "Root()")
	return vfs.root, nil
}
|
||||
|
||||
// inodeCount is the last inode number handed out - only modify it
// through newInode.
var inodeCount uint64

// newInode creates a new unique inode number
func newInode() uint64 {
	return atomic.AddUint64(&inodeCount, 1)
}
|
||||
|
||||
// Stat finds the Node by path starting from the root
|
||||
//
|
||||
// It is the equivalent of os.Stat - Node contains the os.FileInfo
|
||||
// interface.
|
||||
func (vfs *VFS) Stat(path string) (node Node, err error) {
|
||||
path = strings.Trim(path, "/")
|
||||
node = vfs.root
|
||||
for path != "" {
|
||||
i := strings.IndexRune(path, '/')
|
||||
var name string
|
||||
if i < 0 {
|
||||
name, path = path, ""
|
||||
} else {
|
||||
name, path = path[:i], path[i+1:]
|
||||
}
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
dir, ok := node.(*Dir)
|
||||
if !ok {
|
||||
// We need to look in a directory, but found a file
|
||||
return nil, ENOENT
|
||||
}
|
||||
node, err = dir.Stat(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// StatParent finds the parent directory and the leaf name of a path
|
||||
func (vfs *VFS) StatParent(name string) (dir *Dir, leaf string, err error) {
|
||||
name = strings.Trim(name, "/")
|
||||
parent, leaf := path.Split(name)
|
||||
node, err := vfs.Stat(parent)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
if node.IsFile() {
|
||||
return nil, "", os.ErrExist
|
||||
}
|
||||
dir = node.(*Dir)
|
||||
return dir, leaf, nil
|
||||
}
|
||||
|
||||
// decodeOpenFlags returns a string representing the open flags
|
||||
func decodeOpenFlags(flags int) string {
|
||||
var out []string
|
||||
rdwrMode := flags & accessModeMask
|
||||
switch rdwrMode {
|
||||
case os.O_RDONLY:
|
||||
out = append(out, "O_RDONLY")
|
||||
case os.O_WRONLY:
|
||||
out = append(out, "O_WRONLY")
|
||||
case os.O_RDWR:
|
||||
out = append(out, "O_RDWR")
|
||||
default:
|
||||
out = append(out, fmt.Sprintf("0x%X", rdwrMode))
|
||||
}
|
||||
if flags&os.O_APPEND != 0 {
|
||||
out = append(out, "O_APPEND")
|
||||
}
|
||||
if flags&os.O_CREATE != 0 {
|
||||
out = append(out, "O_CREATE")
|
||||
}
|
||||
if flags&os.O_EXCL != 0 {
|
||||
out = append(out, "O_EXCL")
|
||||
}
|
||||
if flags&os.O_SYNC != 0 {
|
||||
out = append(out, "O_SYNC")
|
||||
}
|
||||
if flags&os.O_TRUNC != 0 {
|
||||
out = append(out, "O_TRUNC")
|
||||
}
|
||||
flags &^= accessModeMask | os.O_APPEND | os.O_CREATE | os.O_EXCL | os.O_SYNC | os.O_TRUNC
|
||||
if flags != 0 {
|
||||
out = append(out, fmt.Sprintf("0x%X", flags))
|
||||
}
|
||||
return strings.Join(out, "|")
|
||||
}
|
||||
|
||||
// OpenFile a file according to the flags and perm provided
//
// Note that in this function perm is only used in the trace output -
// the created node's permissions come from the VFS options.
func (vfs *VFS) OpenFile(name string, flags int, perm os.FileMode) (fd Handle, err error) {
	defer log.Trace(name, "flags=%s, perm=%v", decodeOpenFlags(flags), perm)("fd=%v, err=%v", &fd, &err)

	// http://pubs.opengroup.org/onlinepubs/7908799/xsh/open.html
	// The result of using O_TRUNC with O_RDONLY is undefined.
	// Linux seems to truncate the file, but we prefer to return EINVAL
	if flags&accessModeMask == os.O_RDONLY && flags&os.O_TRUNC != 0 {
		return nil, EINVAL
	}

	node, err := vfs.Stat(name)
	if err != nil {
		if err != ENOENT || flags&os.O_CREATE == 0 {
			return nil, err
		}
		// If not found and O_CREATE then create the file
		// (the := below shadows the outer err; every failure path returns
		// immediately, so the shadowing is harmless)
		dir, leaf, err := vfs.StatParent(name)
		if err != nil {
			return nil, err
		}
		node, err = dir.Create(leaf, flags)
		if err != nil {
			return nil, err
		}
	}
	return node.Open(flags)
}
|
||||
|
||||
// Rename oldName to newName
|
||||
func (vfs *VFS) Rename(oldName, newName string) error {
|
||||
// find the parent directories
|
||||
oldDir, oldLeaf, err := vfs.StatParent(oldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newDir, newLeaf, err := vfs.StatParent(newName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = oldDir.Rename(oldLeaf, newLeaf, newDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Statfs returns info about the filing system if known
//
// The values will be -1 if they aren't known
//
// This information is cached for the DirCacheTime interval
func (vfs *VFS) Statfs() (total, used, free int64) {
	// defer log.Trace("/", "")("total=%d, used=%d, free=%d", &total, &used, &free)
	vfs.usageMu.Lock()
	defer vfs.usageMu.Unlock()
	total, used, free = -1, -1, -1
	doAbout := vfs.f.Features().About
	if doAbout == nil {
		// the backend can't report usage at all
		return
	}
	// Refresh the cached usage if it was never fetched or has expired.
	if vfs.usageTime.IsZero() || time.Since(vfs.usageTime) >= vfs.Opt.DirCacheTime {
		var err error
		vfs.usage, err = doAbout()
		vfs.usageTime = time.Now()
		if err != nil {
			fs.Errorf(vfs.f, "Statfs failed: %v", err)
			return
		}
	}
	// Copy out whichever values the backend reported.
	if u := vfs.usage; u != nil {
		if u.Total != nil {
			total = *u.Total
		}
		if u.Free != nil {
			free = *u.Free
		}
		if u.Used != nil {
			used = *u.Used
		}
	}
	return
}
|
||||
|
||||
// notifyFunc removes the last path segement for directories and calls ForgetPath with the result.
|
||||
//
|
||||
// This ensures that new or renamed directories appear in their parent.
|
||||
func (vfs *VFS) notifyFunc(relativePath string, entryType fs.EntryType) {
|
||||
if entryType == fs.EntryDirectory {
|
||||
relativePath = path.Dir(relativePath)
|
||||
}
|
||||
vfs.root.ForgetPath(relativePath, entryType)
|
||||
}
|
||||
293
.rclone_repo/vfs/vfs_test.go
Executable file
293
.rclone_repo/vfs/vfs_test.go
Executable file
@@ -0,0 +1,293 @@
|
||||
// Test suite for vfs
|
||||
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/all" // import all the backends
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Some times used in the tests
//
// Fixed timestamps so listings can be compared deterministically.
var (
	t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
	t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
	t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
)

// TestMain drives the tests
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}
|
||||
|
||||
// Check baseHandle performs as advertised
//
// Every method must return ENOSYS, except the ones with documented
// harmless defaults (Fd, Name, Sync, Node).
func TestVFSbaseHandle(t *testing.T) {
	fh := baseHandle{}

	err := fh.Chdir()
	assert.Equal(t, ENOSYS, err)

	err = fh.Chmod(0)
	assert.Equal(t, ENOSYS, err)

	err = fh.Chown(0, 0)
	assert.Equal(t, ENOSYS, err)

	err = fh.Close()
	assert.Equal(t, ENOSYS, err)

	// defaults rather than errors for these two
	fd := fh.Fd()
	assert.Equal(t, uintptr(0), fd)

	name := fh.Name()
	assert.Equal(t, "", name)

	_, err = fh.Read(nil)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.ReadAt(nil, 0)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.Readdir(0)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.Readdirnames(0)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.Seek(0, io.SeekStart)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.Stat()
	assert.Equal(t, ENOSYS, err)

	// Sync is deliberately a no-op, not an error
	err = fh.Sync()
	assert.Equal(t, nil, err)

	err = fh.Truncate(0)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.Write(nil)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.WriteAt(nil, 0)
	assert.Equal(t, ENOSYS, err)

	_, err = fh.WriteString("")
	assert.Equal(t, ENOSYS, err)

	err = fh.Flush()
	assert.Equal(t, ENOSYS, err)

	err = fh.Release()
	assert.Equal(t, ENOSYS, err)

	node := fh.Node()
	assert.Nil(t, node)
}
|
||||
|
||||
// TestNew sees if the New command works properly
|
||||
func TestVFSNew(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
// Check making a VFS with nil options
|
||||
vfs := New(r.Fremote, nil)
|
||||
assert.Equal(t, vfs.Opt, DefaultOpt)
|
||||
assert.Equal(t, vfs.f, r.Fremote)
|
||||
|
||||
// Check the initialisation
|
||||
var opt = DefaultOpt
|
||||
opt.DirPerms = 0777
|
||||
opt.FilePerms = 0666
|
||||
opt.Umask = 0002
|
||||
vfs = New(r.Fremote, &opt)
|
||||
assert.Equal(t, os.FileMode(0775)|os.ModeDir, vfs.Opt.DirPerms)
|
||||
assert.Equal(t, os.FileMode(0664), vfs.Opt.FilePerms)
|
||||
}
|
||||
|
||||
// TestRoot checks root directory is present and correct
|
||||
func TestVFSRoot(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
root, err := vfs.Root()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, vfs.root, root)
|
||||
assert.True(t, root.IsDir())
|
||||
assert.Equal(t, vfs.Opt.DirPerms.Perm(), root.Mode().Perm())
|
||||
}
|
||||
|
||||
func TestVFSStat(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
file1 := r.WriteObject("file1", "file1 contents", t1)
|
||||
file2 := r.WriteObject("dir/file2", "file2 contents", t2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
node, err := vfs.Stat("file1")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsFile())
|
||||
assert.Equal(t, "file1", node.Name())
|
||||
|
||||
node, err = vfs.Stat("dir")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsDir())
|
||||
assert.Equal(t, "dir", node.Name())
|
||||
|
||||
node, err = vfs.Stat("dir/file2")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsFile())
|
||||
assert.Equal(t, "file2", node.Name())
|
||||
|
||||
node, err = vfs.Stat("not found")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
|
||||
node, err = vfs.Stat("dir/not found")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
|
||||
node, err = vfs.Stat("not found/not found")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
|
||||
node, err = vfs.Stat("file1/under a file")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
}
|
||||
|
||||
func TestVFSStatParent(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
file1 := r.WriteObject("file1", "file1 contents", t1)
|
||||
file2 := r.WriteObject("dir/file2", "file2 contents", t2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
node, leaf, err := vfs.StatParent("file1")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsDir())
|
||||
assert.Equal(t, "/", node.Name())
|
||||
assert.Equal(t, "file1", leaf)
|
||||
|
||||
node, leaf, err = vfs.StatParent("dir/file2")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsDir())
|
||||
assert.Equal(t, "dir", node.Name())
|
||||
assert.Equal(t, "file2", leaf)
|
||||
|
||||
node, leaf, err = vfs.StatParent("not found")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, node.IsDir())
|
||||
assert.Equal(t, "/", node.Name())
|
||||
assert.Equal(t, "not found", leaf)
|
||||
|
||||
_, _, err = vfs.StatParent("not found dir/not found")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
|
||||
_, _, err = vfs.StatParent("file1/under a file")
|
||||
assert.Equal(t, os.ErrExist, err)
|
||||
}
|
||||
|
||||
func TestVFSOpenFile(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
file1 := r.WriteObject("file1", "file1 contents", t1)
|
||||
file2 := r.WriteObject("dir/file2", "file2 contents", t2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
|
||||
fd, err := vfs.OpenFile("file1", os.O_RDONLY, 0777)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, fd)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = vfs.OpenFile("dir", os.O_RDONLY, 0777)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, fd)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = vfs.OpenFile("dir/new_file.txt", os.O_RDONLY, 0777)
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
assert.Nil(t, fd)
|
||||
|
||||
fd, err = vfs.OpenFile("dir/new_file.txt", os.O_WRONLY|os.O_CREATE, 0777)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, fd)
|
||||
require.NoError(t, fd.Close())
|
||||
|
||||
fd, err = vfs.OpenFile("not found/new_file.txt", os.O_WRONLY|os.O_CREATE, 0777)
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
assert.Nil(t, fd)
|
||||
}
|
||||
|
||||
func TestVFSRename(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
file1 := r.WriteObject("dir/file2", "file2 contents", t2)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
err := vfs.Rename("dir/file2", "dir/file1")
|
||||
require.NoError(t, err)
|
||||
file1.Path = "dir/file1"
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
err = vfs.Rename("dir/file1", "file0")
|
||||
require.NoError(t, err)
|
||||
file1.Path = "file0"
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
err = vfs.Rename("not found/file0", "file0")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
|
||||
err = vfs.Rename("file0", "not found/file0")
|
||||
assert.Equal(t, os.ErrNotExist, err)
|
||||
}
|
||||
|
||||
func TestVFSStatfs(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs := New(r.Fremote, nil)
|
||||
|
||||
// pre-conditions
|
||||
assert.Nil(t, vfs.usage)
|
||||
assert.True(t, vfs.usageTime.IsZero())
|
||||
|
||||
// read
|
||||
total, used, free := vfs.Statfs()
|
||||
require.NotNil(t, vfs.usage)
|
||||
assert.False(t, vfs.usageTime.IsZero())
|
||||
if vfs.usage.Total != nil {
|
||||
assert.Equal(t, *vfs.usage.Total, total)
|
||||
} else {
|
||||
assert.Equal(t, -1, total)
|
||||
}
|
||||
if vfs.usage.Free != nil {
|
||||
assert.Equal(t, *vfs.usage.Free, free)
|
||||
} else {
|
||||
assert.Equal(t, -1, free)
|
||||
}
|
||||
if vfs.usage.Used != nil {
|
||||
assert.Equal(t, *vfs.usage.Used, used)
|
||||
} else {
|
||||
assert.Equal(t, -1, used)
|
||||
}
|
||||
|
||||
// read cached
|
||||
oldUsage := vfs.usage
|
||||
oldTime := vfs.usageTime
|
||||
total2, used2, free2 := vfs.Statfs()
|
||||
assert.Equal(t, oldUsage, vfs.usage)
|
||||
assert.Equal(t, total, total2)
|
||||
assert.Equal(t, used, used2)
|
||||
assert.Equal(t, free, free2)
|
||||
assert.Equal(t, oldTime, vfs.usageTime)
|
||||
}
|
||||
29
.rclone_repo/vfs/vfsflags/vfsflags.go
Executable file
29
.rclone_repo/vfs/vfsflags/vfsflags.go
Executable file
@@ -0,0 +1,29 @@
|
||||
// Package vfsflags implements command line flags to set up a vfs
|
||||
package vfsflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options set by command line flags
var (
	// Opt starts as a copy of the library defaults and is then
	// overridden by the flags registered in AddFlags.
	Opt = vfs.DefaultOpt
)
|
||||
|
||||
// AddFlags adds the non filing system specific flags to the command
//
// Each flag defaults to the current value in Opt, so callers may adjust
// Opt before registering the flags.
func AddFlags(flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up).")
	flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download.")
	flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files.")
	flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for.")
	flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
	flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only.")
	flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full")
	flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects.")
	flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache.")
	flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks.")
	flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited.")
	// platform specific flags (uid/gid/umask etc.) are added separately
	platformFlags(flagSet)
}
|
||||
11
.rclone_repo/vfs/vfsflags/vfsflags_non_unix.go
Executable file
11
.rclone_repo/vfs/vfsflags/vfsflags_non_unix.go
Executable file
@@ -0,0 +1,11 @@
|
||||
// +build !linux,!darwin,!freebsd
|
||||
|
||||
package vfsflags
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// add any extra platform specific flags
//
// This is the stub for platforms without umask/uid/gid support
// (anything that isn't linux, darwin or freebsd) - there are no extra
// flags to add.
func platformFlags(flags *pflag.FlagSet) {
}
|
||||
20
.rclone_repo/vfs/vfsflags/vfsflags_unix.go
Executable file
20
.rclone_repo/vfs/vfsflags/vfsflags_unix.go
Executable file
@@ -0,0 +1,20 @@
|
||||
// +build linux darwin freebsd
|
||||
|
||||
package vfsflags
|
||||
|
||||
import (
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/spf13/pflag"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// add any extra platform specific flags
|
||||
func platformFlags(flagSet *pflag.FlagSet) {
|
||||
flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem.")
|
||||
Opt.Umask = unix.Umask(0) // read the umask
|
||||
unix.Umask(Opt.Umask) // set it back to what it was
|
||||
Opt.UID = uint32(unix.Geteuid())
|
||||
Opt.GID = uint32(unix.Getegid())
|
||||
flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem.")
|
||||
flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem.")
|
||||
}
|
||||
315
.rclone_repo/vfs/write.go
Executable file
315
.rclone_repo/vfs/write.go
Executable file
@@ -0,0 +1,315 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
)
|
||||
|
||||
// WriteFileHandle is an open for write handle on a File
//
// Data is streamed sequentially to the remote through an io.Pipe feeding
// operations.Rcat (see openPending) - seeking is not supported.
type WriteFileHandle struct {
	baseHandle
	mu          sync.Mutex     // protects the state below
	closed      bool           // set if handle has been closed
	remote      string         // remote path of the file being written
	pipeWriter  *io.PipeWriter // data written here is streamed to the remote
	o           fs.Object      // object returned by Rcat when the upload finishes
	result      chan error     // delivers the result of the Rcat upload
	file        *File          // the File this handle writes to
	writeCalled bool           // set the first time Write() is called
	offset      int64          // current sequential write offset
	opened      bool           // set once the pending open has been performed
	flags       int            // open flags (os.O_* values)
	truncated   bool           // set if the file has been truncated to zero
}
|
||||
|
||||
// Check interfaces are satisfied at compile time
var (
	_ io.Writer   = (*WriteFileHandle)(nil)
	_ io.WriterAt = (*WriteFileHandle)(nil)
	_ io.Closer   = (*WriteFileHandle)(nil)
)
|
||||
|
||||
func newWriteFileHandle(d *Dir, f *File, remote string, flags int) (*WriteFileHandle, error) {
|
||||
fh := &WriteFileHandle{
|
||||
remote: remote,
|
||||
flags: flags,
|
||||
result: make(chan error, 1),
|
||||
file: f,
|
||||
}
|
||||
fh.file.addWriter(fh)
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// returns whether it is OK to truncate the file
|
||||
func (fh *WriteFileHandle) safeToTruncate() bool {
|
||||
return fh.truncated || fh.flags&os.O_TRUNC != 0 || !fh.file.exists()
|
||||
}
|
||||
|
||||
// openPending opens the file if there is a pending open
//
// This starts the actual upload: it creates an io.Pipe whose read end
// is streamed to the remote by operations.Rcat in a background
// goroutine. The Rcat result (object and error) is delivered on
// fh.result once the pipe writer is closed (see close).
//
// call with the lock held
func (fh *WriteFileHandle) openPending() (err error) {
	if fh.opened {
		return nil
	}
	// Refuse to overwrite an existing file unless truncation was requested
	if !fh.safeToTruncate() {
		fs.Errorf(fh.remote, "WriteFileHandle: Can't open for write without O_TRUNC on existing file without --vfs-cache-mode >= writes")
		return EPERM
	}
	var pipeReader *io.PipeReader
	pipeReader, fh.pipeWriter = io.Pipe()
	go func() {
		// NB Rcat deals with Stats.Transferring etc
		o, err := operations.Rcat(fh.file.d.f, fh.remote, pipeReader, time.Now())
		if err != nil {
			fs.Errorf(fh.remote, "WriteFileHandle.New Rcat failed: %v", err)
		}
		// Close the pipeReader so the pipeWriter fails with ErrClosedPipe
		_ = pipeReader.Close()
		fh.o = o
		fh.result <- err
	}()
	// As far as the VFS is concerned the file is now empty
	fh.file.setSize(0)
	fh.truncated = true
	fh.file.d.addObject(fh.file) // make sure the directory has this object in it now
	fh.opened = true
	return nil
}
|
||||
|
||||
// String converts it to printable
|
||||
func (fh *WriteFileHandle) String() string {
|
||||
if fh == nil {
|
||||
return "<nil *WriteFileHandle>"
|
||||
}
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.file == nil {
|
||||
return "<nil *WriteFileHandle.file>"
|
||||
}
|
||||
return fh.file.String() + " (w)"
|
||||
}
|
||||
|
||||
// Node returns the Node assocuated with this - satisfies Noder interface
|
||||
func (fh *WriteFileHandle) Node() Node {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.file
|
||||
}
|
||||
|
||||
// WriteAt writes len(p) bytes from p to the underlying data stream at offset
|
||||
// off. It returns the number of bytes written from p (0 <= n <= len(p)) and
|
||||
// any error encountered that caused the write to stop early. WriteAt must
|
||||
// return a non-nil error if it returns n < len(p).
|
||||
//
|
||||
// If WriteAt is writing to a destination with a seek offset, WriteAt should
|
||||
// not affect nor be affected by the underlying seek offset.
|
||||
//
|
||||
// Clients of WriteAt can execute parallel WriteAt calls on the same
|
||||
// destination if the ranges do not overlap.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (fh *WriteFileHandle) WriteAt(p []byte, off int64) (n int, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.writeAt(p, off)
|
||||
}
|
||||
|
||||
// Implementatino of WriteAt - call with lock held
|
||||
func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Write len=%d", len(p))
|
||||
if fh.closed {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write: error: %v", EBADF)
|
||||
return 0, ECLOSED
|
||||
}
|
||||
if fh.offset != off {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write: can't seek in file without --vfs-cache-mode >= writes")
|
||||
return 0, ESPIPE
|
||||
}
|
||||
if err = fh.openPending(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
fh.writeCalled = true
|
||||
n, err = fh.pipeWriter.Write(p)
|
||||
fh.offset += int64(n)
|
||||
fh.file.setSize(fh.offset)
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write error: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Write writes len(p) bytes from p to the underlying data stream. It returns
|
||||
// the number of bytes written from p (0 <= n <= len(p)) and any error
|
||||
// encountered that caused the write to stop early. Write must return a non-nil
|
||||
// error if it returns n < len(p). Write must not modify the slice data, even
|
||||
// temporarily.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (fh *WriteFileHandle) Write(p []byte) (n int, err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
// Since we can't seek, just call WriteAt with the current offset
|
||||
return fh.writeAt(p, fh.offset)
|
||||
}
|
||||
|
||||
// WriteString a string to the file
|
||||
func (fh *WriteFileHandle) WriteString(s string) (n int, err error) {
|
||||
return fh.Write([]byte(s))
|
||||
}
|
||||
|
||||
// Offset returns the offset of the file pointer
|
||||
func (fh *WriteFileHandle) Offset() (offset int64) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.offset
|
||||
}
|
||||
|
||||
// close the file handle returning ECLOSED if it has been
// closed already.
//
// This finishes the upload: it closes the pipe writer so Rcat sees
// EOF, waits for the Rcat result and on success stores the new object
// on the file.
//
// Must be called with fh.mu held
func (fh *WriteFileHandle) close() (err error) {
	if fh.closed {
		return ECLOSED
	}
	fh.closed = true
	// leave writer open until file is transferred
	defer func() {
		fh.file.delWriter(fh, false)
		fh.file.finishWriterClose()
	}()
	// If file not opened and not safe to truncate then leave file intact
	if !fh.opened && !fh.safeToTruncate() {
		return nil
	}
	// Make sure the upload has been started so that even a never-written
	// handle produces a (possibly empty) file
	if err = fh.openPending(); err != nil {
		return err
	}
	writeCloseErr := fh.pipeWriter.Close()
	// wait for Rcat to deliver its result
	err = <-fh.result
	if err == nil {
		fh.file.setObject(fh.o)
		err = writeCloseErr
	}
	return err
}
|
||||
|
||||
// Close closes the file, finishing any pending upload.
//
// Returns ECLOSED if the handle has already been closed.
func (fh *WriteFileHandle) Close() error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	return fh.close()
}
|
||||
|
||||
// Flush is called on each close() of a file descriptor. So if a
// filesystem wants to return write errors in close() and the file has
// cached dirty data, this is a good place to write back data and
// return any errors. Since many applications ignore close() errors
// this is not always useful.
//
// NOTE: The flush() method may be called more than once for each
// open(). This happens if more than one file descriptor refers to an
// opened file due to dup(), dup2() or fork() calls. It is not
// possible to determine if a flush is final, so each flush should be
// treated equally. Multiple write-flush sequences are relatively
// rare, so this shouldn't be a problem.
//
// Filesystems shouldn't assume that flush will always be called after
// some writes, or that if will be called at all.
func (fh *WriteFileHandle) Flush() error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	if fh.closed {
		fs.Debugf(fh.remote, "WriteFileHandle.Flush nothing to do")
		return nil
	}
	// fs.Debugf(fh.remote, "WriteFileHandle.Flush")
	// If Write hasn't been called yet, don't close the handle - just
	// write 0 bytes so the file gets created (Release will do the
	// final close)
	if !fh.writeCalled {
		fs.Debugf(fh.remote, "WriteFileHandle.Flush unwritten handle, writing 0 bytes to avoid race conditions")
		_, err := fh.writeAt([]byte{}, fh.offset)
		return err
	}
	err := fh.close()
	if err != nil {
		fs.Errorf(fh.remote, "WriteFileHandle.Flush error: %v", err)
	} else {
		// fs.Debugf(fh.remote, "WriteFileHandle.Flush OK")
	}
	return err
}
|
||||
|
||||
// Release is called when we are finished with the file handle
|
||||
//
|
||||
// It isn't called directly from userspace so the error is ignored by
|
||||
// the kernel
|
||||
func (fh *WriteFileHandle) Release() error {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
fs.Debugf(fh.remote, "WriteFileHandle.Release nothing to do")
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(fh.remote, "WriteFileHandle.Release closing")
|
||||
err := fh.close()
|
||||
if err != nil {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Release error: %v", err)
|
||||
} else {
|
||||
// fs.Debugf(fh.remote, "WriteFileHandle.Release OK")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Stat returns info about the file
|
||||
func (fh *WriteFileHandle) Stat() (os.FileInfo, error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
return fh.file, nil
|
||||
}
|
||||
|
||||
// Truncate file to given size
|
||||
func (fh *WriteFileHandle) Truncate(size int64) (err error) {
|
||||
fh.mu.Lock()
|
||||
defer fh.mu.Unlock()
|
||||
if fh.closed {
|
||||
return ECLOSED
|
||||
}
|
||||
if size != fh.offset {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle: Truncate: Can't change size without --vfs-cache-mode >= writes")
|
||||
return EPERM
|
||||
}
|
||||
// File is correct size
|
||||
if size == 0 {
|
||||
fh.truncated = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads up to len(p) bytes into p.
|
||||
func (fh *WriteFileHandle) Read(p []byte) (n int, err error) {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle: Read: Can't read and write to file without --vfs-cache-mode >= minimal")
|
||||
return 0, EPERM
|
||||
}
|
||||
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the
|
||||
// underlying input source. It returns the number of bytes read (0 <=
|
||||
// n <= len(p)) and any error encountered.
|
||||
func (fh *WriteFileHandle) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle: ReadAt: Can't read and write to file without --vfs-cache-mode >= minimal")
|
||||
return 0, EPERM
|
||||
}
|
||||
|
||||
// Sync commits the current contents of the file to stable storage. Typically,
// this means flushing the file system's in-memory copy of recently written
// data to disk.
//
// This is a no-op for a write handle - data is streamed to the remote
// as it is written and the object is only set on Close.
func (fh *WriteFileHandle) Sync() error {
	return nil
}
|
||||
245
.rclone_repo/vfs/write_test.go
Executable file
245
.rclone_repo/vfs/write_test.go
Executable file
@@ -0,0 +1,245 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// writeHandleCreate opens "file1" for write on a fresh VFS over the
// test remote and returns the VFS and the resulting WriteFileHandle.
func writeHandleCreate(t *testing.T, r *fstest.Run) (*VFS, *WriteFileHandle) {
	vfs := New(r.Fremote, nil)

	h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
	require.NoError(t, err)
	// with no cache a write-only open must yield a WriteFileHandle
	fh, ok := h.(*WriteFileHandle)
	require.True(t, ok)

	return vfs, fh
}
|
||||
|
||||
// TestWriteFileHandleMethods exercises the basic WriteFileHandle
// methods end to end: String, Node, Offset, Write, Stat, the EPERM
// read paths, Truncate, Close semantics, and reopening behaviour with
// and without O_TRUNC.
func TestWriteFileHandleMethods(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, fh := writeHandleCreate(t, r)

	// String
	assert.Equal(t, "file1 (w)", fh.String())
	assert.Equal(t, "<nil *WriteFileHandle>", (*WriteFileHandle)(nil).String())
	assert.Equal(t, "<nil *WriteFileHandle.file>", new(WriteFileHandle).String())

	// Node
	node := fh.Node()
	assert.Equal(t, "file1", node.Name())

	// Offset #1
	assert.Equal(t, int64(0), fh.Offset())
	assert.Equal(t, int64(0), node.Size())

	// Write (smoke test only since heavy lifting done in WriteAt)
	n, err := fh.Write([]byte("hello"))
	assert.NoError(t, err)
	assert.Equal(t, 5, n)

	// Offset #2
	assert.Equal(t, int64(5), fh.Offset())
	assert.Equal(t, int64(5), node.Size())

	// Stat
	var fi os.FileInfo
	fi, err = fh.Stat()
	assert.NoError(t, err)
	assert.Equal(t, int64(5), fi.Size())
	assert.Equal(t, "file1", fi.Name())

	// Read - not permitted on a write handle
	var buf = make([]byte, 16)
	_, err = fh.Read(buf)
	assert.Equal(t, EPERM, err)

	// ReadAt - not permitted on a write handle
	_, err = fh.ReadAt(buf, 0)
	assert.Equal(t, EPERM, err)

	// Sync
	err = fh.Sync()
	assert.NoError(t, err)

	// Truncate - can only truncate where the file pointer is
	err = fh.Truncate(5)
	assert.NoError(t, err)
	err = fh.Truncate(6)
	assert.Equal(t, EPERM, err)

	// Close
	assert.NoError(t, fh.Close())

	// Check double close
	err = fh.Close()
	assert.Equal(t, ECLOSED, err)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,5,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "hello", t1)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)

	// Check trying to open the file now it exists then closing it
	// immediately is OK
	h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
	require.NoError(t, err)
	assert.NoError(t, h.Close())
	checkListing(t, root, []string{"file1,5,false"})

	// Check trying to open the file and writing it now it exists
	// returns an error
	h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
	require.NoError(t, err)
	_, err = h.Write([]byte("hello1"))
	require.Equal(t, EPERM, err)
	assert.NoError(t, h.Close())
	checkListing(t, root, []string{"file1,5,false"})

	// Check opening the file with O_TRUNC does actually truncate
	// it even if we don't write to it
	h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	require.NoError(t, err)
	assert.NoError(t, h.Close())
	checkListing(t, root, []string{"file1,0,false"})

	// Check opening the file with O_TRUNC and writing does work
	h, err = vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
	require.NoError(t, err)
	_, err = h.WriteString("hello12")
	require.NoError(t, err)
	assert.NoError(t, h.Close())
	checkListing(t, root, []string{"file1,7,false"})
}
|
||||
|
||||
// TestWriteFileHandleWriteAt checks sequential WriteAt semantics:
// writes at the current offset succeed, seeks return ESPIPE, and
// writes on a closed handle return ECLOSED.
func TestWriteFileHandleWriteAt(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, fh := writeHandleCreate(t, r)

	// Preconditions
	assert.Equal(t, int64(0), fh.offset)
	assert.False(t, fh.writeCalled)

	// Write the data
	n, err := fh.WriteAt([]byte("hello"), 0)
	assert.NoError(t, err)
	assert.Equal(t, 5, n)

	// After write
	assert.Equal(t, int64(5), fh.offset)
	assert.True(t, fh.writeCalled)

	// Check can't seek
	n, err = fh.WriteAt([]byte("hello"), 100)
	assert.Equal(t, ESPIPE, err)
	assert.Equal(t, 0, n)

	// Write more data
	n, err = fh.WriteAt([]byte(" world"), 5)
	assert.NoError(t, err)
	assert.Equal(t, 6, n)

	// Close
	assert.NoError(t, fh.Close())

	// Check can't write on closed handle
	n, err = fh.WriteAt([]byte("hello"), 0)
	assert.Equal(t, ECLOSED, err)
	assert.Equal(t, 0, n)

	// check vfs
	root, err := vfs.Root()
	require.NoError(t, err)
	checkListing(t, root, []string{"file1,11,false"})

	// check the underlying r.Fremote but not the modtime
	file1 := fstest.NewItem("file1", "hello world", t1)
	fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{}, fs.ModTimeNotSupported)
}
|
||||
|
||||
// TestWriteFileHandleFlush checks that Flush on an unwritten handle
// creates the file without closing the handle, that Flush after a
// write closes the handle, and that repeated Flush is a no-op.
func TestWriteFileHandleFlush(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, fh := writeHandleCreate(t, r)

	// Check Flush already creates file for unwritten handles, without closing it
	err := fh.Flush()
	assert.NoError(t, err)
	assert.False(t, fh.closed)
	root, err := vfs.Root()
	assert.NoError(t, err)
	checkListing(t, root, []string{"file1,0,false"})

	// Write some data
	n, err := fh.Write([]byte("hello"))
	assert.NoError(t, err)
	assert.Equal(t, 5, n)

	// Check Flush closes file if write called
	err = fh.Flush()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check flush does nothing if called again
	err = fh.Flush()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check file was written properly
	root, err = vfs.Root()
	assert.NoError(t, err)
	checkListing(t, root, []string{"file1,5,false"})
}
|
||||
|
||||
// TestWriteFileHandleRelease checks that Release closes the handle and
// that a second Release is a harmless no-op.
func TestWriteFileHandleRelease(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	_, fh := writeHandleCreate(t, r)

	// Check Release closes file
	err := fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)

	// Check Release does nothing if called again
	err = fh.Release()
	assert.NoError(t, err)
	assert.True(t, fh.closed)
}
|
||||
|
||||
// TestWriteFileModTimeWithOpenWriters tests that a mod time set while
// the file still has an open writer survives the close and is visible
// via Stat afterwards.
func TestWriteFileModTimeWithOpenWriters(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	vfs, fh := writeHandleCreate(t, r)

	mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)

	_, err := fh.Write([]byte{104, 105})
	require.NoError(t, err)

	// set the mod time while the writer is still open
	err = fh.Node().SetModTime(mtime)
	require.NoError(t, err)

	err = fh.Close()
	require.NoError(t, err)

	info, err := vfs.Stat("file1")
	require.NoError(t, err)

	// avoid errors because of timezone differences
	assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
}
|
||||
Reference in New Issue
Block a user