diff --git a/backend/alias/alias_internal_test.go b/backend/alias/alias_internal_test.go
index a79769ab3..57ac2059d 100644
--- a/backend/alias/alias_internal_test.go
+++ b/backend/alias/alias_internal_test.go
@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
 			wantEntry := test.entries[i]
 
 			require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-			require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
+			require.Equal(t, wantEntry.size, gotEntry.Size(), what)
 			_, isDir := gotEntry.(fs.Directory)
 			require.Equal(t, wantEntry.isDir, isDir, what)
 		}
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index 2a823b4b3..13a98e70e 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -77,7 +77,7 @@ func init() {
 		}, {
 			Name:     "upload_cutoff",
 			Help:     "Cutoff for switching to chunked upload (<= 256MB).",
-			Default:  fs.SizeSuffix(defaultUploadCutoff),
+			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -85,7 +85,7 @@
 
 Note that this is stored in memory and there may be up to
 "--transfers" chunks stored at once in memory.`,
-			Default:  fs.SizeSuffix(defaultChunkSize),
+			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
 			Name: "list_chunk",
@@ -1037,7 +1037,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
 	o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
 	o.mimeType = info.ContentType()
 	o.size = size
-	o.modTime = time.Time(info.LastModified())
+	o.modTime = info.LastModified()
 	o.accessTier = azblob.AccessTierType(info.AccessTier())
 	o.setMetadata(metadata)
 
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index f45ac858f..5f21bb692 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -108,7 +108,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 Files above this size will be uploaded in chunks of "--b2-chunk-size".
 
 This value should be set no larger than 4.657GiB (== 5GB).`,
-			Default:  fs.SizeSuffix(defaultUploadCutoff),
+			Default:  defaultUploadCutoff,
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -118,7 +118,7 @@ When uploading large files, chunk the file into this size.
 Note that these chunks are buffered in memory and there might be a maximum of "--transfers" chunks in progress at once.
 
 5,000,000 Bytes is the minimum size.`,
-			Default:  fs.SizeSuffix(defaultChunkSize),
+			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
 			Name: "disable_checksum",
diff --git a/backend/box/upload.go b/backend/box/upload.go
index a3133cb76..3afad813e 100644
--- a/backend/box/upload.go
+++ b/backend/box/upload.go
@@ -211,8 +211,8 @@ outer:
 		}
 
 		reqSize := remaining
-		if reqSize >= int64(chunkSize) {
-			reqSize = int64(chunkSize)
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
 		}
 
 		// Make a block of memory
diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go
index 3cd720606..aacd5684b 100644
--- a/backend/cache/cache_internal_test.go
+++ b/backend/cache/cache_internal_test.go
@@ -387,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 
 	// write the object
 	o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-	require.Equal(t, o.Size(), int64(testSize))
+	require.Equal(t, o.Size(), testSize)
 	time.Sleep(time.Second * 3)
 
-	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
+	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())
 
diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go
index 967908737..b6818c43a 100644
--- a/backend/cache/storage_persistent.go
+++ b/backend/cache/storage_persistent.go
@@ -398,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 		if err != nil {
 			return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 		}
-		err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
+		err = bucket.Put([]byte(cachedObject.Name), encoded)
 		if err != nil {
 			return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 		}
@@ -809,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 		if err != nil {
 			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 		}
-		err = bucket.Put([]byte(destPath), []byte(encoded))
+		err = bucket.Put([]byte(destPath), encoded)
 		if err != nil {
 			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}
@@ -1049,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 			if err != nil {
 				return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 			}
-			err = bucket.Put([]byte(destPath), []byte(encoded))
+			err = bucket.Put([]byte(destPath), encoded)
 			if err != nil {
 				return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 			}
diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go
index ab0656ba8..655216851 100644
--- a/backend/crypt/cipher.go
+++ b/backend/crypt/cipher.go
@@ -463,7 +463,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 			if int(newRune) < base {
 				newRune += 256
 			}
-			_, _ = result.WriteRune(rune(newRune))
+			_, _ = result.WriteRune(newRune)
 
 		default:
 			_, _ = result.WriteRune(runeValue)
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index e4ad4ba79..c08d2638e 100644
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -130,8 +130,8 @@ Any files larger than this will be uploaded in chunks of this size.
 Note that chunks are buffered in memory (one at a time) so rclone
 can deal with retries. Setting this larger will increase the speed
 slightly (at most 10%% for 128MB in tests) at the cost of using more
-memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
-	Default:  fs.SizeSuffix(defaultChunkSize),
+memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
+	Default:  defaultChunkSize,
 	Advanced: true,
 }, {
 	Name: "impersonate",
diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go
index 51d0dc529..0dc33fd6b 100644
--- a/backend/jottacloud/jottacloud.go
+++ b/backend/jottacloud/jottacloud.go
@@ -1006,7 +1006,7 @@ func (o *Object) MimeType() string {
 // setMetaData sets the metadata from info
 func (o *Object) setMetaData(info *api.JottaFile) (err error) {
 	o.hasMetaData = true
-	o.size = int64(info.Size)
+	o.size = info.Size
 	o.md5 = info.MD5
 	o.mimeType = info.MimeType
 	o.modTime = time.Time(info.ModifiedAt)
@@ -1212,7 +1212,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 		// finally update the meta data
 		o.hasMetaData = true
-		o.size = int64(result.Bytes)
+		o.size = result.Bytes
 		o.md5 = result.Md5
 		o.modTime = time.Unix(result.Modified/1000, 0)
 	} else {
diff --git a/backend/local/about_unix.go b/backend/local/about_unix.go
index 745f2e559..5db54f6de 100644
--- a/backend/local/about_unix.go
+++ b/backend/local/about_unix.go
@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to read disk usage")
 	}
-	bs := int64(s.Bsize)
+	bs := int64(s.Bsize) // nolint: unconvert
 	usage := &fs.Usage{
 		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
 		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
diff --git a/backend/local/read_device_unix.go b/backend/local/read_device_unix.go
index 1b2b0c539..38cf60ed7 100644
--- a/backend/local/read_device_unix.go
+++ b/backend/local/read_device_unix.go
@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
 		fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
 		return devUnset
 	}
-	return uint64(statT.Dev)
+	return uint64(statT.Dev) // nolint: unconvert
 }
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 5cc4897d3..994e91579 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -785,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			remote := path.Join(dir, folder.Name)
 			// cache the directory ID for later lookups
 			f.dirCache.Put(remote, folder.FolderID)
-			d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
+			d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
 			d.SetItems(int64(folder.ChildFolders))
 			entries = append(entries, d)
 		}
diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go
index d529beb85..1359b14e0 100644
--- a/backend/qingstor/upload.go
+++ b/backend/qingstor/upload.go
@@ -143,7 +143,7 @@ func (u *uploader) init() {
 
 	// Try to adjust partSize if it is too small and account for
 	// integer division truncation.
-	if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
+	if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
 		// Add one to the part size to account for remainders
 		// during the size calculation. e.g odd number of bytes.
 		u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go
index f237d6834..b48463fff 100644
--- a/backend/yandex/yandex.go
+++ b/backend/yandex/yandex.go
@@ -307,7 +307,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
 		if err != nil {
 			return nil, errors.Wrap(err, "error parsing time in directory item")
 		}
-		d := fs.NewDir(remote, t).SetSize(int64(object.Size))
+		d := fs.NewDir(remote, t).SetSize(object.Size)
 		return d, nil
 	case "file":
 		o, err := f.newObjectWithInfo(remote, object)
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 1b6842e2e..c86c55d2b 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -456,7 +456,7 @@ func AddBackendFlags() {
 			help = help[:nl]
 		}
 		help = strings.TrimSpace(help)
-		flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
+		flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
 		if _, isBool := opt.Default.(bool); isBool {
 			flag.NoOptDefVal = "true"
 		}
diff --git a/cmd/mount/handle.go b/cmd/mount/handle.go
index f987033bf..5d32a7d7b 100644
--- a/cmd/mount/handle.go
+++ b/cmd/mount/handle.go
@@ -45,7 +45,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 	if err != nil {
 		return translateError(err)
 	}
-	resp.Size = int(n)
+	resp.Size = n
 	return nil
 }
 
diff --git a/cmd/serve/dlna/cds.go b/cmd/serve/dlna/cds.go
index 79330bc54..409997a71 100644
--- a/cmd/serve/dlna/cds.go
+++ b/cmd/serve/dlna/cds.go
@@ -158,7 +158,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 		}, nil
 	case "Browse":
 		var browse browse
-		if err := xml.Unmarshal([]byte(argsXML), &browse); err != nil {
+		if err := xml.Unmarshal(argsXML, &browse); err != nil {
 			return nil, err
 		}
 		obj, err := cds.objectFromID(browse.ObjectID)
@@ -179,7 +179,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 			}
 			return
 		}():]
-		if browse.RequestedCount != 0 && int(browse.RequestedCount) < len(objs) {
+		if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
 			objs = objs[:browse.RequestedCount]
 		}
 		result, err := xml.Marshal(objs)
diff --git a/fs/hash/hash.go b/fs/hash/hash.go
index 57b206bfd..005933911 100644
--- a/fs/hash/hash.go
+++ b/fs/hash/hash.go
@@ -255,7 +255,7 @@ func (h Set) GetOne() Type {
 		i++
 		v >>= 1
 	}
-	return Type(None)
+	return None
 }
 
 // Array returns an array of all hash types in the set
diff --git a/lib/readers/repeatable.go b/lib/readers/repeatable.go
index b902f3802..bb308b3e7 100644
--- a/lib/readers/repeatable.go
+++ b/lib/readers/repeatable.go
@@ -100,5 +100,5 @@ func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
 // NewRepeatableLimitReaderBuffer create new repeatable reader from
 // Reader r and buf wrapped in a io.LimitReader to read only size.
 func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
-	return NewRepeatableReaderBuffer(io.LimitReader(r, int64(size)), buf)
+	return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
 }
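
Every hunk above is the same mechanical cleanup, apparently driven by the unconvert linter (the nolint annotations in backend/local name it): a value is converted to a type it already has, such as int64 of an int64 Size, []byte of the []byte returned by json.Marshal, or string of a string field, and the redundant conversion is dropped. The two backend/local hunks are the exception: there the conversion is only redundant on some platforms, since syscall.Stat_t field widths vary by OS, so the conversion stays and the linter is silenced with "// nolint: unconvert" instead. A minimal self-contained sketch of the pattern follows; the object type and names are hypothetical stand-ins, not code from this patch:

    package main

    import "fmt"

    // object stands in for the backend types touched above; Size already
    // returns int64, mirroring rclone's fs.Object interface.
    type object struct{ size int64 }

    func (o object) Size() int64 { return o.size }

    func main() {
    	o := object{size: 1 << 20}

    	// Before: int64(o.Size()) converts an int64 to int64, a no-op
    	// that the unconvert linter reports.
    	before := int64(o.Size())

    	// After: use the value directly; behaviour is identical.
    	after := o.Size()

    	fmt.Println(before == after) // true
    }

Where a conversion must stay for portability, the pattern in the patch is to keep it and annotate, as in "return uint64(statT.Dev) // nolint: unconvert" above.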