build: fix gocritic lint issue elseif
commit e82b5b11af
parent 4454ed9d3b
13 changed files with 92 additions and 120 deletions
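The whole commit applies one mechanical rewrite: gocritic's elseif check flags an else block whose only statement is another if, and the fix collapses it into else if, dropping one level of nesting without changing behaviour. A minimal sketch of the before/after shape in Go (the function and names below are illustrative only, not taken from the rclone code in the hunks):

    package main

    import "fmt"

    // classify shows the shape that gocritic's elseif check rewrites.
    func classify(n int) (label string) {
        label = "negative"
        if n > 0 {
            label = "positive"
        } else if n == 0 { // before the fix this read: } else { if n == 0 { label = "zero" } }
            label = "zero"
        }
        return label
    }

    func main() {
        fmt.Println(classify(3), classify(0), classify(-1)) // positive zero negative
    }

Each hunk below makes exactly this change: the condition of the lone inner if moves up into the else if header and its body loses one level of indentation.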
@@ -1035,12 +1035,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
             return fmt.Errorf("update: unable to create file: %w", createErr)
         }
-    } else {
-        // Resize the file if needed
-        if size != o.Size() {
-            if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
-                return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
-            }
-        }
+    } else if size != o.Size() {
+        // Resize the file if needed
+        if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
+            return fmt.Errorf("update: unable to resize while trying to update: %w ", resizeErr)
+        }
     }

backend/cache/cache.go (vendored): 22 changed lines
@@ -409,18 +409,16 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
         if err != nil {
             return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
         }
-    } else {
-        if opt.PlexPassword != "" && opt.PlexUsername != "" {
-            decPass, err := obscure.Reveal(opt.PlexPassword)
-            if err != nil {
-                decPass = opt.PlexPassword
-            }
-            f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
-                m.Set("plex_token", token)
-            })
-            if err != nil {
-                return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
-            }
-        }
+    } else if opt.PlexPassword != "" && opt.PlexUsername != "" {
+        decPass, err := obscure.Reveal(opt.PlexPassword)
+        if err != nil {
+            decPass = opt.PlexPassword
+        }
+        f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
+            m.Set("plex_token", token)
+        })
+        if err != nil {
+            return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+        }
     }
 }
backend/cache/handle.go (vendored): 6 changed lines
@@ -415,10 +415,8 @@ func (w *worker) run() {
                 continue
             }
         }
-    } else {
-        if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
-            continue
-        }
+    } else if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
+        continue
     }

     chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
@@ -58,12 +58,10 @@ func populateSSECustomerKeys(opt *Options) error {
     sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
     if opt.SSECustomerKeySha256 == "" {
         opt.SSECustomerKeySha256 = sha256Checksum
-    } else {
-        if opt.SSECustomerKeySha256 != sha256Checksum {
-            return fmt.Errorf("the computed SHA256 checksum "+
-                "(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
-                sha256Checksum, opt.SSECustomerKeySha256)
-        }
+    } else if opt.SSECustomerKeySha256 != sha256Checksum {
+        return fmt.Errorf("the computed SHA256 checksum "+
+            "(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
+            sha256Checksum, opt.SSECustomerKeySha256)
     }
     if opt.SSECustomerAlgorithm == "" {
         opt.SSECustomerAlgorithm = sseDefaultAlgorithm
@@ -4173,11 +4173,9 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
             if opt.noSkipMarkers {
                 // process directory markers as files
                 isDirectory = false
-            } else {
-                // Don't insert the root directory
-                if remote == f.opt.Enc.ToStandardPath(opt.directory) {
-                    continue
-                }
+            } else if remote == f.opt.Enc.ToStandardPath(opt.directory) {
+                // Don't insert the root directory
+                continue
             }
         }
         remote = remote[len(opt.prefix):]
@@ -296,16 +296,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     //request object meta info
     if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {

-    } else {
-        if info.ResourceType == "file" {
-            rootDir := path.Dir(root)
-            if rootDir == "." {
-                rootDir = ""
-            }
-            f.setRoot(rootDir)
-            // return an error with an fs which points to the parent
-            return f, fs.ErrorIsFile
-        }
+    } else if info.ResourceType == "file" {
+        rootDir := path.Dir(root)
+        if rootDir == "." {
+            rootDir = ""
+        }
+        f.setRoot(rootDir)
+        // return an error with an fs which points to the parent
+        return f, fs.ErrorIsFile
     }
     return f, nil
 }
@@ -190,51 +190,49 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
             b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
             ds.deleted++
             d |= deltaDeleted
-        } else {
-            // skip dirs here, as we only care if they are new/deleted, not newer/older
-            if !now.isDir(file) {
-                whatchanged := []string{}
-                if b.opt.Compare.Size {
-                    if sizeDiffers(old.getSize(file), now.getSize(file)) {
-                        fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
-                        if now.getSize(file) > old.getSize(file) {
-                            whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
-                            d |= deltaLarger
-                        } else {
-                            whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
-                            d |= deltaSmaller
-                        }
-                        s = now.getSize(file)
-                    }
-                }
-                if b.opt.Compare.Modtime {
-                    if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
-                        if old.beforeOther(now, file) {
-                            fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
-                            whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
-                            d |= deltaNewer
-                        } else { // Current version is older than prior sync.
-                            fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
-                            whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
-                            d |= deltaOlder
-                        }
-                        t = now.getTime(file)
-                    }
-                }
-                if b.opt.Compare.Checksum {
-                    if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
-                        fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
-                        whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
-                        d |= deltaHash
-                        h = now.getHash(file)
-                    }
-                }
-                // concat changes and print log
-                if d.is(deltaModified) {
-                    summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
-                    b.indent(msg, file, summary)
-                }
-            }
-        }
+        } else if !now.isDir(file) {
+            // skip dirs here, as we only care if they are new/deleted, not newer/older
+            whatchanged := []string{}
+            if b.opt.Compare.Size {
+                if sizeDiffers(old.getSize(file), now.getSize(file)) {
+                    fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
+                    if now.getSize(file) > old.getSize(file) {
+                        whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
+                        d |= deltaLarger
+                    } else {
+                        whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
+                        d |= deltaSmaller
+                    }
+                    s = now.getSize(file)
+                }
+            }
+            if b.opt.Compare.Modtime {
+                if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
+                    if old.beforeOther(now, file) {
+                        fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
+                        whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
+                        d |= deltaNewer
+                    } else { // Current version is older than prior sync.
+                        fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
+                        whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
+                        d |= deltaOlder
+                    }
+                    t = now.getTime(file)
+                }
+            }
+            if b.opt.Compare.Checksum {
+                if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
+                    fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
+                    whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
+                    d |= deltaHash
+                    h = now.getHash(file)
+                }
+            }
+            // concat changes and print log
+            if d.is(deltaModified) {
+                summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
+                b.indent(msg, file, summary)
+            }
+        }

         if d.is(deltaModified) {
@@ -476,10 +476,8 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
         if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
             b.indent("ERROR", file, "Path1 file not found in Path2")
             ok = false
-        } else {
-            if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
-                ok = false
-            }
+        } else if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
+            ok = false
         }
     }
     for _, file := range files2.list {
@@ -153,16 +153,14 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
         }
         configKey = []byte(obscure.MustReveal(string(obscuredKey)))
         fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey")
-    } else {
-        if len(configKey) == 0 {
-            if usingPasswordCommand {
-                return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
-            }
-            if !ci.AskPassword {
-                return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
-            }
-            getConfigPassword("Enter configuration password:")
-        }
+    } else if len(configKey) == 0 {
+        if usingPasswordCommand {
+            return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
+        }
+        if !ci.AskPassword {
+            return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
+        }
+        getConfigPassword("Enter configuration password:")
     }

     // Nonce is first 24 bytes of the ciphertext
@@ -1865,14 +1865,12 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
         if OverlappingFilterCheck(ctx, backupDir, fsrc) {
             return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
         }
-    } else {
-        if ci.Suffix == "" {
-            if SameDir(fdst, backupDir) {
-                return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
-            }
-            if SameDir(fsrc, backupDir) {
-                return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
-            }
-        }
+    } else if ci.Suffix == "" {
+        if SameDir(fdst, backupDir) {
+            return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
+        }
+        if SameDir(fsrc, backupDir) {
+            return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
+        }
     }
 } else if ci.Suffix != "" {
@@ -2041,15 +2039,13 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
         }

         _, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
-    } else {
-        if !cp {
-            if ci.IgnoreExisting {
-                fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
-                logger(ctx, Match, srcObj, dstObj, nil)
-            } else if !SameObject(srcObj, dstObj) {
-                err = DeleteFile(ctx, srcObj)
-                logger(ctx, Differ, srcObj, dstObj, nil)
-            }
-        }
+    } else if !cp {
+        if ci.IgnoreExisting {
+            fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
+            logger(ctx, Match, srcObj, dstObj, nil)
+        } else if !SameObject(srcObj, dstObj) {
+            err = DeleteFile(ctx, srcObj)
+            logger(ctx, Differ, srcObj, dstObj, nil)
+        }
     }
     return err
@@ -819,10 +819,8 @@ func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
         if srcFs != nil {
             return nil, rc.NewErrParamInvalid(errors.New("only supply dstFs when using checkFileHash"))
         }
-    } else {
-        if srcFs == nil {
-            return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
-        }
+    } else if srcFs == nil {
+        return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
     }

     oneway, _ := in.GetBool("oneway")
@@ -358,10 +358,8 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
             return "", err
         }
         dc.rootParentID = rootParentID
-    } else {
-        if dc.rootID == dc.trueRootID {
-            return "", errors.New("is root directory")
-        }
+    } else if dc.rootID == dc.trueRootID {
+        return "", errors.New("is root directory")
     }
     if dc.rootParentID == "" {
         return "", errors.New("internal error: didn't find rootParentID")
@@ -203,11 +203,9 @@ func (fh *WriteFileHandle) close() (err error) {
     if err == nil {
         fh.file.setObject(fh.o)
         err = writeCloseErr
-    } else {
-        // Remove vfs file entry when no object is present
-        if fh.file.getObject() == nil {
-            _ = fh.file.Remove()
-        }
+    } else if fh.file.getObject() == nil {
+        // Remove vfs file entry when no object is present
+        _ = fh.file.Remove()
     }
     return err
 }