From a83fec756b23fdc2e98aed1919220efb34371060 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Tue, 29 Aug 2023 12:57:33 +0100
Subject: [PATCH] build: fix lint errors when re-enabling revive var-naming

---
 backend/oracleobjectstorage/multipart.go | 20 ++++++++++----------
 backend/s3/s3.go                         | 18 +++++++++---------
 backend/sftp/ssh_external.go             |  4 ++--
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/backend/oracleobjectstorage/multipart.go b/backend/oracleobjectstorage/multipart.go
index 615032ed2..892f51acc 100644
--- a/backend/oracleobjectstorage/multipart.go
+++ b/backend/oracleobjectstorage/multipart.go
@@ -40,7 +40,7 @@ type objectChunkWriter struct {
 	f               *Fs
 	bucket          *string
 	key             *string
-	uploadId        *string
+	uploadID        *string
 	partsToCommit   []objectstorage.CommitMultipartUploadPartDetails
 	partsToCommitMu sync.Mutex
 	existingParts   map[int]objectstorage.MultipartUploadPartSummary
@@ -103,7 +103,7 @@ func (f *Fs) OpenChunkWriter(
 		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
 	}
 
-	uploadId, existingParts, err := o.createMultipartUpload(ctx, ui.req)
+	uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
 	if err != nil {
 		return -1, nil, fmt.Errorf("create multipart upload request failed: %w", err)
 	}
@@ -114,12 +114,12 @@ func (f *Fs) OpenChunkWriter(
 		f:             f,
 		bucket:        &bucketName,
 		key:           &bucketPath,
-		uploadId:      &uploadId,
+		uploadID:      &uploadID,
 		existingParts: existingParts,
 		ui:            ui,
 		o:             o,
 	}
-	fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadId)
+	fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
 	return int64(chunkSize), chunkWriter, err
 }
 
@@ -161,7 +161,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
 		NamespaceName: common.String(w.f.opt.Namespace),
 		BucketName:    w.bucket,
 		ObjectName:    w.key,
-		UploadId:      w.uploadId,
+		UploadId:      w.uploadID,
 		UploadPartNum: common.Int(ossPartNumber),
 		ContentLength: common.Int64(currentChunkSize),
 		ContentMD5:    common.String(md5sum),
@@ -210,7 +210,7 @@ func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
 		NamespaceName: common.String(w.f.opt.Namespace),
 		BucketName:    w.bucket,
 		ObjectName:    w.key,
-		UploadId:      w.uploadId,
+		UploadId:      w.uploadID,
 	}
 	req.PartsToCommit = w.partsToCommit
 	var resp objectstorage.CommitMultipartUploadResponse
@@ -218,7 +218,7 @@ func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
 		resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
 		// if multipart is corrupted, we will abort the uploadId
 		if isMultiPartUploadCorrupted(err) {
-			fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadId)
+			fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
 			_ = w.Abort(ctx)
 			return false, err
 		}
@@ -235,7 +235,7 @@ func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
 		fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
 		return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
 	}
-	fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadId, wantMultipartMd5, gotMultipartMd5)
+	fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
 	return nil
 }
 
@@ -259,11 +259,11 @@ func (w *objectChunkWriter) Abort(ctx context.Context) error {
 		ctx,
 		w.bucket,
 		w.key,
-		w.uploadId)
+		w.uploadID)
 	if err != nil {
 		fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
 	} else {
-		fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadId)
+		fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
 	}
 	return err
 }
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index a925a8d21..d30657e25 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -5301,7 +5301,7 @@ type s3ChunkWriter struct {
 	f                    *Fs
 	bucket               *string
 	key                  *string
-	uploadId             *string
+	uploadID             *string
 	multiPartUploadInput *s3.CreateMultipartUploadInput
 	completedPartsMu     sync.Mutex
 	completedParts       []*s3.CompletedPart
@@ -5370,7 +5370,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		f:                    f,
 		bucket:               mOut.Bucket,
 		key:                  mOut.Key,
-		uploadId:             mOut.UploadId,
+		uploadID:             mOut.UploadId,
 		multiPartUploadInput: &mReq,
 		completedParts:       make([]*s3.CompletedPart, 0),
 		ui:                   ui,
@@ -5438,7 +5438,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		Bucket:        w.bucket,
 		Key:           w.key,
 		PartNumber:    s3PartNumber,
-		UploadId:      w.uploadId,
+		UploadId:      w.uploadID,
 		ContentMD5:    &md5sum,
 		ContentLength: aws.Int64(currentChunkSize),
 		RequestPayer:  w.multiPartUploadInput.RequestPayer,
@@ -5479,15 +5479,15 @@ func (w *s3ChunkWriter) Abort(ctx context.Context) error {
 		_, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
 			Bucket:       w.bucket,
 			Key:          w.key,
-			UploadId:     w.uploadId,
+			UploadId:     w.uploadID,
 			RequestPayer: w.multiPartUploadInput.RequestPayer,
 		})
 		return w.f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return fmt.Errorf("failed to abort multipart upload %q: %w", *w.uploadId, err)
+		return fmt.Errorf("failed to abort multipart upload %q: %w", *w.uploadID, err)
 	}
-	fs.Debugf(w.o, "multipart upload %q aborted", *w.uploadId)
+	fs.Debugf(w.o, "multipart upload %q aborted", *w.uploadID)
 	return err
 }
 
@@ -5506,12 +5506,12 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
 				Parts: w.completedParts,
 			},
 			RequestPayer: w.multiPartUploadInput.RequestPayer,
-			UploadId:     w.uploadId,
+			UploadId:     w.uploadID,
 		})
 		return w.f.shouldRetry(ctx, err)
 	})
 	if err != nil {
-		return fmt.Errorf("failed to complete multipart upload %q: %w", *w.uploadId, err)
+		return fmt.Errorf("failed to complete multipart upload %q: %w", *w.uploadID, err)
 	}
 	if resp != nil {
 		if resp.ETag != nil {
@@ -5521,7 +5521,7 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
 			w.versionID = *resp.VersionId
 		}
 	}
-	fs.Debugf(w.o, "multipart upload %q finished", *w.uploadId)
+	fs.Debugf(w.o, "multipart upload %q finished", *w.uploadID)
 	return err
 }
 
diff --git a/backend/sftp/ssh_external.go b/backend/sftp/ssh_external.go
index 9635401c0..0ac5e6539 100644
--- a/backend/sftp/ssh_external.go
+++ b/backend/sftp/ssh_external.go
@@ -47,7 +47,7 @@ func (s *sshClientExternal) Close() error {
 
 // NewSession makes a new external SSH connection
 func (s *sshClientExternal) NewSession() (sshSession, error) {
-	session := s.f.newSshSessionExternal()
+	session := s.f.newSSHSessionExternal()
 	if s.session == nil {
 		fs.Debugf(s.f, "ssh external: creating additional session")
 	}
@@ -77,7 +77,7 @@ type sshSessionExternal struct {
 	runningSFTP bool
 }
 
-func (f *Fs) newSshSessionExternal() *sshSessionExternal {
+func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
 	s := &sshSessionExternal{
 		f: f,
 	}
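
Aside (illustration only, not part of the patch): revive's var-naming rule enforces Go's initialism convention, which is why rclone's own identifiers change from uploadId to uploadID and newSshSessionExternal to newSSHSessionExternal, while SDK struct fields such as UploadId keep their upstream spelling. A minimal sketch of the convention, using made-up identifier names:

    package example

    // Spellings like uploadId or newSshSession would be flagged by var-naming,
    // because initialisms such as ID and SSH must keep a consistent case.
    var uploadID string

    func newSSHSessionName() string {
    	return uploadID // compliant: the initialism is fully capitalised
    }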