From 31fabb34023a8a7ceabba7290da347eab388a3f1 Mon Sep 17 00:00:00 2001 From: wiserain Date: Sat, 20 Jul 2024 21:50:08 +0900 Subject: [PATCH] pikpak: correct file transfer progress for uploads by hash Pikpak can accelerate file uploads by leveraging existing content in its storage (identified by a custom hash called gcid). Previously, file transfer statistics were incorrect for uploads without outbound traffic as the input stream remained unchanged. This commit addresses the issue by: * Removing unnecessary unwrapping/wrapping of accountings before/after gcid calculation, leading to immediate AccountRead() on buffering. * Correctly tracking file transfer statistics for uploads with no incoming/outgoing traffic by marking them as Server Side Copies. This change ensures correct statistics tracking and improves overall user experience. --- backend/pikpak/pikpak.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/backend/pikpak/pikpak.go b/backend/pikpak/pikpak.go index 3d666867f..a18fe573f 100644 --- a/backend/pikpak/pikpak.go +++ b/backend/pikpak/pikpak.go @@ -1248,6 +1248,12 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string, return nil, fmt.Errorf("invalid response: %+v", new) } else if new.File.Phase == api.PhaseTypeComplete { // early return; in case of zero-byte objects + if acc, ok := in.(*accounting.Account); ok && acc != nil { + // if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused) + // it is considered as a server side copy as no incoming/outgoing traffic occur at all + acc.ServerSideTransferStart() + acc.ServerSideCopyEnd(size) + } return new.File, nil } @@ -1711,18 +1717,12 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi return fmt.Errorf("failed to calculate gcid: %w", err) } } else { - // unwrap the accounting from the input, we use wrap to put it - // back on after the buffering - var wrap accounting.WrapFn - in, wrap =
accounting.UnWrap(in) var cleanup func() gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold)) defer cleanup() if err != nil { return fmt.Errorf("failed to calculate gcid: %w", err) } - // Wrap the accounting back onto the stream - in = wrap(in) } } fs.Debugf(o, "gcid = %s", gcid)