From f689db4422f05086bb1eae20d1b21a054a1d74a3 Mon Sep 17 00:00:00 2001
From: Leandro Sacchet <leandro.sacchet@animati.com.br>
Date: Wed, 3 Aug 2022 11:53:02 -0300
Subject: [PATCH] fs: Add --max-delete-size, a delete size threshold

Fixes #3329
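
This mirrors --max-delete: as soon as deleting another file would take
the total size of deletions past the threshold, that delete is refused
with a fatal error and the operation in progress is stopped.

Example invocation (the source path and remote name are placeholders):

    rclone sync /path/to/source remote:backup --max-delete-size 100M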
---
 docs/content/docs.md                 | 11 ++++++
 fs/accounting/stats.go               | 10 +++++
 fs/config.go                         |  2 +
 fs/config/configflags/configflags.go |  1 +
 fs/operations/operations.go          |  6 +++
 fs/operations/operations_test.go     | 56 ++++++++++++++++++++++++++++
 6 files changed, 86 insertions(+)

diff --git a/docs/content/docs.md b/docs/content/docs.md
index b00177c2b..ff198e50d 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -1334,6 +1334,17 @@ This tells rclone not to delete more than N files.  If that limit is
 exceeded then a fatal error will be generated and rclone will stop the
 operation in progress.
 
+### --max-delete-size=SIZE ###
+
+Rclone will stop deleting files when the total size of deletions
+would exceed the size specified. It defaults to off.
+
+If that limit is exceeded then a fatal error will be generated and
+rclone will stop the operation in progress.
+
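+For example, `--max-delete-size 100M` refuses any delete that would
+take the total size of deletions in the current run over 100 MiB.
+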
 ### --max-depth=N ###
 
 This modifies the recursion depth for all the commands except purge.
diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go
index deb30523d..8e1310483 100644
--- a/fs/accounting/stats.go
+++ b/fs/accounting/stats.go
@@ -48,6 +48,7 @@ type StatsInfo struct {
 	renameQueue       int
 	renameQueueSize   int64
 	deletes           int64
+	deletesSize       int64
 	deletedDirs       int64
 	inProgress        *inProgress
 	startedTransfers  []*Transfer   // currently active transfers
@@ -598,6 +599,14 @@ func (s *StatsInfo) Deletes(deletes int64) int64 {
 	return s.deletes
 }
 
+// DeletesSize updates the stats for deletesSize, the total size of deleted files
+func (s *StatsInfo) DeletesSize(deletesSize int64) int64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.deletesSize += deletesSize
+	return s.deletesSize
+}
+
 // DeletedDirs updates the stats for deletedDirs
 func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64 {
 	s.mu.Lock()
@@ -627,6 +636,7 @@ func (s *StatsInfo) ResetCounters() {
 	s.checks = 0
 	s.transfers = 0
 	s.deletes = 0
+	s.deletesSize = 0
 	s.deletedDirs = 0
 	s.renames = 0
 	s.startedTransfers = nil
diff --git a/fs/config.go b/fs/config.go
index 8fc8e45d9..4049b6ea4 100644
--- a/fs/config.go
+++ b/fs/config.go
@@ -71,6 +71,7 @@ type ConfigInfo struct {
 	InsecureSkipVerify      bool // Skip server certificate verification
 	DeleteMode              DeleteMode
 	MaxDelete               int64
+	MaxDeleteSize           SizeSuffix
 	TrackRenames            bool   // Track file renames.
 	TrackRenamesStrategy    string // Comma separated list of strategies used to track renames
 	LowLevelRetries         int
@@ -162,6 +163,7 @@ func NewConfig() *ConfigInfo {
 	c.ExpectContinueTimeout = 1 * time.Second
 	c.DeleteMode = DeleteModeDefault
 	c.MaxDelete = -1
+	c.MaxDeleteSize = SizeSuffix(-1)
 	c.LowLevelRetries = 10
 	c.MaxDepth = -1
 	c.DataRateUnit = "bytes"
diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go
index 2ecf9e8c7..96d728fcc 100644
--- a/fs/config/configflags/configflags.go
+++ b/fs/config/configflags/configflags.go
@@ -71,6 +71,7 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
 	flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
 	flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
 	flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
+	flags.FVarP(flagSet, &ci.MaxDeleteSize, "max-delete-size", "", "When synchronizing, limit the total size of deletes")
 	flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
 	flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
 	flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do")
diff --git a/fs/operations/operations.go b/fs/operations/operations.go
index 7f901a715..7288cbf0d 100644
--- a/fs/operations/operations.go
+++ b/fs/operations/operations.go
@@ -637,6 +637,12 @@ func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs
 	defer func() {
 		tr.Done(ctx, err)
 	}()
+	deletesSize := accounting.Stats(ctx).DeletesSize(0) // adding 0 just reads the current total; nothing is deleted yet
+	size := dst.Size()
+	if int64(ci.MaxDeleteSize) != -1 && (deletesSize+size) > int64(ci.MaxDeleteSize) {
+		return fserrors.FatalError(errors.New("--max-delete-size threshold reached"))
+	}
+	_ = accounting.Stats(ctx).DeletesSize(size) // the delete will go ahead, so count its size now
 	numDeletes := accounting.Stats(ctx).Deletes(1)
 	if ci.MaxDelete != -1 && numDeletes > ci.MaxDelete {
 		return fserrors.FatalError(errors.New("--max-delete threshold reached"))
diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go
index d1e2d8fdb..acced6855 100644
--- a/fs/operations/operations_test.go
+++ b/fs/operations/operations_test.go
@@ -419,6 +419,62 @@ func TestDelete(t *testing.T) {
 	r.CheckRemoteItems(t, file3)
 }
 
+func TestMaxDelete(t *testing.T) {
+	ctx := context.Background()
+	ctx, ci := fs.AddConfig(ctx)
+	r := fstest.NewRun(t)
+	accounting.GlobalStats().ResetCounters()
+	ci.MaxDelete = 2
+	defer r.Finalise()
+	file1 := r.WriteObject(ctx, "small", "1234567890", t2)                                                                                           // 10 bytes
+	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1)                                        // 60 bytes
+	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
+	r.CheckRemoteItems(t, file1, file2, file3)
+	err := operations.Delete(ctx, r.Fremote)
+
+	require.Error(t, err)
+	objects, _, _, err := operations.Count(ctx, r.Fremote)
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), objects)
+}
+
+// TestMaxDeleteSizeLargeFile checks that a file bigger than the limit is never deleted while the smaller files are
+func TestMaxDeleteSizeLargeFile(t *testing.T) {
+	ctx := context.Background()
+	ctx, ci := fs.AddConfig(ctx)
+	r := fstest.NewRun(t)
+	accounting.GlobalStats().ResetCounters()
+	ci.MaxDeleteSize = 70
+	defer r.Finalise()
+	file1 := r.WriteObject(ctx, "small", "1234567890", t2)                                                                                           // 10 bytes
+	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1)                                        // 60 bytes
+	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
+	r.CheckRemoteItems(t, file1, file2, file3)
+
+	err := operations.Delete(ctx, r.Fremote)
+	require.Error(t, err)
+	r.CheckRemoteItems(t, file3)
+}
+
+func TestMaxDeleteSize(t *testing.T) {
+	ctx := context.Background()
+	ctx, ci := fs.AddConfig(ctx)
+	r := fstest.NewRun(t)
+	accounting.GlobalStats().ResetCounters()
+	ci.MaxDeleteSize = 160
+	defer r.Finalise()
+	file1 := r.WriteObject(ctx, "small", "1234567890", t2)                                                                                           // 10 bytes
+	file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1)                                        // 60 bytes
+	file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
+	r.CheckRemoteItems(t, file1, file2, file3)
+
+	err := operations.Delete(ctx, r.Fremote)
+	require.Error(t, err)
+	objects, _, _, err := operations.Count(ctx, r.Fremote)
+	require.NoError(t, err)
+	assert.Equal(t, int64(1), objects) // exactly one file survives; which one depends on the deletion order
+}
+
 func TestRetry(t *testing.T) {
 	ctx := context.Background()