diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go
index d0e71a876..4947512cc 100644
--- a/pkg/local_object_storage/blobovnicza/control.go
+++ b/pkg/local_object_storage/blobovnicza/control.go
@@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
 		})
 	})
 	if err != nil {
-		return fmt.Errorf("can't determine DB size: %w", err)
+		return fmt.Errorf("determine DB size: %w", err)
 	}
 	if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
 		b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
@@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
 			return saveItemsCount(tx, items)
 		}); err != nil {
 			b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
-			return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
+			return fmt.Errorf("save blobovnicza's size and items count: %w", err)
 		}
 		b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
 	}
diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go
index 01e5529da..cd33b263c 100644
--- a/pkg/local_object_storage/blobovnicza/iterate.go
+++ b/pkg/local_object_storage/blobovnicza/iterate.go
@@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
 				if prm.ignoreErrors {
 					return nil
 				}
-				return fmt.Errorf("could not decode address key: %w", err)
+				return fmt.Errorf("decode address key: %w", err)
 			}
 		}
 
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
index 1a4f11c29..5d158644e 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go
@@ -115,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
 	// decompress the data
 	data, err := b.compression.Decompress(res.Object())
 	if err != nil {
-		return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+		return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
 	}
 
 	// unmarshal the object
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+		return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
 	}
 
 	return common.GetRes{Object: obj, RawData: data}, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
index 6d06b8e6f..84b9bc55f 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go
@@ -130,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
 	// decompress the data
 	data, err := b.compression.Decompress(res.Object())
 	if err != nil {
-		return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
+		return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
 	}
 
 	// unmarshal the object
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+		return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
 	}
 
 	from := prm.Range.GetOffset()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index a710cf988..5c2d58ca1 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -49,7 +49,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
 					zap.String("root_path", b.rootPath))
 				return nil
 			}
-			return fmt.Errorf("could not decompress object data: %w", err)
+			return fmt.Errorf("decompress object data: %w", err)
 		}
 
 		if prm.Handler != nil {
@@ -82,7 +82,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
 				zap.String("root_path", b.rootPath))
 			return false, nil
 		}
-		return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
+		return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
 	}
 
 	defer shBlz.Close(ctx)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
index 7d44aa5c6..f2f9509ad 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go
@@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
 	)...)
 
 	if err := blz.Open(ctx); err != nil {
-		return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
 	}
 	if err := blz.Init(ctx); err != nil {
-		return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
+		return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
 	}
 
 	b.refCount++
@@ -127,7 +127,7 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
 			zap.String("id", b.path),
 			zap.Error(err),
 		)
-		return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
+		return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
 	}
 
 	b.refCount = 0
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index a77ad2f93..031b385b2 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 
 	return count, size, nil
@@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
 		},
 	)
 	if err != nil {
-		return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+		return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
 	}
 	success = true
 	return result, nil
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go
index 0252c7983..3afef7d18 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore.go
@@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
 	// Decompress the data.
 	var err error
 	if data, err = s.compression.Decompress(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
+		return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
 	}
 
 	// Unmarshal the SDK object.
 	obj := objectSDK.New()
 	if err := obj.Unmarshal(data); err != nil {
-		return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
+		return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
 	}
 
 	return common.GetRes{Object: obj, RawData: data}, nil
diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go
index af19e398e..80268fa7a 100644
--- a/pkg/local_object_storage/blobstor/mode.go
+++ b/pkg/local_object_storage/blobstor/mode.go
@@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
 		}
 	}
 	if err != nil {
-		return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
+		return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
 	}
 
 	b.mode = m
diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go
index 342da28bf..fe9c109dd 100644
--- a/pkg/local_object_storage/blobstor/put.go
+++ b/pkg/local_object_storage/blobstor/put.go
@@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
 		// marshal object
 		data, err := prm.Object.Marshal()
 		if err != nil {
-			return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
+			return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
 		}
 		prm.RawData = data
 	}
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index bd9eb1021..6a416cfd9 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	err := eg.Wait()
 	close(errCh)
 	if err != nil {
-		return fmt.Errorf("failed to initialize shards: %w", err)
+		return fmt.Errorf("initialize shards: %w", err)
 	}
 
 	for res := range errCh {
@@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 				continue
 			}
 
-			return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
+			return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
 		}
 	}
 
@@ -320,7 +320,7 @@ loop:
 	for _, newID := range shardsToAdd {
 		sh, err := e.createShard(ctx, rcfg.shards[newID])
 		if err != nil {
-			return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
+			return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
 		}
 
 		idStr := sh.ID().String()
@@ -331,13 +331,13 @@ loop:
 		}
 		if err != nil {
 			_ = sh.Close(ctx)
-			return fmt.Errorf("could not init %s shard: %w", idStr, err)
+			return fmt.Errorf("init %s shard: %w", idStr, err)
 		}
 
 		err = e.addShard(sh)
 		if err != nil {
 			_ = sh.Close(ctx)
-			return fmt.Errorf("could not add %s shard: %w", idStr, err)
+			return fmt.Errorf("add %s shard: %w", idStr, err)
 		}
 
 		e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 2e0344bfb..623f5c941 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -578,7 +578,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
 
 func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
 	if prm.TreeHandler == nil {
-		return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
+		return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
 	}
 
 	return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 898f685ec..6d4844b75 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -108,12 +108,12 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
 func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
 	sh, err := e.createShard(ctx, opts)
 	if err != nil {
-		return nil, fmt.Errorf("could not create a shard: %w", err)
+		return nil, fmt.Errorf("create a shard: %w", err)
 	}
 
 	err = e.addShard(sh)
 	if err != nil {
-		return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
+		return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
 	}
 
 	e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
@@ -124,7 +124,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
 func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
 	id, err := generateShardID()
 	if err != nil {
-		return nil, fmt.Errorf("could not generate shard ID: %w", err)
+		return nil, fmt.Errorf("generate shard ID: %w", err)
 	}
 
 	opts = e.appendMetrics(id, opts)
@@ -180,7 +180,7 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
 
 	pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
 	if err != nil {
-		return fmt.Errorf("could not create pool: %w", err)
+		return fmt.Errorf("create pool: %w", err)
 	}
 
 	strID := sh.ID().String()
@@ -374,7 +374,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
 				zap.Error(err),
 			)
 			multiErrGuard.Lock()
-			multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err))
+			multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
 			multiErrGuard.Unlock()
 		}
 
@@ -385,7 +385,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
 				zap.Error(err),
 			)
 			multiErrGuard.Lock()
-			multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err))
+			multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
 			multiErrGuard.Unlock()
 		}
 		return nil
diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go
index 07fa7e9cf..c19c65224 100644
--- a/pkg/local_object_storage/metabase/control.go
+++ b/pkg/local_object_storage/metabase/control.go
@@ -54,7 +54,7 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error {
 func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
 	err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
 	if err != nil {
-		return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
+		return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
 	}
 	db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
 
@@ -73,7 +73,7 @@ func (db *DB) openBolt(ctx context.Context) error {
 
 	db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
 	if err != nil {
-		return fmt.Errorf("can't open boltDB database: %w", err)
+		return fmt.Errorf("open boltDB database: %w", err)
 	}
 	db.boltDB.MaxBatchDelay = db.boltBatchDelay
 	db.boltDB.MaxBatchSize = db.boltBatchSize
@@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error {
 		if reset {
 			err := tx.DeleteBucket(name)
 			if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
-				return fmt.Errorf("could not delete static bucket %s: %w", k, err)
+				return fmt.Errorf("delete static bucket %s: %w", k, err)
 			}
 		}
 
 		_, err := tx.CreateBucketIfNotExists(name)
 		if err != nil {
-			return fmt.Errorf("could not create static bucket %s: %w", k, err)
+			return fmt.Errorf("create static bucket %s: %w", k, err)
 		}
 	}
 
 	for _, b := range deprecatedBuckets {
 		err := tx.DeleteBucket(b)
 		if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
-			return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err)
+			return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
 		}
 	}
 
 	if !reset { // counters will be recalculated by refill metabase
 		err = syncCounter(tx, false)
 		if err != nil {
-			return fmt.Errorf("could not sync object counter: %w", err)
+			return fmt.Errorf("sync object counter: %w", err)
 		}
 
 		return nil
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 3ead0d9a0..f29dafe77 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -238,14 +238,14 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
 	}
 
 	if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
-		return fmt.Errorf("could not increase phy object counter: %w", err)
+		return fmt.Errorf("increase phy object counter: %w", err)
 	}
 	if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
-		return fmt.Errorf("could not increase logical object counter: %w", err)
+		return fmt.Errorf("increase logical object counter: %w", err)
 	}
 	if isUserObject {
 		if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
-			return fmt.Errorf("could not increase user object counter: %w", err)
+			return fmt.Errorf("increase user object counter: %w", err)
 		}
 	}
 	return db.incContainerObjectCounter(tx, cnrID, isUserObject)
@@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
 func syncCounter(tx *bbolt.Tx, force bool) error {
 	shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
 	if err != nil {
-		return fmt.Errorf("could not get shard info bucket: %w", err)
+		return fmt.Errorf("get shard info bucket: %w", err)
 	}
 	shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
 		len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
@@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
 
 	containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
 	if err != nil {
-		return fmt.Errorf("could not get container counter bucket: %w", err)
+		return fmt.Errorf("get container counter bucket: %w", err)
 	}
 
 	var addr oid.Address
@@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
 		return nil
 	})
 	if err != nil {
-		return fmt.Errorf("could not iterate objects: %w", err)
+		return fmt.Errorf("iterate objects: %w", err)
 	}
 
 	return setObjectCounters(counters, shardInfoB, containerCounterB)
@@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
 		value := containerCounterValue(count)
 		err := containerCounterB.Put(key, value)
 		if err != nil {
-			return fmt.Errorf("could not update phy container object counter: %w", err)
+			return fmt.Errorf("update phy container object counter: %w", err)
 		}
 	}
 	phyData := make([]byte, 8)
@@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
 
 	err := shardInfoB.Put(objectPhyCounterKey, phyData)
 	if err != nil {
-		return fmt.Errorf("could not update phy object counter: %w", err)
+		return fmt.Errorf("update phy object counter: %w", err)
 	}
 
 	logData := make([]byte, 8)
@@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
 
 	err = shardInfoB.Put(objectLogicCounterKey, logData)
 	if err != nil {
-		return fmt.Errorf("could not update logic object counter: %w", err)
+		return fmt.Errorf("update logic object counter: %w", err)
 	}
 
 	userData := make([]byte, 8)
@@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
 
 	err = shardInfoB.Put(objectUserCounterKey, userData)
 	if err != nil {
-		return fmt.Errorf("could not update user object counter: %w", err)
+		return fmt.Errorf("update user object counter: %w", err)
 	}
 
 	return nil
@@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) {
 	}
 	var cnrID cid.ID
 	if err := cnrID.Decode(buf); err != nil {
-		return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err)
+		return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
 	}
 	return cnrID, nil
 }
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 62ab1056d..00ee2baa3 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -163,26 +163,26 @@ func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
 	if res.phyCount > 0 {
 		err := db.updateShardObjectCounter(tx, phy, res.phyCount, false)
 		if err != nil {
-			return fmt.Errorf("could not decrease phy object counter: %w", err)
+			return fmt.Errorf("decrease phy object counter: %w", err)
 		}
 	}
 
 	if res.logicCount > 0 {
 		err := db.updateShardObjectCounter(tx, logical, res.logicCount, false)
 		if err != nil {
-			return fmt.Errorf("could not decrease logical object counter: %w", err)
+			return fmt.Errorf("decrease logical object counter: %w", err)
 		}
 	}
 
 	if res.userCount > 0 {
 		err := db.updateShardObjectCounter(tx, user, res.userCount, false)
 		if err != nil {
-			return fmt.Errorf("could not decrease user object counter: %w", err)
+			return fmt.Errorf("decrease user object counter: %w", err)
 		}
 	}
 
 	if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
-		return fmt.Errorf("could not decrease container object counter: %w", err)
+		return fmt.Errorf("decrease container object counter: %w", err)
 	}
 	return nil
 }
@@ -259,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
 		if garbageBKT != nil {
 			err := garbageBKT.Delete(addrKey)
 			if err != nil {
-				return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
+				return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
 			}
 		}
 		return deleteSingleResult{}, nil
@@ -280,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
 		if garbageBKT != nil {
 			err := garbageBKT.Delete(addrKey)
 			if err != nil {
-				return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
+				return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
 			}
 		}
 
@@ -308,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
 	// remove object
 	err = db.deleteObject(tx, obj, false)
 	if err != nil {
-		return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err)
+		return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
 	}
 
 	if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
@@ -335,12 +335,12 @@ func (db *DB) deleteObject(
 
 	err = updateListIndexes(tx, obj, delListIndexItem)
 	if err != nil {
-		return fmt.Errorf("can't remove list indexes: %w", err)
+		return fmt.Errorf("remove list indexes: %w", err)
 	}
 
 	err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
 	if err != nil {
-		return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
+		return fmt.Errorf("remove fake bucket tree indexes: %w", err)
 	}
 
 	if isParent {
@@ -351,7 +351,7 @@ func (db *DB) deleteObject(
 			addrKey := addressKey(object.AddressOf(obj), key)
 			err := garbageBKT.Delete(addrKey)
 			if err != nil {
-				return fmt.Errorf("could not remove from garbage bucket: %w", err)
+				return fmt.Errorf("remove from garbage bucket: %w", err)
 			}
 		}
 	}
@@ -529,7 +529,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
 		addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
 		err := garbageBKT.Delete(addrKey)
 		if err != nil {
-			return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+			return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
 		}
 	}
 
@@ -567,7 +567,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
 		addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
 		err := garbageBKT.Delete(addrKey)
 		if err != nil {
-			return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
+			return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
 		}
 	}
 
diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go
index 411beb6b3..3133c5480 100644
--- a/pkg/local_object_storage/metabase/exists.go
+++ b/pkg/local_object_storage/metabase/exists.go
@@ -229,7 +229,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
 
 	err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo))
 	if err != nil {
-		return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
+		return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
 	}
 
 	return splitInfo, nil
diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go
index 1cbf78ab2..af274b245 100644
--- a/pkg/local_object_storage/metabase/get.go
+++ b/pkg/local_object_storage/metabase/get.go
@@ -187,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
 
 	err = child.Unmarshal(bytes.Clone(data))
 	if err != nil {
-		return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
+		return nil, fmt.Errorf("unmarshal child with parent: %w", err)
 	}
 
 	par := child.Parent()
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index b0db952b2..2f23d424c 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -177,7 +177,7 @@ type gcHandler struct {
 func (g gcHandler) handleKV(k, _ []byte) error {
 	o, err := garbageFromKV(k)
 	if err != nil {
-		return fmt.Errorf("could not parse garbage object: %w", err)
+		return fmt.Errorf("parse garbage object: %w", err)
 	}
 
 	return g.h(o)
@@ -190,7 +190,7 @@ type graveyardHandler struct {
 func (g graveyardHandler) handleKV(k, v []byte) error {
 	o, err := graveFromKV(k, v)
 	if err != nil {
-		return fmt.Errorf("could not parse grave: %w", err)
+		return fmt.Errorf("parse grave: %w", err)
 	}
 
 	return g.h(o)
@@ -240,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
 func garbageFromKV(k []byte) (res GarbageObject, err error) {
 	err = decodeAddressFromKey(&res.addr, k)
 	if err != nil {
-		err = fmt.Errorf("could not parse address: %w", err)
+		err = fmt.Errorf("parse address: %w", err)
 	}
 
 	return
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 5ac0c0be5..99fdec310 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -373,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
 	if data != nil {
 		err := targetBucket.Delete(tombKey)
 		if err != nil {
-			return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
+			return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
 		}
 	}
 
diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go
index ce6ae1004..7edb96384 100644
--- a/pkg/local_object_storage/metabase/mode.go
+++ b/pkg/local_object_storage/metabase/mode.go
@@ -19,7 +19,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
 
 	if !db.mode.NoMetabase() {
 		if err := db.Close(ctx); err != nil {
-			return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+			return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
 		}
 	}
 
@@ -31,7 +31,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
 			err = db.Init(ctx)
 		}
 		if err != nil {
-			return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
+			return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
 		}
 	}
 
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 6f9dc1bf0..16918c4d9 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -180,18 +180,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
 
 	err := putUniqueIndexes(tx, obj, si, id)
 	if err != nil {
-		return fmt.Errorf("can't put unique indexes: %w", err)
+		return fmt.Errorf("put unique indexes: %w", err)
 	}
 
 	err = updateListIndexes(tx, obj, putListIndexItem)
 	if err != nil {
-		return fmt.Errorf("can't put list indexes: %w", err)
+		return fmt.Errorf("put list indexes: %w", err)
 	}
 
 	if indexAttributes {
 		err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
 		if err != nil {
-			return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
+			return fmt.Errorf("put fake bucket tree indexes: %w", err)
 		}
 	}
 
@@ -250,7 +250,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
 	}
 	rawObject, err := obj.CutPayload().Marshal()
 	if err != nil {
-		return fmt.Errorf("can't marshal object header: %w", err)
+		return fmt.Errorf("marshal object header: %w", err)
 	}
 	return putUniqueIndexItem(tx, namedBucketItem{
 		name: bucketName,
@@ -475,7 +475,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
 func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("can't create index %v: %w", item.name, err)
+		return fmt.Errorf("create index %v: %w", item.name, err)
 	}
 
 	data, err := update(bkt.Get(item.key), item.val)
@@ -492,12 +492,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("can't create index %v: %w", item.name, err)
+		return fmt.Errorf("create index %v: %w", item.name, err)
 	}
 
 	fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
 	if err != nil {
-		return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
+		return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
 	}
 
 	return fkbtRoot.Put(item.val, zeroValue)
@@ -506,19 +506,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
 	bkt, err := createBucketLikelyExists(tx, item.name)
 	if err != nil {
-		return fmt.Errorf("can't create index %v: %w", item.name, err)
+		return fmt.Errorf("create index %v: %w", item.name, err)
 	}
 
 	lst, err := decodeList(bkt.Get(item.key))
 	if err != nil {
-		return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
+		return fmt.Errorf("decode leaf list %v: %w", item.key, err)
 	}
 
 	lst = append(lst, item.val)
 
 	encodedLst, err := encodeList(lst)
 	if err != nil {
-		return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
+		return fmt.Errorf("encode leaf list %v: %w", item.key, err)
 	}
 
 	return bkt.Put(item.key, encodedLst)
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index f802036be..9f1b8b060 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -565,7 +565,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
 		case v2object.FilterHeaderContainerID: // support deprecated field
 			err := res.cnr.DecodeString(filters[i].Value())
 			if err != nil {
-				return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
+				return filterGroup{}, fmt.Errorf("parse container id: %w", err)
 			}
 
 			res.withCnrFilter = true
diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go
index e58115bc8..72618b1a0 100644
--- a/pkg/local_object_storage/metabase/shard_id.go
+++ b/pkg/local_object_storage/metabase/shard_id.go
@@ -32,13 +32,13 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error
 	}
 
 	if err := db.openDB(ctx, mode); err != nil {
-		return nil, fmt.Errorf("failed to open metabase: %w", err)
+		return nil, fmt.Errorf("open metabase: %w", err)
 	}
 
 	id, err := db.readShardID()
 
 	if cErr := db.close(); cErr != nil {
-		err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+		err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
 	}
 
 	return id, metaerr.Wrap(err)
@@ -70,7 +70,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
 	}
 
 	if err := db.openDB(ctx, mode); err != nil {
-		return fmt.Errorf("failed to open metabase: %w", err)
+		return fmt.Errorf("open metabase: %w", err)
 	}
 
 	err := db.writeShardID(id)
@@ -79,7 +79,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
 	}
 
 	if cErr := db.close(); cErr != nil {
-		err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
+		err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
 	}
 
 	return metaerr.Wrap(err)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index bcf72f440..6eba58c69 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
 		NoSync: true,
 	})
 	if err != nil {
-		return fmt.Errorf("can't open new metabase to compact: %w", err)
+		return fmt.Errorf("open new metabase to compact: %w", err)
 	}
 	if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
 		return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
 		}
 		expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
 		if err != nil {
-			return fmt.Errorf("could not parse expiration epoch: %w", err)
+			return fmt.Errorf("parse expiration epoch: %w", err)
 		}
 		expirationEpochBucket := b.Bucket(attrValue)
 		attrKeyValueC := expirationEpochBucket.Cursor()
@@ -399,7 +399,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
 	for _, key := range keys {
 		attr, ok := attributeFromAttributeBucket(key)
 		if !ok {
-			return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+			return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
 		}
 		if !IsAtrributeIndexed(attr) {
 			keysToDrop = append(keysToDrop, key)
@@ -407,7 +407,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
 		}
 		contID, ok := cidFromAttributeBucket(key)
 		if !ok {
-			return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+			return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
 		}
 		info, err := cs.Info(contID)
 		if err != nil {
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 0a2f91a47..80851f1c4 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -231,11 +231,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
 	epoch := binary.BigEndian.Uint64(key)
 	var cnr cid.ID
 	if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
-		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
+		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
 	}
 	var obj oid.ID
 	if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
-		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
+		return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
 	}
 	return epoch, cnr, obj, nil
 }
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 048bb9af6..fbc0f1ad9 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
 
 	b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
 	if err != nil {
-		return fmt.Errorf("can't create auxiliary bucket: %w", err)
+		return fmt.Errorf("create auxiliary bucket: %w", err)
 	}
 	return b.Put(versionKey, data)
 }
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 6e68e9986..fecf96f66 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -106,7 +106,7 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
 		}
 	}
 	if err != nil {
-		return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
+		return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
 	}
 
 	t.mode = m
@@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
 	readOnly := m.ReadOnly()
 	err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
 	if err != nil {
-		return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
+		return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
 	}
 
 	opts := *bbolt.DefaultOptions
@@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
 
 	t.db, err = bbolt.Open(t.path, t.perm, &opts)
 	if err != nil {
-		return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
+		return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
 	}
 
 	t.db.MaxBatchSize = t.maxBatchSize
@@ -1360,7 +1360,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
 		return nil
 	})
 	if err != nil {
-		return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
+		return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
 	}
 	success = true
 	return ids, nil
@@ -1504,7 +1504,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
 			var contID cidSDK.ID
 			if err := contID.Decode(k[:32]); err != nil {
-				return fmt.Errorf("failed to decode containerID: %w", err)
+				return fmt.Errorf("decode containerID: %w", err)
 			}
 
 			res.Items = append(res.Items, ContainerIDTreeID{
 				CID: contID,
diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go
index 364649b50..0309f0c81 100644
--- a/pkg/local_object_storage/shard/container.go
+++ b/pkg/local_object_storage/shard/container.go
@@ -36,7 +36,7 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
 
 	size, err := s.metaBase.ContainerSize(prm.cnr)
 	if err != nil {
-		return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
+		return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
 	}
 
 	return ContainerSizeRes{
@@ -71,7 +71,7 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
 
 	counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
 	if err != nil {
-		return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
+		return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
 	}
 
 	return ContainerCountRes{
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 78ce241fe..1c1933af5 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err
 
 	err = s.SetMode(ctx, mode.DegradedReadOnly)
 	if err != nil {
-		return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
+		return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly))
 	}
 	return nil
 }
@@ -72,7 +72,7 @@ func (s *Shard) Open(ctx context.Context) error {
 			for j := i + 1; j < len(components); j++ {
 				if err := components[j].Open(ctx, m); err != nil {
 					// Other components must be opened, fail.
-					return fmt.Errorf("could not open %T: %w", components[j], err)
+					return fmt.Errorf("open %T: %w", components[j], err)
 				}
 			}
 			err = s.handleMetabaseFailure(ctx, "open", err)
@@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
 				break
 			}
-			return fmt.Errorf("could not open %T: %w", component, err)
+			return fmt.Errorf("open %T: %w", component, err)
 		}
 	}
 	return nil
 }
@@ -184,7 +184,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
 				break
 			}
-			return fmt.Errorf("could not initialize %T: %w", component, err)
+			return fmt.Errorf("initialize %T: %w", component, err)
 		}
 	}
 	return nil
 }
@@ -205,7 +205,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
 
 	err := s.metaBase.Reset()
 	if err != nil {
-		return fmt.Errorf("could not reset metabase: %w", err)
+		return fmt.Errorf("reset metabase: %w", err)
 	}
 
 	withCount := true
@@ -254,12 +254,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
 
 	err = errors.Join(egErr, itErr)
 	if err != nil {
-		return fmt.Errorf("could not put objects to the meta: %w", err)
+		return fmt.Errorf("put objects to the meta: %w", err)
 	}
 
 	err = s.metaBase.SyncCounters()
 	if err != nil {
-		return fmt.Errorf("could not sync object counters: %w", err)
+		return fmt.Errorf("sync object counters: %w", err)
 	}
 
 	success = true
@@ -318,7 +318,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
 func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
 	var lock objectSDK.Lock
 	if err := lock.Unmarshal(obj.Payload()); err != nil {
-		return fmt.Errorf("could not unmarshal lock content: %w", err)
+		return fmt.Errorf("unmarshal lock content: %w", err)
 	}
 
 	locked := make([]oid.ID, lock.NumberOfMembers())
@@ -328,7 +328,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
 	id, _ := obj.ID()
 	err := s.metaBase.Lock(ctx, cnr, id, locked)
 	if err != nil {
-		return fmt.Errorf("could not lock objects: %w", err)
+		return fmt.Errorf("lock objects: %w", err)
 	}
 	return nil
 }
@@ -337,7 +337,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
 	tombstone := objectSDK.NewTombstone()
 
 	if err := tombstone.Unmarshal(obj.Payload()); err != nil {
-		return fmt.Errorf("could not unmarshal tombstone content: %w", err)
+		return fmt.Errorf("unmarshal tombstone content: %w", err)
 	}
 
 	tombAddr := object.AddressOf(obj)
@@ -358,7 +358,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
 
 	_, err := s.metaBase.Inhume(ctx, inhumePrm)
 	if err != nil {
-		return fmt.Errorf("could not inhume objects: %w", err)
+		return fmt.Errorf("inhume objects: %w", err)
 	}
 	return nil
 }
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 7a31a705e..15d1eb6ba 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -175,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
 
 	mExRes, err := s.metaBase.StorageID(ctx, mPrm)
 	if err != nil {
-		return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
+		return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
 	}
 
 	storageID := mExRes.StorageID()
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 6ccae3f53..26492cf01 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -36,7 +36,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
 	modeDegraded := s.GetMode().NoMetabase()
 	if !modeDegraded {
 		if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
-			err = fmt.Errorf("failed to read shard id from metabase: %w", err)
+			err = fmt.Errorf("read shard id from metabase: %w", err)
 		}
 	}
 
@@ -64,7 +64,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
 
 	if len(idFromMetabase) == 0 && !modeDegraded {
 		if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
-			err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
+			err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
 		}
 	}
 	return
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index f583ef5d9..c5275dafd 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -109,7 +109,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
 
 	lst, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		return res, fmt.Errorf("can't list stored containers: %w", err)
+		return res, fmt.Errorf("list stored containers: %w", err)
 	}
 
 	filters := objectSDK.NewSearchFilters()
@@ -149,7 +149,7 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
 
 	containers, err := s.metaBase.Containers(ctx)
 	if err != nil {
-		return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
+		return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
 	}
 
 	return ListContainersRes{
@@ -180,7 +180,7 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
 	metaPrm.SetCursor(prm.cursor)
 	res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
 	if err != nil {
-		return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
+		return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
 	}
 
 	return ListWithCursorRes{
@@ -208,7 +208,7 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
 	metaPrm.Handler = prm.Handler
 	err := s.metaBase.IterateOverContainers(ctx, metaPrm)
 	if err != nil {
-		return fmt.Errorf("could not iterate over containers: %w", err)
+		return fmt.Errorf("iterate over containers: %w", err)
 	}
 
 	return nil
@@ -235,7 +235,7 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
 	metaPrm.Handler = prm.Handler
 	err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
 	if err != nil {
-		return fmt.Errorf("could not iterate over objects: %w", err)
+		return fmt.Errorf("iterate over objects: %w", err)
 	}
 
 	return nil
@@ -258,7 +258,7 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
 	metaPrm.ContainerID = prm.ContainerID
 	count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
 	if err != nil {
-		return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
+		return 0, fmt.Errorf("count alive objects in bucket: %w", err)
 	}
 
 	return count, nil
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index 1e4643db5..3f23111af 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -81,7 +81,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 
 		res, err = s.blobStor.Put(ctx, putPrm)
 		if err != nil {
-			return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
+			return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
 		}
 	}
 
@@ -94,7 +94,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
 		if err != nil {
 			// may we need to handle this case in a special way
 			// since the object has been successfully written to BlobStor
-			return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
+			return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
 		}
 
 		if res.Inserted {
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 184ca9b71..c7c7e11c2 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -67,7 +67,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
 
 	mRes, err := s.metaBase.Select(ctx, selectPrm)
 	if err != nil {
-		return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
+		return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
 	}
 
 	return SelectRes{
diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go
index 9ec039f91..e369fbd50 100644
--- a/pkg/local_object_storage/writecache/iterate.go
+++ b/pkg/local_object_storage/writecache/iterate.go
@@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
 		return b.ForEach(func(k, _ []byte) error {
 			err := addr.DecodeString(string(k))
 			if err != nil {
-				return fmt.Errorf("could not parse object address: %w", err)
+				return fmt.Errorf("parse object address: %w", err)
 			}
 
 			return f(addr)
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 73d12fd33..c491be60b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -83,7 +83,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
 	}
 	if !shrink {
 		if err := c.fsTree.Close(ctx); err != nil {
-			return fmt.Errorf("can't close write-cache storage: %w", err)
+			return fmt.Errorf("close write-cache storage: %w", err)
 		}
 		return nil
 	}
@@ -98,16 +98,16 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
 		if errors.Is(err, errIterationCompleted) {
 			empty = false
 		} else {
-			return fmt.Errorf("failed to check write-cache items: %w", err)
+			return fmt.Errorf("check write-cache items: %w", err)
 		}
 	}
 	if err := c.fsTree.Close(ctx); err != nil {
-		return fmt.Errorf("can't close write-cache storage: %w", err)
+		return fmt.Errorf("close write-cache storage: %w", err)
 	}
 	if empty {
 		err := os.RemoveAll(c.path)
 		if err != nil && !os.IsNotExist(err) {
-			return fmt.Errorf("failed to remove write-cache files: %w", err)
+			return fmt.Errorf("remove write-cache files: %w", err)
 		}
 	} else {
 		c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index a0e236cb7..e88566cdf 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
 		fstree.WithFileCounter(c.counter),
 	)
 	if err := c.fsTree.Open(mod); err != nil {
-		return fmt.Errorf("could not open FSTree: %w", err)
+		return fmt.Errorf("open FSTree: %w", err)
 	}
 	if err := c.fsTree.Init(); err != nil {
-		return fmt.Errorf("could not init FSTree: %w", err)
+		return fmt.Errorf("init FSTree: %w", err)
 	}
 
 	return nil
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
index 3a100f1a3..5eb341ba4 100644
--- a/pkg/local_object_storage/writecache/upgrade.go
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
 		return nil
 	}
 	if err != nil {
-		return fmt.Errorf("could not check write-cache database existence: %w", err)
+		return fmt.Errorf("check write-cache database existence: %w", err)
 	}
 	db, err := OpenDB(c.path, true, os.OpenFile)
 	if err != nil {
-		return fmt.Errorf("could not open write-cache database: %w", err)
+		return fmt.Errorf("open write-cache database: %w", err)
 	}
 	defer func() {
 		_ = db.Close()