Compare commits


37 commits

Author SHA1 Message Date
d7fcc5ce30 [#1586] objsvc: Allow to send search response in multiple messages
Previously, `ln` was only set once, so search really only worked for a
small number of objects (see the sketch after the trace below).

Fix panic:
```
panic: runtime error: slice bounds out of range [:43690] with capacity 21238
goroutine 6859775 [running]:
git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object.(*searchStreamMsgSizeCtrl).Send(0xc001eec8d0, 0xc005734000)
        git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/transport_splitter.go:173 +0x1f0
git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/v2.(*streamWriter).WriteIDs(0xc000520320, {0xc00eb1a000, 0x4fd9c, 0x7fd6475a9a68?})
        git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/v2/streamer.go:28 +0x155
git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*uniqueIDWriter).WriteIDs(0xc001386420, {0xc00eb1a000?, 0xc0013ea9c0?, 0x113eef3?})
        git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/util.go:62 +0x202
git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*execCtx).writeIDList(0xc00011aa38?, {0xc00eb1a000?, 0xc001eec9f0?, 0xc0008f4380?})
        git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/exec.go:68 +0x91
git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*execCtx).executeLocal(0xc0008f4380, {0x176c538, 0xc001eec9f0})
        git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/local.go:18 +0x16b
```
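
The gist of the fix, as a minimal sketch (the function and `maxIDsPerMsg` are illustrative names, not the actual code in `transport_splitter.go`): the chunk length has to be recomputed for every outgoing message instead of being set once.
```go
package object

import oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

// sendInChunks recomputes ln on every iteration, so the last (shorter) batch
// never slices past the end of the ID buffer.
func sendInChunks(ids []oid.ID, maxIDsPerMsg int, send func([]oid.ID) error) error {
	for len(ids) > 0 {
		ln := maxIDsPerMsg
		if ln > len(ids) {
			ln = len(ids)
		}
		if err := send(ids[:ln]); err != nil {
			return err
		}
		ids = ids[ln:]
	}
	return nil
}
```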

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-28 12:29:22 +00:00
c0221d76e6 [#1577] node/container: Fix typo
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-28 12:05:01 +03:00
242f0095d0 [#1577] container: Reduce iterations through container list
* Separated iteration through container IDs from `ContainersOf()`
  so that it can be reused.
* When listing containers, we used to iterate through the whole list
  of containers twice: first when reading from the contract, then when
  sending them. Now batches of containers can be sent while reading
  from the contract (see the sketch below).

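A minimal sketch of the batching idea, assuming a hypothetical `streamContainersOf` helper (the real code is split between the morph client and the container service):
```go
package container

import (
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// streamContainersOf reuses the same per-ID iterator that backs ContainersOf()
// to flush fixed-size batches while the contract is still being read, instead
// of materializing the full list and walking it a second time.
func streamContainersOf(
	owner *user.ID,
	batchSize int,
	iterate func(*user.ID, func(cid.ID) error) error, // shared iterator
	send func([]cid.ID) error, // sends one batch to the stream
) error {
	batch := make([]cid.ID, 0, batchSize)
	err := iterate(owner, func(id cid.ID) error {
		batch = append(batch, id)
		if len(batch) == batchSize {
			if err := send(batch); err != nil {
				return err
			}
			batch = batch[:0]
		}
		return nil
	})
	if err != nil {
		return err
	}
	if len(batch) > 0 {
		return send(batch)
	}
	return nil
}
```
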
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-27 15:30:26 +03:00
6fe34d266a [#1577] morph: Fix typo
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-27 14:03:19 +03:00
fa08bfa553
[#1583] metabase/test: Update TestLisObjectsWithCursor
Update this test following recent changes to ensure
that `(*DB).ListWithCursor` skips expired objects.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-26 14:39:50 +03:00
0da998ef50
[#1583] metabase: Skip expired objects in ListWithCursor
Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-26 14:39:49 +03:00
e44782473a [#1512] object: Fix writePart for EC-container
* Immediately return after an `ObjectAlreadyRemoved` error.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-26 11:27:55 +00:00
9cd1bcef06 [#1512] object: Make raw PutSingle check status within response
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-26 11:27:55 +00:00
ca0a33ea0f [#465] objsvc: Set NETMAP_EPOCH xheader for auxiliary requests
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-26 09:17:58 +00:00
f6c5222952 [#1581] services/session: Use user.ID.EncodeToString() where possible
gopatch:
```
@@
var id expression
@@
-base58.Encode(id.WalletBytes())
+id.EncodeToString()
```
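
The effect of the patch on a typical call site (illustrative, not taken from the diff):
```go
package session

import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

// ownerKey shows a call site after the patch; previously this was
// base58.Encode(id.WalletBytes()), which produces the same string.
func ownerKey(id user.ID) string {
	return id.EncodeToString()
}
```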

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-25 18:09:36 +00:00
ea868e09f8
[#1582] adm: Use int64 type and the default value for --till flag
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-25 14:22:28 +03:00
31d3d299bf
[#1582] adm: Unify prompts for reading a password
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-25 14:22:28 +03:00
b5b4f78b49
[#1582] adm: Allow using the default account in deposit-notary
It has never worked, actually.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-25 14:22:28 +03:00
2832f44437 [#1531] metrics: Rename app_info metric
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-23 10:40:18 +00:00
7c3bcb0f44
[#1578] Makefile: Refill GAS with a single command in env-up
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-23 11:17:22 +03:00
e64871c3fd
[#1578] adm: Allow to transfer GAS to multiple recipients
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-23 11:17:22 +03:00
303cd35a01
[#1578] adm: Remove unnecessary comments in RefillGasCmd
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-23 11:17:22 +03:00
bb9ba1bce2
[#1578] adm: Remove bool flag from refillGas()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-23 11:17:22 +03:00
db03742d33
[#1578] adm: Reword help message for morph refill-gas
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-23 11:17:22 +03:00
148d68933b [#1573] node: Simplify bootstrapWithState()
After #1382 there is no longer any need to use lambdas (see the excerpt below).

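The shape of the change, excerpted from the diff further below:
```go
// before: the caller passed a setter closure
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
	ni.SetStatus(netmap.Online)
})

// after: the status value is passed directly
return c.bootstrapWithState(ctx, netmap.Online)
```
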
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-20 08:17:05 +00:00
51ee132ea3
[#1342] network/cache: Add node address to error multiClient
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
2024-12-18 19:27:35 +03:00
226dd25dd0 [#1568] pilorama: Replace "containerID" with "container ID" in the error message
It is "container ID" in every other place.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-18 15:52:26 +00:00
bd0197eaa8 [#1568] storage: Remove "could not/can't/failed to" from error messages
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-18 15:52:26 +00:00
e44b84c18c
[#1569] cli: Remove unnecessary variable after refactoring
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-18 10:17:04 +03:00
bed49e6ace
[#1569] cli: Make --range flag required in object hash
Previously, `object head` was used if no range was provided.
This was wrong on multiple levels:
1. We print an error if the checksum is missing from the header,
   even though computing the hash is possible.
2. We silently ignore the `--salt` parameter.
3. `--range` is required for the Object.RANGEHASH RPC; custom logic for one
   specific use case has no value.

So we make it required and make the CLI command follow the FrostFS API
more closely (see the excerpt below).

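In cobra terms the change amounts to marking the existing flag as required (excerpted from the diff further below):
```go
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
```
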
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-18 10:17:04 +03:00
df05057ed4 [#1452] container: Add ListStream method
* Added a new method for listing containers to the container service.
  It opens a stream and sends containers in batches.

* Added a TransportSplitter wrapper around ExecutionService to
  split the container ID list read from the contract into parts that are
  smaller than the gRPC max message size. The batch size can be changed
  in the node configuration file (as in the example config file).

* Changed the `container list` implementation in the CLI: ListStream
  is now called by default. The old List is called only if ListStream
  is not implemented (see the excerpt below).

* Changed `internalclient.ListContainersPrm`.`Account` to
  `OwnerID`, since `client.PrmContainerList`.`Account` was
  renamed to `OwnerID` in the SDK.

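On the CLI side the fallback looks like this (excerpted from the diff further below): the stream is tried first, and the unary `List` is used only when the server reports `codes.Unimplemented`.
```go
var containerIDs []cid.ID

err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
	printContainer(cmd, prmGet, id)
	return false
})
if err == nil {
	return
}
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
	// server is too old to support streaming: fall back to the unary List
	res, err := internalclient.ListContainers(cmd.Context(), prm)
	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
	containerIDs = res.SortedIDList()
} else {
	commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
```
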
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-17 16:22:43 +03:00
b6c8ebf493 [#1453] container: Replace sort.Slice with slices.SortFunc
* Replaced `sort.Slice` with `slices.SortFunc` in
  `ListContainersRes.SortedIDList()`, as it is a bit faster
  according to 15102e6dfd (see the snippet below).

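The new comparator in context, as a minimal self-contained version of the code in the diff further below:
```go
package container

import (
	"slices"
	"strings"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// sortedIDs mirrors the new SortedIDList implementation: slices.SortFunc avoids
// the reflection-based sort.Slice and the index-based less closure.
func sortedIDs(list []cid.ID) []cid.ID {
	slices.SortFunc(list, func(lhs, rhs cid.ID) int {
		return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
	})
	return list
}
```
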
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-12-17 13:33:43 +03:00
6e82661c35 [#1563] tree: Wrap only ChainRouterError errors with ObjectAccessDenied
* Such wrapping helps to differentiate logical check errors from internal
  server errors (see the sketch below).

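A hedged sketch of the selective wrapping; only the type name `ChainRouterError` comes from these commits, everything else (fields, helper names) is illustrative:
```go
package ape

import "errors"

// ChainRouterError is the type introduced by this change set; the field here is
// illustrative, only the name comes from the commit title.
type ChainRouterError struct{ Reason string }

func (e *ChainRouterError) Error() string { return e.Reason }

// wrapIfChainRouterError shows the idea: only logical chain-router check
// failures become access-denied errors, internal errors pass through as is.
func wrapIfChainRouterError(err error, accessDenied func(error) error) error {
	var chainErr *ChainRouterError
	if errors.As(err, &chainErr) {
		return accessDenied(chainErr)
	}
	return err
}
```
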
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 15:16:07 +03:00
1a091ea7bb [#1563] object: Wrap only ChainRouterError errors with ObjectAccessDenied
* Such wrapping helps to differentiate logical check errors from internal
  server errors.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 15:15:25 +03:00
7ac3542714 [#1563] ape: Introduce ChainRouterError error type
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-12-16 15:12:30 +03:00
f0c43c8d80
[#1502] Use zap.Error for logging errors
Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like
`zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`.
Leave similar expressions with other messages unchanged, for example,
`zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`.

This change was made by applying the following patch:
```diff
@@
var err expression
@@
-zap.String("error", err.Error())
+zap.Error(err)

@@
var err expression
@@
-zap.String("err", err.Error())
+zap.Error(err)
```
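
An illustrative call site showing the before/after of the patch:
```go
package logging

import (
	"errors"

	"go.uber.org/zap"
)

// warnOnFailure logs an error after the patch; previously this was
// log.Warn("operation failed", zap.String("error", err.Error())),
// which stores the error as a plain string instead of a structured field.
func warnOnFailure(log *zap.Logger) {
	err := errors.New("boom")
	log.Warn("operation failed", zap.Error(err))
}
```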

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-16 11:13:42 +03:00
8ba9f31fca
[#1510] metabase/test: Fix BenchmarkListWithCursor
- Fix misplaced `(*DB).Close` (broken after 47dcfa20f3)
- Use `errors.Is` for error checking (broken after fcdbf5e509); see the sketch below

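A self-contained sketch of the `errors.Is` part of the fix; the sentinel here is a stand-in, not the metabase's actual error value:
```go
package metabase

import "errors"

// errEndOfListing is an illustrative sentinel only.
var errEndOfListing = errors.New("end of object listing")

// done shows why errors.Is matters: it survives error wrapping,
// while a plain == comparison does not.
func done(err error) bool {
	return errors.Is(err, errEndOfListing)
}
```
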
Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-13 13:19:15 +03:00
2af3409d39
[#1510] metabase/test: Fix BenchmarkGet
Fix misplaced `(*DB).Close` (broken after 47dcfa20f3)

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-13 13:18:43 +03:00
d165ac042c
[#1558] morph/client: Reuse notary rpcclient wrapper
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-12 15:30:12 +03:00
7151c71d51
[#1558] morph/client: Remove "could not"/"can't"/"failed to" from error messages
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-12 15:30:12 +03:00
91d9dc2676
[#1558] morph/event: Remove "could not" from error messages
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-12 15:30:12 +03:00
7853dbc315 [#1557] morph/event: Remove embedded structs from scriptHashWithValue
Also, make them public, because otherwise the `unused` linter complains:
```
pkg/morph/event/utils.go:25:2  unused  field `typ` is unused
```
This complaint is wrong, though: the `typ` field _is_ used, because the whole
struct is used as a map key (see the illustration below).

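An illustrative reproduction of the false positive (struct and field names here are hypothetical):
```go
package event

// typ is never read by name, yet every map lookup compares it,
// because the whole struct is the key.
type scriptHashWithType struct {
	hash [20]byte
	typ  string
}

func lookupParser(m map[scriptHashWithType]string, hash [20]byte, typ string) (string, bool) {
	v, ok := m[scriptHashWithType{hash: hash, typ: typ}]
	return v, ok
}
```
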
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-12-12 11:55:09 +00:00
148 changed files with 1049 additions and 596 deletions

View file

@ -270,10 +270,12 @@ env-up: all
echo "Frostfs contracts not found"; exit 1; \
fi
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH}
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \
--storage-wallet ./dev/storage/wallet01.json \
--storage-wallet ./dev/storage/wallet02.json \
--storage-wallet ./dev/storage/wallet03.json \
--storage-wallet ./dev/storage/wallet04.json
@if [ ! -f "$(LOCODE_DB_PATH)" ]; then \
make locode-download; \
fi

View file

@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/gas"
"github.com/nspcc-dev/neo-go/pkg/smartcontract"
@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key
}
func generateStorageCreds(cmd *cobra.Command, _ []string) error {
return refillGas(cmd, storageGasConfigFlag, true)
}
func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) {
// storage wallet path is not part of the config
storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
// wallet address is not part of the config
walletAddress, _ := cmd.Flags().GetString(walletAddressFlag)
var gasReceiver util.Uint160
if len(walletAddress) != 0 {
gasReceiver, err = address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
} else {
if storageWalletPath == "" {
return fmt.Errorf("missing wallet path (use '--%s <out.json>')", commonflags.StorageWalletFlag)
}
var w *wallet.Wallet
if createWallet {
w, err = wallet.NewWallet(storageWalletPath)
} else {
w, err = wallet.NewWalletFromFile(storageWalletPath)
}
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
if createWallet {
var password string
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
}
gasReceiver = w.Accounts[0].Contract.ScriptHash()
walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag)
w, err := wallet.NewWallet(walletPath)
if err != nil {
return fmt.Errorf("create wallet: %w", err)
}
label, _ := cmd.Flags().GetString(storageWalletLabelFlag)
password, err := config.GetStoragePassword(viper.GetViper(), label)
if err != nil {
return fmt.Errorf("can't fetch password: %w", err)
}
if label == "" {
label = constants.SingleAccountName
}
if err := w.CreateAccount(label, password); err != nil {
return fmt.Errorf("can't create account: %w", err)
}
return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash())
}
func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) {
gasStr := viper.GetString(gasFlag)
gasAmount, err := helper.ParseGASAmount(gasStr)
@ -208,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error
}
bw := io.NewBufBinWriter()
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
for _, gasReceiver := range gasReceivers {
emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All,
wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil)
emit.Opcodes(bw.BinWriter, opcode.ASSERT)
}
if bw.Err != nil {
return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err)
}

View file

@ -1,7 +1,12 @@
package generate
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
@ -33,7 +38,27 @@ var (
_ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag))
},
RunE: func(cmd *cobra.Command, _ []string) error {
return refillGas(cmd, commonflags.RefillGasAmountFlag, false)
storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag)
walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag)
var gasReceivers []util.Uint160
for _, walletAddress := range walletAddresses {
addr, err := address.StringToUint160(walletAddress)
if err != nil {
return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err)
}
gasReceivers = append(gasReceivers, addr)
}
for _, storageWalletPath := range storageWalletPaths {
w, err := wallet.NewWalletFromFile(storageWalletPath)
if err != nil {
return fmt.Errorf("can't create wallet: %w", err)
}
gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash())
}
return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...)
},
}
GenerateAlphabetCmd = &cobra.Command{
@ -50,10 +75,10 @@ var (
func initRefillGasCmd() {
RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet")
RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet")
RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet")
RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer")
RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag)
RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag)
}
func initGenerateStorageCmd() {

View file

@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"math/big"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@ -41,7 +40,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
}
accHash := w.GetChangeAddress()
if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil {
addr, _ := cmd.Flags().GetString(walletAccountFlag)
if addr != "" {
accHash, err = address.StringToUint160(addr)
if err != nil {
return fmt.Errorf("invalid address: %s", addr)
@ -53,7 +53,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't find account for %s", accHash)
}
prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash))
prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash))
pass, err := input.ReadPassword(prompt)
if err != nil {
return fmt.Errorf("can't get password: %v", err)
@ -73,16 +73,9 @@ func depositNotary(cmd *cobra.Command, _ []string) error {
return err
}
till := int64(defaultNotaryDepositLifetime)
tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag)
if err != nil {
return err
}
if tillStr != "" {
till, err = strconv.ParseInt(tillStr, 10, 64)
if err != nil || till <= 0 {
return errInvalidNotaryDepositLifetime
}
till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag)
if till <= 0 {
return errInvalidNotaryDepositLifetime
}
return transferGas(cmd, acc, accHash, gasAmount, till)

View file

@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() {
DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet")
DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address")
DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit")
DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks")
DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks")
}
func init() {

View file

@ -105,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) {
fatalOnErr(errors.New("can't find account in wallet"))
}
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account))
c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account))
fatalOnErr(err)
err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams())

View file

@ -9,7 +9,6 @@ import (
"io"
"os"
"slices"
"sort"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting"
@ -78,13 +77,31 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain
// SortedIDList returns sorted list of identifiers of user's containers.
func (x ListContainersRes) SortedIDList() []cid.ID {
list := x.cliRes.Containers()
sort.Slice(list, func(i, j int) bool {
lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString()
return strings.Compare(lhs, rhs) < 0
slices.SortFunc(list, func(lhs, rhs cid.ID) int {
return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString())
})
return list
}
func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) {
cliPrm := &client.PrmContainerListStream{
XHeaders: prm.XHeaders,
OwnerID: prm.OwnerID,
Session: prm.Session,
}
rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm)
if err != nil {
return fmt.Errorf("init container list: %w", err)
}
err = rdr.Iterate(processCnr)
if err != nil {
return fmt.Errorf("read container list: %w", err)
}
return
}
// PutContainerPrm groups parameters of PutContainer operation.
type PutContainerPrm struct {
Client *client.Client

View file

@ -6,8 +6,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/spf13/cobra"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// flags of list command.
@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{
var prm internalclient.ListContainersPrm
prm.SetClient(cli)
prm.Account = idUser
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
prm.OwnerID = idUser
prmGet := internalclient.GetContainerPrm{
Client: cli,
}
var containerIDs []cid.ID
err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool {
printContainer(cmd, prmGet, id)
return false
})
if err == nil {
return
}
if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented {
res, err := internalclient.ListContainers(cmd.Context(), prm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
containerIDs = res.SortedIDList()
} else {
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
}
containerIDs := res.SortedIDList()
for _, cnrID := range containerIDs {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(cnrID.String())
continue
}
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
continue
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
continue
}
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
printContainer(cmd, prmGet, cnrID)
}
},
}
func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) {
if flagVarListName == "" && !flagVarListPrintAttr {
cmd.Println(id.String())
return
}
prmGet.ClientParams.ContainerID = &id
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
cmd.Printf(" failed to read attributes: %v\n", err)
return
}
cnr := res.Container()
if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName {
return
}
cmd.Println(id.String())
if flagVarListPrintAttr {
cnr.IterateUserAttributes(func(key, val string) {
cmd.Printf(" %s: %s\n", key, val)
})
}
}
func initContainerListContainersCmd() {
commonflags.Init(listContainersCmd)

View file

@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/spf13/cobra"
@ -43,6 +42,8 @@ func initObjectHashCmd() {
_ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String("range", "", "Range to take hash from in the form offset1:length1,...")
_ = objectHashCmd.MarkFlagRequired("range")
flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'")
flags.String(getRangeHashSaltFlag, "", "Salt in hex format")
}
@ -66,36 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
tz := typ == hashTz
fullHash := len(ranges) == 0
if fullHash {
var headPrm internalclient.HeadObjectPrm
headPrm.SetClient(cli)
Prepare(cmd, &headPrm)
headPrm.SetAddress(objAddr)
// get hash of full payload through HEAD (may be user can do it through dedicated command?)
res, err := internalclient.HeadObject(cmd.Context(), headPrm)
commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
var cs checksum.Checksum
var csSet bool
if tz {
cs, csSet = res.Header().PayloadHomomorphicHash()
} else {
cs, csSet = res.Header().PayloadChecksum()
}
if csSet {
cmd.Println(hex.EncodeToString(cs.Value()))
} else {
cmd.Println("Missing checksum in object header.")
}
return
}
var hashPrm internalclient.HashPayloadRangesPrm
hashPrm.SetClient(cli)
Prepare(cmd, &hashPrm)
@ -104,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) {
hashPrm.SetSalt(salt)
hashPrm.SetRanges(ranges)
if tz {
if typ == hashTz {
hashPrm.TZ()
}

View file

@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
zap.Error(err),
)
} else {
c.init(ctx)

View file

@ -119,12 +119,12 @@ func shutdown(ctx context.Context) {
innerRing.Stop(ctx)
if err := metricsCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
zap.Error(err),
)
}
if err := pprofCmp.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()),
zap.Error(err),
)
}

View file

@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) {
log.Info(ctx, c.name+" config updated")
if err := c.shutdown(ctx); err != nil {
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer,
zap.String("error", err.Error()))
zap.Error(err))
return
}

View file

@ -606,9 +606,10 @@ type cfgAccounting struct {
type cfgContainer struct {
scriptHash neogoutil.Uint160
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
parsers map[event.Type]event.NotificationParser
subscribers map[event.Type][]event.Handler
workerPool util.WorkerPool // pool for asynchronous handlers
containerBatchSize uint32
}
type cfgFrostfsID struct {
@ -1119,7 +1120,7 @@ func initLocalStorage(ctx context.Context, c *cfg) {
err := ls.Close(context.WithoutCancel(ctx))
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure,
zap.String("error", err.Error()),
zap.Error(err),
)
} else {
c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully)
@ -1209,7 +1210,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch,
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
zap.Error(err))
return
}
@ -1219,9 +1220,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) {
// bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract
// with the binary-encoded information from the current node's configuration.
// The state is set using the provided setter which MUST NOT be nil.
func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error {
func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error {
ni := c.cfgNodeInfo.localInfo
stateSetter(&ni)
ni.SetStatus(state)
prm := nmClient.AddPeerPrm{}
prm.SetNodeInfo(ni)
@ -1231,9 +1232,7 @@ func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.N
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(ctx context.Context, c *cfg) error {
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Online)
})
return c.bootstrapWithState(ctx, netmap.Online)
}
// bootstrap calls bootstrapWithState with:
@ -1244,9 +1243,7 @@ func (c *cfg) bootstrap(ctx context.Context) error {
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) {
ni.SetStatus(netmap.Maintenance)
})
return c.bootstrapWithState(ctx, netmap.Maintenance)
}
c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState,

View file

@ -0,0 +1,27 @@
package containerconfig
import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
const (
subsection = "container"
listStreamSubsection = "list_stream"
// ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once.
ContainerBatchSizeDefault = 1000
)
// ContainerBatchSize returns the value of "batch_size" config parameter
// from "list_stream" subsection of "container" section.
//
// Returns ContainerBatchSizeDefault if the value is missing or if
// the value is not a positive integer.
func ContainerBatchSize(c *config.Config) uint32 {
if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil {
return ContainerBatchSizeDefault
}
size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size")
if size == 0 {
return ContainerBatchSizeDefault
}
return size
}

View file

@ -0,0 +1,27 @@
package containerconfig_test
import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
"github.com/stretchr/testify/require"
)
func TestContainerSection(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
empty := configtest.EmptyConfig()
require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty))
})
const path = "../../../../config/example/node"
fileConfigTest := func(c *config.Config) {
require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c))
}
configtest.ForEachFileType(path, fileConfigTest)
t.Run("ENV", func(t *testing.T) {
configtest.ForEnvFileType(t, path, fileConfigTest)
})
}

View file

@ -5,6 +5,7 @@ import (
"context"
"net"
containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
@ -47,6 +48,7 @@ func initContainerService(_ context.Context, c *cfg) {
}
c.shared.frostfsidClient = frostfsIDSubjectProvider
c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg)
defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
@ -56,7 +58,9 @@ func initContainerService(_ context.Context, c *cfg) {
&c.key.PrivateKey,
containerService.NewAPEServer(defaultChainRouter, cnrRdr,
newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc),
containerService.NewSplitterService(
c.cfgContainer.containerBatchSize, c.respSvc,
containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)),
),
)
service = containerService.NewAuditService(service, c.log, c.audit)
@ -218,6 +222,7 @@ type morphContainerReader struct {
lister interface {
ContainersOf(*user.ID) ([]cid.ID, error)
IterateContainersOf(*user.ID, func(cid.ID) error) error
}
}
@ -233,6 +238,10 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
return x.lister.ContainersOf(id)
}
func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error {
return x.lister.IterateContainersOf(id, processCID)
}
type morphContainerWriter struct {
neoClient *cntClient.Client
}

View file

@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C
err := stopper(ctx)
if err != nil {
c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name),
zap.String("error", err.Error()),
zap.Error(err),
)
}

View file

@ -96,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) {
if err != nil {
c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient,
zap.Any("endpoints", addresses),
zap.String("error", err.Error()),
zap.Error(err),
)
fatalOnErr(err)
@ -168,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) {
fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
}
subs, err = subscriber.New(ctx, &subscriber.Params{

View file

@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) {
_, _, err := makeNotaryDeposit(ctx, c)
if err != nil {
c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit,
zap.String("error", err.Error()),
zap.Error(err),
)
}
})

View file

@ -58,7 +58,7 @@ func (c *cfg) MaxObjectSize() uint64 {
sz, err := c.cfgNetmap.wrapper.MaxObjectSize()
if err != nil {
c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue,
zap.String("error", err.Error()),
zap.Error(err),
)
}
@ -269,7 +269,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
_, err := ls.Inhume(ctx, inhumePrm)
if err != nil {
c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage,
zap.String("error", err.Error()),
zap.Error(err),
)
}
}),

View file

@ -113,7 +113,7 @@ func initTreeService(c *cfg) {
// Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged.
c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved,
zap.Stringer("cid", ev.ID),
zap.String("error", err.Error()))
zap.Error(err))
}
})

View file

@ -83,6 +83,9 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s
FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
FROSTFS_REPLICATOR_POOL_SIZE=10
# Container service section
FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500
# Object service section
FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200

View file

@ -124,6 +124,11 @@
"pool_size": 10,
"put_timeout": "15s"
},
"container": {
"list_stream": {
"batch_size": "500"
}
},
"object": {
"delete": {
"tombstone_lifetime": 10

View file

@ -108,6 +108,10 @@ replicator:
put_timeout: 15s # timeout for the Replicator PUT remote operation
pool_size: 10 # maximum amount of concurrent replications
container:
list_stream:
batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once
object:
delete:
tombstone_lifetime: 10 # tombstone "local" lifetime in epochs

2
go.mod
View file

@ -8,7 +8,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88

BIN
go.sum

Binary file not shown.

View file

@ -12,8 +12,9 @@ type ApplicationInfo struct {
func NewApplicationInfo(version string) *ApplicationInfo {
appInfo := &ApplicationInfo{
versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{
Name: "app_info",
Help: "General information about the application.",
Namespace: namespace,
Name: "app_info",
Help: "General information about the application.",
}, []string{"version"}),
}
appInfo.versionValue.With(prometheus.Labels{"version": version})

View file

@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
if err != nil {
// do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
zap.String("error", err.Error()))
zap.Error(err))
} else if isInnerRingNode {
return &ClassifyResult{
Role: acl.RoleInnerRing,
@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK
// is not possible for previous epoch, so
// do not throw error, try best case matching
c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
zap.String("error", err.Error()))
zap.Error(err))
} else if isContainerNode {
return &ClassifyResult{
Role: acl.RoleContainer,

View file

@ -100,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain *
fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey)
if err != nil {
fromMainChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err))
}
mainnetChain.from = fromMainChainBlock
@ -456,7 +456,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey)
if err != nil {
fromSideChainBlock = 0
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err))
}
morphChain := &chainParams{

View file

@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) {
if err != nil {
// we don't stop inner ring execution on this error
s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators,
zap.String("error", err.Error()))
zap.Error(err))
}
s.tickInitialExpoch(ctx)
@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) {
for _, c := range s.closers {
if err := c(); err != nil {
s.log.Warn(ctx, logs.InnerringCloserError,
zap.String("error", err.Error()),
zap.Error(err),
)
}
}

View file

@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
// there is no signature collecting, so we don't need extra fee
_, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod)
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error()))
ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err))
return false
}
@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool {
networkMap, err := ap.netmapClient.NetMap()
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes,
zap.String("error", err.Error()))
zap.Error(err))
return false
}
@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
if err != nil {
ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey,
zap.String("error", err.Error()))
zap.Error(err))
continue
}
@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net
ap.log.Warn(ctx, logs.AlphabetCantTransferGas,
zap.String("receiver", key.Address()),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
zap.Error(err),
)
}
}
@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint
ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet,
zap.Strings("receivers", receiversLog),
zap.Int64("amount", int64(gasPerNode)),
zap.String("error", err.Error()),
zap.Error(err),
)
}
}

View file

@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
err := cp.checkPutContainer(pctx)
if err != nil {
cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed,
zap.String("error", err.Error()),
zap.Error(err),
)
return false
@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool
if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer,
zap.String("error", err.Error()),
zap.Error(err),
)
return false
}
@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
err := cp.checkDeleteContainer(e)
if err != nil {
cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed,
zap.String("error", err.Error()),
zap.Error(err),
)
return false
@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven
if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil {
cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer,
zap.String("error", err.Error()),
zap.Error(err),
)
return false

View file

@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De
err = np.morphClient.TransferGas(receiver, np.mintEmitValue)
if err != nil {
np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver,
zap.String("error", err.Error()))
zap.Error(err))
return false
}

View file

@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet,
zap.String("error", err.Error()))
zap.Error(err))
return false
}
sidechainAlphabet, err := gp.morphClient.Committee()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain,
zap.String("error", err.Error()))
zap.Error(err))
return false
}
newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain,
zap.String("error", err.Error()))
zap.Error(err))
return false
}
@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25
err = gp.voter.VoteForSidechainValidator(ctx, votePrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee,
zap.String("error", err.Error()))
zap.Error(err))
}
// 2. Update NeoFSAlphabet role in the sidechain.
@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
innerRing, err := gp.irFetcher.InnerRingKeys()
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain,
zap.String("error", err.Error()))
zap.Error(err))
return
}
newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
zap.Error(err))
return
}
@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid
if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys,
zap.String("error", err.Error()))
zap.Error(err))
}
}
@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe
err := gp.morphClient.UpdateNotaryList(ctx, updPrm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain,
zap.String("error", err.Error()))
zap.Error(err))
}
}
@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph
err := gp.frostfsClient.AlphabetUpdate(ctx, prm)
if err != nil {
gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract,
zap.String("error", err.Error()))
zap.Error(err))
}
}

View file

@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea
})
if err != nil {
np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache,
zap.String("error", err.Error()))
zap.Error(err))
return false
}

View file

@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
epochDuration, err := np.netmapClient.EpochDuration()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetEpochDuration,
zap.String("error", err.Error()))
zap.Error(err))
} else {
np.epochState.SetEpochDuration(epochDuration)
}
@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight,
zap.String("hash", ev.TxHash().StringLE()),
zap.String("error", err.Error()))
zap.Error(err))
}
if err := np.epochTimer.ResetEpochTimer(h); err != nil {
np.log.Warn(ctx, logs.NetmapCantResetEpochTimer,
zap.String("error", err.Error()))
zap.Error(err))
}
// get new netmap snapshot
networkMap, err := np.netmapClient.NetMap()
if err != nil {
np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup,
zap.String("error", err.Error()))
zap.Error(err))
return false
}

View file

@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer)
err = np.nodeValidator.VerifyAndUpdate(&nodeInfo)
if err != nil {
np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate,
zap.String("error", err.Error()),
zap.Error(err),
)
return false

View file

@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool {
func (s *Server) InnerRingIndex(ctx context.Context) int {
index, err := s.statusIndex.InnerRingIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err))
return -1
}
@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int {
func (s *Server) InnerRingSize(ctx context.Context) int {
size, err := s.statusIndex.InnerRingSize()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err))
return 0
}
@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int {
func (s *Server) AlphabetIndex(ctx context.Context) int {
index, err := s.statusIndex.AlphabetIndex()
if err != nil {
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error()))
s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err))
return -1
}
@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V
s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract,
zap.Int8("alphabet_index", int8(letter)),
zap.Uint64("epoch", epoch),
zap.String("error", err.Error()))
zap.Error(err))
}
})

View file

@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
})
})
if err != nil {
return fmt.Errorf("can't determine DB size: %w", err)
return fmt.Errorf("determine DB size: %w", err)
}
if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items))
@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error {
return saveItemsCount(tx, items)
}); err != nil {
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items))
return fmt.Errorf("can't save blobovnicza's size and items count: %w", err)
return fmt.Errorf("save blobovnicza's size and items count: %w", err)
}
b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items))
}

View file

@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes,
if prm.ignoreErrors {
return nil
}
return fmt.Errorf("could not decode address key: %w", err)
return fmt.Errorf("decode address key: %w", err)
}
}

View file

@ -82,7 +82,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}

View file

@ -57,7 +57,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
}

View file

@ -69,7 +69,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G
if !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)
}
@ -115,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil

View file

@ -71,7 +71,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re
if !outOfBounds && !client.IsErrObjectNotFound(err) {
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel,
zap.String("level", p),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if outOfBounds {
@ -130,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob
// decompress the data
data, err := b.compression.Decompress(res.Object())
if err != nil {
return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err)
return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err)
}
// unmarshal the object
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
from := prm.Range.GetOffset()

View file

@ -44,12 +44,12 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm
if prm.IgnoreErrors {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", elem.Address()),
zap.String("err", err.Error()),
zap.Error(err),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return nil
}
return fmt.Errorf("could not decompress object data: %w", err)
return fmt.Errorf("decompress object data: %w", err)
}
if prm.Handler != nil {
@ -77,12 +77,12 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo
if err != nil {
if ignoreErrors {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.Error(err),
zap.String("storage_id", p),
zap.String("root_path", b.rootPath))
return false, nil
}
return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err)
return false, fmt.Errorf("open blobovnicza %s: %w", p, err)
}
defer shBlz.Close(ctx)

View file

@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) {
)...)
if err := blz.Open(ctx); err != nil {
return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err)
return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err)
}
if err := blz.Init(ctx); err != nil {
return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err)
return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err)
}
b.refCount++
@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) {
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
zap.Error(err),
)
}
b.blcza = nil
@ -125,9 +125,9 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error {
if err := b.blcza.Close(ctx); err != nil {
b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza,
zap.String("id", b.path),
zap.String("error", err.Error()),
zap.Error(err),
)
return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err)
return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err)
}
b.refCount = 0

View file

@ -83,7 +83,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza,
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
@ -106,7 +106,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error)
} else {
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza,
zap.String("path", active.SystemPath()),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
if errors.Is(err, blobovnicza.ErrNoSpace) {

View file

@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error {
for i := range b.storage {
err := b.storage[i].Storage.Close(ctx)
if err != nil {
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error()))
b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err))
if firstErr == nil {
firstErr = err
}

View file

@ -75,7 +75,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi
for _, err := range errors[:len(errors)-1] {
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking,
zap.Stringer("address", prm.Address),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}

View file

@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
if err != nil {
if prm.IgnoreErrors {
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("err", err.Error()),
zap.Error(err),
zap.String("directory_path", dirPath))
return nil
}
@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
if prm.IgnoreErrors {
t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.Stringer("address", addr),
zap.String("err", err.Error()),
zap.Error(err),
zap.String("path", path))
continue
}
@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) {
},
)
if err != nil {
return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
}
return count, size, nil
@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
},
)
if err != nil {
return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err)
}
success = true
return result, nil

View file

@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I
b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration,
zap.String("storage_path", b.storage[i].Storage.Path()),
zap.String("storage_type", b.storage[i].Storage.Type()),
zap.String("err", err.Error()))
zap.Error(err))
continue
}
return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)

View file

@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes,
// Decompress the data.
var err error
if data, err = s.compression.Decompress(data); err != nil {
return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err)
return common.GetRes{}, fmt.Errorf("decompress object data: %w", err)
}
// Unmarshal the SDK object.
obj := objectSDK.New()
if err := obj.Unmarshal(data); err != nil {
return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err)
return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err)
}
return common.GetRes{Object: obj, RawData: data}, nil


@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error {
}
}
if err != nil {
return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err)
}
b.mode = m


@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e
// marshal object
data, err := prm.Object.Marshal()
if err != nil {
return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err)
return common.PutRes{}, fmt.Errorf("marshal the object: %w", err)
}
prm.RawData = data
}


@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
err := eg.Wait()
close(errCh)
if err != nil {
return fmt.Errorf("failed to initialize shards: %w", err)
return fmt.Errorf("initialize shards: %w", err)
}
for res := range errCh {
@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error {
continue
}
return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err)
return fmt.Errorf("initialize shard %s: %w", res.id, res.err)
}
}
@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error {
if err := sh.Close(ctx); err != nil {
e.log.Debug(ctx, logs.EngineCouldNotCloseShard,
zap.String("id", id),
zap.String("error", err.Error()),
zap.Error(err),
)
}
}
@ -320,7 +320,7 @@ loop:
for _, newID := range shardsToAdd {
sh, err := e.createShard(ctx, rcfg.shards[newID])
if err != nil {
return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err)
return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err)
}
idStr := sh.ID().String()
@ -331,13 +331,13 @@ loop:
}
if err != nil {
_ = sh.Close(ctx)
return fmt.Errorf("could not init %s shard: %w", idStr, err)
return fmt.Errorf("init %s shard: %w", idStr, err)
}
err = e.addShard(sh)
if err != nil {
_ = sh.Close(ctx)
return fmt.Errorf("could not add %s shard: %w", idStr, err)
return fmt.Errorf("add %s shard: %w", idStr, err)
}
e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr))


@ -154,7 +154,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
if err != nil {
e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren,
zap.Stringer("addr", addr),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return false
}
@ -166,7 +166,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
if err != nil {
e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
zap.String("err", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}
@ -196,7 +196,7 @@ func (e *StorageEngine) deleteChunks(
if err != nil {
e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard,
zap.Stringer("addr", addr),
zap.String("err", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
}


@ -140,7 +140,7 @@ func (e *StorageEngine) reportShardError(
if isLogical(err) {
e.log.Warn(ctx, msg,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()))
zap.Error(err))
return
}
@ -151,7 +151,7 @@ func (e *StorageEngine) reportShardError(
e.log.Warn(ctx, msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
zap.Uint32("error count", errCount),
zap.String("error", err.Error()),
zap.Error(err),
}, fields...)...)
if e.errorsThreshold == 0 || errCount < e.errorsThreshold {


@ -578,7 +578,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree
func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, string, error) {
if prm.TreeHandler == nil {
return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID())
}
return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh)


@ -106,7 +106,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
zap.String("error", it.MetaError.Error()),
zap.Error(it.MetaError),
zap.Stringer("address", prm.addr),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}


@ -143,7 +143,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
} else {
e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}
@ -165,14 +165,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) {
e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return
}
if client.IsErrObjectAlreadyRemoved(err) {
e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard,
zap.Stringer("shard_id", sh.ID()),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
res.status = putToShardRemoved
res.err = err


@ -118,7 +118,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
if it.ShardWithMeta.Shard != nil && it.MetaError != nil {
e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound,
zap.Stringer("shard_id", it.ShardWithMeta.ID()),
zap.String("error", it.MetaError.Error()),
zap.Error(it.MetaError),
zap.Stringer("address", prm.addr),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
}


@ -108,12 +108,12 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) {
func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) {
sh, err := e.createShard(ctx, opts)
if err != nil {
return nil, fmt.Errorf("could not create a shard: %w", err)
return nil, fmt.Errorf("create a shard: %w", err)
}
err = e.addShard(sh)
if err != nil {
return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err)
}
e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
@ -124,7 +124,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) {
id, err := generateShardID()
if err != nil {
return nil, fmt.Errorf("could not generate shard ID: %w", err)
return nil, fmt.Errorf("generate shard ID: %w", err)
}
opts = e.appendMetrics(id, opts)
@ -180,7 +180,7 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error {
pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true))
if err != nil {
return fmt.Errorf("could not create pool: %w", err)
return fmt.Errorf("create pool: %w", err)
}
strID := sh.ID().String()
@ -374,7 +374,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
zap.Error(err),
)
multiErrGuard.Lock()
multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err))
multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err))
multiErrGuard.Unlock()
}
@ -385,7 +385,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS
zap.Error(err),
)
multiErrGuard.Lock()
multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err))
multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err))
multiErrGuard.Unlock()
}
return nil


@ -54,7 +54,7 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error {
func (db *DB) openDB(ctx context.Context, mode mode.Mode) error {
err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission)
if err != nil {
return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err)
return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err)
}
db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path))
@ -73,7 +73,7 @@ func (db *DB) openBolt(ctx context.Context) error {
db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions)
if err != nil {
return fmt.Errorf("can't open boltDB database: %w", err)
return fmt.Errorf("open boltDB database: %w", err)
}
db.boltDB.MaxBatchDelay = db.boltBatchDelay
db.boltDB.MaxBatchSize = db.boltBatchSize
@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error {
if reset {
err := tx.DeleteBucket(name)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
return fmt.Errorf("could not delete static bucket %s: %w", k, err)
return fmt.Errorf("delete static bucket %s: %w", k, err)
}
}
_, err := tx.CreateBucketIfNotExists(name)
if err != nil {
return fmt.Errorf("could not create static bucket %s: %w", k, err)
return fmt.Errorf("create static bucket %s: %w", k, err)
}
}
for _, b := range deprecatedBuckets {
err := tx.DeleteBucket(b)
if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) {
return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err)
return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err)
}
}
if !reset { // counters will be recalculated by refill metabase
err = syncCounter(tx, false)
if err != nil {
return fmt.Errorf("could not sync object counter: %w", err)
return fmt.Errorf("sync object counter: %w", err)
}
return nil


@ -238,14 +238,14 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
}
if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
return fmt.Errorf("could not increase phy object counter: %w", err)
return fmt.Errorf("increase phy object counter: %w", err)
}
if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
return fmt.Errorf("could not increase logical object counter: %w", err)
return fmt.Errorf("increase logical object counter: %w", err)
}
if isUserObject {
if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
return fmt.Errorf("could not increase user object counter: %w", err)
return fmt.Errorf("increase user object counter: %w", err)
}
}
return db.incContainerObjectCounter(tx, cnrID, isUserObject)
@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject
func syncCounter(tx *bbolt.Tx, force bool) error {
shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket)
if err != nil {
return fmt.Errorf("could not get shard info bucket: %w", err)
return fmt.Errorf("get shard info bucket: %w", err)
}
shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 &&
len(shardInfoB.Get(objectLogicCounterKey)) == 8 &&
@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName)
if err != nil {
return fmt.Errorf("could not get container counter bucket: %w", err)
return fmt.Errorf("get container counter bucket: %w", err)
}
var addr oid.Address
@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error {
return nil
})
if err != nil {
return fmt.Errorf("could not iterate objects: %w", err)
return fmt.Errorf("iterate objects: %w", err)
}
return setObjectCounters(counters, shardInfoB, containerCounterB)
@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
value := containerCounterValue(count)
err := containerCounterB.Put(key, value)
if err != nil {
return fmt.Errorf("could not update phy container object counter: %w", err)
return fmt.Errorf("update phy container object counter: %w", err)
}
}
phyData := make([]byte, 8)
@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err := shardInfoB.Put(objectPhyCounterKey, phyData)
if err != nil {
return fmt.Errorf("could not update phy object counter: %w", err)
return fmt.Errorf("update phy object counter: %w", err)
}
logData := make([]byte, 8)
@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectLogicCounterKey, logData)
if err != nil {
return fmt.Errorf("could not update logic object counter: %w", err)
return fmt.Errorf("update logic object counter: %w", err)
}
userData := make([]byte, 8)
@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container
err = shardInfoB.Put(objectUserCounterKey, userData)
if err != nil {
return fmt.Errorf("could not update user object counter: %w", err)
return fmt.Errorf("update user object counter: %w", err)
}
return nil
@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) {
}
var cnrID cid.ID
if err := cnrID.Decode(buf); err != nil {
return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err)
return cid.ID{}, fmt.Errorf("decode container ID: %w", err)
}
return cnrID, nil
}


@ -163,26 +163,26 @@ func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error {
if res.phyCount > 0 {
err := db.updateShardObjectCounter(tx, phy, res.phyCount, false)
if err != nil {
return fmt.Errorf("could not decrease phy object counter: %w", err)
return fmt.Errorf("decrease phy object counter: %w", err)
}
}
if res.logicCount > 0 {
err := db.updateShardObjectCounter(tx, logical, res.logicCount, false)
if err != nil {
return fmt.Errorf("could not decrease logical object counter: %w", err)
return fmt.Errorf("decrease logical object counter: %w", err)
}
}
if res.userCount > 0 {
err := db.updateShardObjectCounter(tx, user, res.userCount, false)
if err != nil {
return fmt.Errorf("could not decrease user object counter: %w", err)
return fmt.Errorf("decrease user object counter: %w", err)
}
}
if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil {
return fmt.Errorf("could not decrease container object counter: %w", err)
return fmt.Errorf("decrease container object counter: %w", err)
}
return nil
}
@ -259,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
}
}
return deleteSingleResult{}, nil
@ -280,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
if garbageBKT != nil {
err := garbageBKT.Delete(addrKey)
if err != nil {
return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err)
}
}
@ -308,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
// remove object
err = db.deleteObject(tx, obj, false)
if err != nil {
return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err)
return deleteSingleResult{}, fmt.Errorf("remove object: %w", err)
}
if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil {
@ -335,12 +335,12 @@ func (db *DB) deleteObject(
err = updateListIndexes(tx, obj, delListIndexItem)
if err != nil {
return fmt.Errorf("can't remove list indexes: %w", err)
return fmt.Errorf("remove list indexes: %w", err)
}
err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
if err != nil {
return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
return fmt.Errorf("remove fake bucket tree indexes: %w", err)
}
if isParent {
@ -351,7 +351,7 @@ func (db *DB) deleteObject(
addrKey := addressKey(object.AddressOf(obj), key)
err := garbageBKT.Delete(addrKey)
if err != nil {
return fmt.Errorf("could not remove from garbage bucket: %w", err)
return fmt.Errorf("remove from garbage bucket: %w", err)
}
}
}
@ -529,7 +529,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}
@ -567,7 +567,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK.
addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize))
err := garbageBKT.Delete(addrKey)
if err != nil {
return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err)
return fmt.Errorf("remove EC parent from garbage bucket: %w", err)
}
}


@ -229,7 +229,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e
err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo))
if err != nil {
return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err)
return nil, fmt.Errorf("unmarshal split info from root index: %w", err)
}
return splitInfo, nil


@ -187,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD
err = child.Unmarshal(bytes.Clone(data))
if err != nil {
return nil, fmt.Errorf("can't unmarshal child with parent: %w", err)
return nil, fmt.Errorf("unmarshal child with parent: %w", err)
}
par := child.Parent()


@ -219,7 +219,6 @@ func benchmarkGet(b *testing.B, numOfObj int) {
meta.WithMaxBatchSize(batchSize),
meta.WithMaxBatchDelay(10*time.Millisecond),
)
defer func() { require.NoError(b, db.Close(context.Background())) }()
addrs := make([]oid.Address, 0, numOfObj)
for range numOfObj {
@ -234,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
}
db, addrs := prepareDb(runtime.NumCPU())
defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("parallel", func(b *testing.B) {
b.ReportAllocs()

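The benchmark hunk above (and a similar one further below) moves the deferred Close out of the database-preparation helper and into the benchmark body: deferred inside the helper, the close runs as soon as the helper returns, before the benchmark ever uses the database. A generic sketch of the corrected ownership, using hypothetical types:

```
package example

import "testing"

type db struct{ closed bool }

func (d *db) Close() error { d.closed = true; return nil }

// prepare opens a database for a benchmark. Deferring Close here would be a bug:
// the defer fires when prepare returns, i.e. before the caller has used the DB.
func prepare(_ testing.TB) *db {
	return &db{}
}

func BenchmarkUse(b *testing.B) {
	d := prepare(b)
	// The caller owns the lifetime, so the defer belongs here.
	defer func() {
		if err := d.Close(); err != nil {
			b.Fatal(err)
		}
	}()
	for range b.N {
		_ = d.closed
	}
}
```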

@ -177,7 +177,7 @@ type gcHandler struct {
func (g gcHandler) handleKV(k, _ []byte) error {
o, err := garbageFromKV(k)
if err != nil {
return fmt.Errorf("could not parse garbage object: %w", err)
return fmt.Errorf("parse garbage object: %w", err)
}
return g.h(o)
@ -190,7 +190,7 @@ type graveyardHandler struct {
func (g graveyardHandler) handleKV(k, v []byte) error {
o, err := graveFromKV(k, v)
if err != nil {
return fmt.Errorf("could not parse grave: %w", err)
return fmt.Errorf("parse grave: %w", err)
}
return g.h(o)
@ -240,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address)
func garbageFromKV(k []byte) (res GarbageObject, err error) {
err = decodeAddressFromKey(&res.addr, k)
if err != nil {
err = fmt.Errorf("could not parse address: %w", err)
err = fmt.Errorf("parse address: %w", err)
}
return


@ -373,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
if data != nil {
err := targetBucket.Delete(tombKey)
if err != nil {
return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err)
return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err)
}
}


@ -87,7 +87,8 @@ type CountAliveObjectsInContainerPrm struct {
}
// ListWithCursor lists physical objects available in metabase starting from
// cursor. Includes objects of all types. Does not include inhumed objects.
// cursor. Includes objects of all types. Does not include inhumed and expired
// objects.
// Use cursor value from response for consecutive requests.
//
// Returns ErrEndOfListing if there are no more objects to return or count
@ -143,6 +144,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int,
rawAddr := make([]byte, cidSize, addressKeySize)
currEpoch := db.epochState.CurrentEpoch()
loop:
for ; name != nil; name, _ = c.Next() {
cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name)
@ -167,7 +170,7 @@ loop:
if bkt != nil {
copy(rawAddr, cidRaw)
result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID,
result, count, cursor, threshold)
result, count, cursor, threshold, currEpoch)
if err != nil {
return nil, nil, err
}
@ -212,6 +215,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
limit int, // stop listing at `limit` items in result
cursor *Cursor, // start from cursor object
threshold bool, // ignore cursor and start immediately
currEpoch uint64,
) ([]objectcore.Info, []byte, *Cursor, error) {
if cursor == nil {
cursor = new(Cursor)
@ -243,13 +247,19 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket
continue
}
var o objectSDK.Object
if err := o.Unmarshal(bytes.Clone(v)); err != nil {
return nil, nil, nil, err
}
expEpoch, hasExpEpoch := hasExpirationEpoch(&o)
if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch {
continue
}
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
if objType == objectSDK.TypeRegular {
var o objectSDK.Object
if err := o.Unmarshal(bytes.Clone(v)); err != nil {
return nil, nil, nil, err
}
isLinkingObj = isLinkObject(&o)
ecHeader := o.ECHeader()
if ecHeader != nil {

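The listing change above makes selectNFromBucket skip objects whose expiration epoch has already passed, unless the object is locked. A standalone sketch of that decision rule, with the lock check and the expiration-attribute lookup abstracted into plain parameters:

```
package main

import "fmt"

// shouldSkipExpired mirrors the condition added above: an object is skipped
// only when it carries an expiration epoch, that epoch is strictly below the
// current one, and the object is not locked.
func shouldSkipExpired(locked, hasExpEpoch bool, expEpoch, currEpoch uint64) bool {
	return !locked && hasExpEpoch && expEpoch < currEpoch
}

func main() {
	const currEpoch = 100
	fmt.Println(shouldSkipExpired(false, true, 99, currEpoch))  // true: expired and not locked
	fmt.Println(shouldSkipExpired(true, true, 99, currEpoch))   // false: expired but locked
	fmt.Println(shouldSkipExpired(false, true, 100, currEpoch)) // false: expires at the current epoch, not yet expired
	fmt.Println(shouldSkipExpired(false, false, 0, currEpoch))  // false: no expiration attribute at all
}
```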

@ -3,14 +3,17 @@ package meta_test
import (
"context"
"errors"
"strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"go.etcd.io/bbolt"
@ -18,6 +21,8 @@ import (
func BenchmarkListWithCursor(b *testing.B) {
db := listWithCursorPrepareDB(b)
defer func() { require.NoError(b, db.Close(context.Background())) }()
b.Run("1 item", func(b *testing.B) {
benchmarkListWithCursor(b, db, 1)
})
@ -33,7 +38,6 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
NoSync: true,
})) // faster single-thread generation
defer func() { require.NoError(b, db.Close(context.Background())) }()
obj := testutil.GenerateObject()
for i := range 100_000 { // should be a multiple of all batch sizes
@ -55,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
if err != meta.ErrEndOfListing {
if errors.Is(err, meta.ErrEndOfListing) {
b.Fatalf("error: %v", err)
}
prm.SetCursor(nil)
@ -70,14 +74,16 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
defer func() { require.NoError(t, db.Close(context.Background())) }()
const (
currEpoch = 100
expEpoch = currEpoch - 1
containers = 5
total = containers * 4 // regular + ts + child + lock
total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired
)
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
defer func() { require.NoError(t, db.Close(context.Background())) }()
expected := make([]object.Info, 0, total)
// fill metabase with objects
@ -126,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, child)
require.NoError(t, err)
expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular})
// add expired object (do not include into expected)
obj = testutil.GenerateObjectWithCID(containerID)
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
require.NoError(t, metaPut(db, obj, nil))
// add non-expired object (include into expected)
obj = testutil.GenerateObjectWithCID(containerID)
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch))
require.NoError(t, metaPut(db, obj, nil))
expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
// add locked expired object (include into expected)
obj = testutil.GenerateObjectWithCID(containerID)
objID := oidtest.ID()
obj.SetID(objID)
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch))
require.NoError(t, metaPut(db, obj, nil))
require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID}))
expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular})
}
t.Run("success with various count", func(t *testing.T) {


@ -19,7 +19,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
if !db.mode.NoMetabase() {
if err := db.Close(ctx); err != nil {
return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}
@ -31,7 +31,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error {
err = db.Init(ctx)
}
if err != nil {
return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err)
}
}


@ -180,18 +180,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
err := putUniqueIndexes(tx, obj, si, id)
if err != nil {
return fmt.Errorf("can't put unique indexes: %w", err)
return fmt.Errorf("put unique indexes: %w", err)
}
err = updateListIndexes(tx, obj, putListIndexItem)
if err != nil {
return fmt.Errorf("can't put list indexes: %w", err)
return fmt.Errorf("put list indexes: %w", err)
}
if indexAttributes {
err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
if err != nil {
return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
return fmt.Errorf("put fake bucket tree indexes: %w", err)
}
}
@ -250,7 +250,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad
}
rawObject, err := obj.CutPayload().Marshal()
if err != nil {
return fmt.Errorf("can't marshal object header: %w", err)
return fmt.Errorf("marshal object header: %w", err)
}
return putUniqueIndexItem(tx, namedBucketItem{
name: bucketName,
@ -475,7 +475,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck
func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
return fmt.Errorf("can't create index %v: %w", item.name, err)
return fmt.Errorf("create index %v: %w", item.name, err)
}
data, err := update(bkt.Get(item.key), item.val)
@ -492,12 +492,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
return fmt.Errorf("can't create index %v: %w", item.name, err)
return fmt.Errorf("create index %v: %w", item.name, err)
}
fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
if err != nil {
return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err)
}
return fkbtRoot.Put(item.val, zeroValue)
@ -506,19 +506,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
return fmt.Errorf("can't create index %v: %w", item.name, err)
return fmt.Errorf("create index %v: %w", item.name, err)
}
lst, err := decodeList(bkt.Get(item.key))
if err != nil {
return fmt.Errorf("can't decode leaf list %v: %w", item.key, err)
return fmt.Errorf("decode leaf list %v: %w", item.key, err)
}
lst = append(lst, item.val)
encodedLst, err := encodeList(lst)
if err != nil {
return fmt.Errorf("can't encode leaf list %v: %w", item.key, err)
return fmt.Errorf("encode leaf list %v: %w", item.key, err)
}
return bkt.Put(item.key, encodedLst)


@ -565,7 +565,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt
case v2object.FilterHeaderContainerID: // support deprecated field
err := res.cnr.DecodeString(filters[i].Value())
if err != nil {
return filterGroup{}, fmt.Errorf("can't parse container id: %w", err)
return filterGroup{}, fmt.Errorf("parse container id: %w", err)
}
res.withCnrFilter = true


@ -32,13 +32,13 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error
}
if err := db.openDB(ctx, mode); err != nil {
return nil, fmt.Errorf("failed to open metabase: %w", err)
return nil, fmt.Errorf("open metabase: %w", err)
}
id, err := db.readShardID()
if cErr := db.close(); cErr != nil {
err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return id, metaerr.Wrap(err)
@ -70,7 +70,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
}
if err := db.openDB(ctx, mode); err != nil {
return fmt.Errorf("failed to open metabase: %w", err)
return fmt.Errorf("open metabase: %w", err)
}
err := db.writeShardID(id)
@ -79,7 +79,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err
}
if cErr := db.close(); cErr != nil {
err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr))
err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr))
}
return metaerr.Wrap(err)


@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error {
NoSync: true,
})
if err != nil {
return fmt.Errorf("can't open new metabase to compact: %w", err)
return fmt.Errorf("open new metabase to compact: %w", err)
}
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
}
expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
if err != nil {
return fmt.Errorf("could not parse expiration epoch: %w", err)
return fmt.Errorf("parse expiration epoch: %w", err)
}
expirationEpochBucket := b.Bucket(attrValue)
attrKeyValueC := expirationEpochBucket.Cursor()
@ -399,7 +399,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
for _, key := range keys {
attr, ok := attributeFromAttributeBucket(key)
if !ok {
return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
}
if !IsAtrributeIndexed(attr) {
keysToDrop = append(keysToDrop, key)
@ -407,7 +407,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
}
contID, ok := cidFromAttributeBucket(key)
if !ok {
return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
}
info, err := cs.Info(contID)
if err != nil {


@ -231,11 +231,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) {
epoch := binary.BigEndian.Uint64(key)
var cnr cid.ID
if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil {
return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err)
return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err)
}
var obj oid.ID
if err := obj.Decode(key[epochSize+cidSize:]); err != nil {
return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err)
return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err)
}
return epoch, cnr, obj, nil
}


@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
b, err := tx.CreateBucketIfNotExists(shardInfoBucket)
if err != nil {
return fmt.Errorf("can't create auxiliary bucket: %w", err)
return fmt.Errorf("create auxiliary bucket: %w", err)
}
return b.Put(versionKey, data)
}


@ -106,7 +106,7 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error {
}
}
if err != nil {
return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err)
}
t.mode = m
@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
readOnly := m.ReadOnly()
err := util.MkdirAllX(filepath.Dir(t.path), t.perm)
if err != nil {
return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err))
return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err))
}
opts := *bbolt.DefaultOptions
@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error {
t.db, err = bbolt.Open(t.path, t.perm, &opts)
if err != nil {
return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err))
return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err))
}
t.db.MaxBatchSize = t.maxBatchSize
@ -1360,7 +1360,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err
return nil
})
if err != nil {
return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err))
return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err))
}
success = true
return ids, nil
@ -1504,7 +1504,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (*
var contID cidSDK.ID
if err := contID.Decode(k[:32]); err != nil {
return fmt.Errorf("failed to decode containerID: %w", err)
return fmt.Errorf("decode container ID: %w", err)
}
res.Items = append(res.Items, ContainerIDTreeID{
CID: contID,


@ -36,7 +36,7 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) {
size, err := s.metaBase.ContainerSize(prm.cnr)
if err != nil {
return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err)
return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err)
}
return ContainerSizeRes{
@ -71,7 +71,7 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont
counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID)
if err != nil {
return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err)
return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err)
}
return ContainerCountRes{


@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err
err = s.SetMode(ctx, mode.DegradedReadOnly)
if err != nil {
return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly))
return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly))
}
return nil
}
@ -72,7 +72,7 @@ func (s *Shard) Open(ctx context.Context) error {
for j := i + 1; j < len(components); j++ {
if err := components[j].Open(ctx, m); err != nil {
// Other components must be opened, fail.
return fmt.Errorf("could not open %T: %w", components[j], err)
return fmt.Errorf("open %T: %w", components[j], err)
}
}
err = s.handleMetabaseFailure(ctx, "open", err)
@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error {
break
}
return fmt.Errorf("could not open %T: %w", component, err)
return fmt.Errorf("open %T: %w", component, err)
}
}
return nil
@ -184,7 +184,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error {
break
}
return fmt.Errorf("could not initialize %T: %w", component, err)
return fmt.Errorf("initialize %T: %w", component, err)
}
}
return nil
@ -205,7 +205,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err := s.metaBase.Reset()
if err != nil {
return fmt.Errorf("could not reset metabase: %w", err)
return fmt.Errorf("reset metabase: %w", err)
}
withCount := true
@ -254,12 +254,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error {
err = errors.Join(egErr, itErr)
if err != nil {
return fmt.Errorf("could not put objects to the meta: %w", err)
return fmt.Errorf("put objects to the meta: %w", err)
}
err = s.metaBase.SyncCounters()
if err != nil {
return fmt.Errorf("could not sync object counters: %w", err)
return fmt.Errorf("sync object counters: %w", err)
}
success = true
@ -272,7 +272,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
if err := obj.Unmarshal(data); err != nil {
s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject,
zap.Stringer("address", addr),
zap.String("err", err.Error()))
zap.Error(err))
return nil
}
@ -318,7 +318,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error {
var lock objectSDK.Lock
if err := lock.Unmarshal(obj.Payload()); err != nil {
return fmt.Errorf("could not unmarshal lock content: %w", err)
return fmt.Errorf("unmarshal lock content: %w", err)
}
locked := make([]oid.ID, lock.NumberOfMembers())
@ -328,7 +328,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err
id, _ := obj.ID()
err := s.metaBase.Lock(ctx, cnr, id, locked)
if err != nil {
return fmt.Errorf("could not lock objects: %w", err)
return fmt.Errorf("lock objects: %w", err)
}
return nil
}
@ -337,7 +337,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
tombstone := objectSDK.NewTombstone()
if err := tombstone.Unmarshal(obj.Payload()); err != nil {
return fmt.Errorf("could not unmarshal tombstone content: %w", err)
return fmt.Errorf("unmarshal tombstone content: %w", err)
}
tombAddr := object.AddressOf(obj)
@ -358,7 +358,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object
_, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
return fmt.Errorf("could not inhume objects: %w", err)
return fmt.Errorf("inhume objects: %w", err)
}
return nil
}


@ -112,7 +112,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
if err != nil {
s.log.Debug(ctx, logs.StorageIDRetrievalFailure,
zap.Stringer("object", addr),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}
@ -132,7 +132,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
if err != nil && !client.IsErrObjectNotFound(err) {
s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor,
zap.Stringer("object_address", addr),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
}


@ -205,7 +205,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) {
})
if err != nil {
gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool,
zap.String("error", err.Error()),
zap.Error(err),
)
v.prevGroup.Done()
@ -313,7 +313,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
err := s.metaBase.IterateOverGarbage(ctx, iterPrm)
if err != nil {
s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed,
zap.String("error", err.Error()),
zap.Error(err),
)
return
@ -334,7 +334,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
if err != nil {
s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects,
zap.String("error", err.Error()),
zap.Error(err),
)
result.success = false
}
@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err))
}
}
@ -429,7 +429,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address)
res, err := s.metaBase.Inhume(ctx, inhumePrm)
if err != nil {
s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects,
zap.String("error", err.Error()),
zap.Error(err),
)
return
@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) {
})
if err = errGroup.Wait(); err != nil {
s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error()))
s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err))
}
}
@ -637,7 +637,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston
res, err := s.metaBase.InhumeTombstones(ctx, tss)
if err != nil {
s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage,
zap.String("error", err.Error()),
zap.Error(err),
)
return
@ -665,7 +665,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
unlocked, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
zap.Error(err),
)
return
@ -678,7 +678,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []
res, err := s.metaBase.Inhume(ctx, pInhume)
if err != nil {
s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage,
zap.String("error", err.Error()),
zap.Error(err),
)
return
@ -722,7 +722,7 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) {
_, err := s.metaBase.FreeLockedBy(lockers)
if err != nil {
s.log.Warn(ctx, logs.ShardFailureToUnlockObjects,
zap.String("error", err.Error()),
zap.Error(err),
)
return


@ -175,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta
mExRes, err := s.metaBase.StorageID(ctx, mPrm)
if err != nil {
return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err)
return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err)
}
storageID := mExRes.StorageID()


@ -36,7 +36,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
modeDegraded := s.GetMode().NoMetabase()
if !modeDegraded {
if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil {
err = fmt.Errorf("failed to read shard id from metabase: %w", err)
err = fmt.Errorf("read shard id from metabase: %w", err)
}
}
@ -64,7 +64,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) {
if len(idFromMetabase) == 0 && !modeDegraded {
if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil {
err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr))
err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr))
}
}
return


@ -110,7 +110,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
}
s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase,
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
)


@ -109,7 +109,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
lst, err := s.metaBase.Containers(ctx)
if err != nil {
return res, fmt.Errorf("can't list stored containers: %w", err)
return res, fmt.Errorf("list stored containers: %w", err)
}
filters := objectSDK.NewSearchFilters()
@ -124,7 +124,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) {
if err != nil {
s.log.Debug(ctx, logs.ShardCantSelectAllObjects,
zap.Stringer("cid", lst[i]),
zap.String("error", err.Error()),
zap.Error(err),
zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
continue
@ -149,7 +149,7 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo
containers, err := s.metaBase.Containers(ctx)
if err != nil {
return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err)
return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err)
}
return ListContainersRes{
@ -180,7 +180,7 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
metaPrm.SetCursor(prm.cursor)
res, err := s.metaBase.ListWithCursor(ctx, metaPrm)
if err != nil {
return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err)
return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err)
}
return ListWithCursorRes{
@ -208,7 +208,7 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai
metaPrm.Handler = prm.Handler
err := s.metaBase.IterateOverContainers(ctx, metaPrm)
if err != nil {
return fmt.Errorf("could not iterate over containers: %w", err)
return fmt.Errorf("iterate over containers: %w", err)
}
return nil
@ -235,7 +235,7 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
metaPrm.Handler = prm.Handler
err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
if err != nil {
return fmt.Errorf("could not iterate over objects: %w", err)
return fmt.Errorf("iterate over objects: %w", err)
}
return nil
@ -258,7 +258,7 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive
metaPrm.ContainerID = prm.ContainerID
count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
if err != nil {
return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
return 0, fmt.Errorf("count alive objects in bucket: %w", err)
}
return count, nil


@ -76,12 +76,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
if err != nil || !tryCache {
if err != nil {
s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor,
zap.String("err", err.Error()))
zap.Error(err))
}
res, err = s.blobStor.Put(ctx, putPrm)
if err != nil {
return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err)
return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err)
}
}
@ -94,7 +94,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
if err != nil {
// may we need to handle this case in a special way
// since the object has been successfully written to BlobStor
return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err)
return PutRes{}, fmt.Errorf("put object to metabase: %w", err)
}
if res.Inserted {


@ -67,7 +67,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err)
return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err)
}
return SelectRes{


@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error {
return b.ForEach(func(k, _ []byte) error {
err := addr.DecodeString(string(k))
if err != nil {
return fmt.Errorf("could not parse object address: %w", err)
return fmt.Errorf("parse object address: %w", err)
}
return f(addr)


@ -83,7 +83,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
}
if !shrink {
if err := c.fsTree.Close(ctx); err != nil {
return fmt.Errorf("can't close write-cache storage: %w", err)
return fmt.Errorf("close write-cache storage: %w", err)
}
return nil
}
@ -98,16 +98,16 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
if errors.Is(err, errIterationCompleted) {
empty = false
} else {
return fmt.Errorf("failed to check write-cache items: %w", err)
return fmt.Errorf("check write-cache items: %w", err)
}
}
if err := c.fsTree.Close(ctx); err != nil {
return fmt.Errorf("can't close write-cache storage: %w", err)
return fmt.Errorf("close write-cache storage: %w", err)
}
if empty {
err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove write-cache files: %w", err)
return fmt.Errorf("remove write-cache files: %w", err)
}
} else {
c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty)


@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
return fmt.Errorf("could not open FSTree: %w", err)
return fmt.Errorf("open FSTree: %w", err)
}
if err := c.fsTree.Init(); err != nil {
return fmt.Errorf("could not init FSTree: %w", err)
return fmt.Errorf("init FSTree: %w", err)
}
return nil


@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
return nil
}
if err != nil {
return fmt.Errorf("could not check write-cache database existence: %w", err)
return fmt.Errorf("check write-cache database existence: %w", err)
}
db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
return fmt.Errorf("could not open write-cache database: %w", err)
return fmt.Errorf("open write-cache database: %w", err)
}
defer func() {
_ = db.Close()


@ -29,7 +29,7 @@ func (c *Client) BalanceOf(id user.ID) (*big.Int, error) {
amount, err := client.BigIntFromStackItem(prms[0])
if err != nil {
return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err)
return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err)
}
return amount, nil
}


@ -21,7 +21,7 @@ func (c *Client) Decimals() (uint32, error) {
decimals, err := client.IntFromStackItem(prms[0])
if err != nil {
return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err)
return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err)
}
return uint32(decimals), nil
}


@ -39,7 +39,7 @@ func (c *Client) TransferX(ctx context.Context, p TransferPrm) error {
_, err = c.client.Invoke(ctx, prm)
if err != nil {
return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err)
return fmt.Errorf("invoke method (%s): %w", transferXMethod, err)
}
return nil
}


@ -196,7 +196,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...)
if err != nil {
return InvokeRes{}, fmt.Errorf("could not invoke %s: %w", method, err)
return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err)
}
c.logger.Debug(ctx, logs.ClientNeoClientInvoke,
@ -210,7 +210,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F
// TestInvokeIterator invokes contract method returning an iterator and executes cb on each element.
// If cb returns an error, the session is closed and this error is returned as-is.
// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned.
// batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created.
// The default batchSize is 100, the default limit from neo-go.
func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error {
@ -390,7 +390,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
height, err = c.rpcActor.GetBlockCount()
if err != nil {
c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight,
zap.String("error", err.Error()))
zap.Error(err))
return nil
}
@ -404,7 +404,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error {
newHeight, err = c.rpcActor.GetBlockCount()
if err != nil {
c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243,
zap.String("error", err.Error()))
zap.Error(err))
return nil
}
@ -509,7 +509,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) {
list, err := c.roleList(noderoles.NeoFSAlphabet)
if err != nil {
return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err)
return nil, fmt.Errorf("get alphabet nodes role list: %w", err)
}
return list, nil
@ -523,7 +523,7 @@ func (c *Client) GetDesignateHash() util.Uint160 {
func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) {
height, err := c.rpcActor.GetBlockCount()
if err != nil {
return nil, fmt.Errorf("can't get chain height: %w", err)
return nil, fmt.Errorf("get chain height: %w", err)
}
return c.rolemgmt.GetDesignatedByRole(r, height)


@ -2,9 +2,7 @@ package container
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
@ -16,27 +14,36 @@ import (
//
// If remote RPC does not support neo-go session API, fallback to List() method.
func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
var rawID []byte
var cidList []cid.ID
var err error
cb := func(id cid.ID) error {
cidList = append(cidList, id)
return nil
}
if err = c.IterateContainersOf(idUser, cb); err != nil {
return nil, err
}
return cidList, nil
}
// iterateContainers iterates over a list of container identifiers
// belonging to the specified user of FrostFS system and executes
// `cb` on each element. If idUser is nil, calls it on the list of all containers.
func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error) error {
var rawID []byte
if idUser != nil {
rawID = idUser.WalletBytes()
}
var cidList []cid.ID
cb := func(item stackitem.Item) error {
rawID, err := client.BytesFromStackItem(item)
itemCb := func(item stackitem.Item) error {
id, err := getCIDfromStackItem(item)
if err != nil {
return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err)
return err
}
var id cid.ID
err = id.Decode(rawID)
if err != nil {
return fmt.Errorf("decode container ID: %w", err)
if err = cb(id); err != nil {
return err
}
cidList = append(cidList, id)
return nil
}
@ -50,13 +57,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) {
const batchSize = 512
cnrHash := c.client.ContractAddress()
err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID)
if err != nil {
if errors.Is(err, unwrap.ErrNoSessionID) {
return c.list(idUser)
}
return nil, err
err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID)
if err != nil && errors.Is(err, unwrap.ErrNoSessionID) {
return c.iterate(idUser, cb)
}
return cidList, nil
return err
}

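After this refactoring, ContainersOf is a thin wrapper that collects the IDs produced by IterateContainersOf, so callers that only need to stream container IDs can pass their own callback instead of materializing the whole list. A hedged usage sketch; the import path of the morph container client package is assumed, and client construction is omitted:

```
package example

import (
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

	// Assumed location of the container morph client shown in the hunk above.
	cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
)

// countContainers streams the container IDs owned by owner and counts them,
// avoiding the intermediate []cid.ID slice that ContainersOf would allocate.
func countContainers(c *cntClient.Client, owner *user.ID) (int, error) {
	var n int
	err := c.IterateContainersOf(owner, func(cid.ID) error {
		n++
		return nil
	})
	return n, err
}
```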

@ -78,7 +78,7 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) {
res, err := c.client.Invoke(ctx, prm)
if err != nil {
return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err)
return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err)
}
return res.VUB, nil
}

Some files were not shown because too many files have changed in this diff.