From b807d8c40066a90accf7dbdb5d8f31f6069f551c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:20:55 +0300 Subject: [PATCH 001/591] [#1382] go.mod: Upgrade sdk-go and api-go versions Signed-off-by: Dmitrii Stepanov --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 621d2e85d..78dce0131 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,12 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb + git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 diff --git a/go.sum b/go.sum index 4d21d9bca..dd0e31088 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino= 
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= From 1e7f9909dade3ed905c07930c9a9f1bd9a8323b4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:21:53 +0300 Subject: [PATCH 002/591] [#1382] policer: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- pkg/services/policer/check.go | 2 +- pkg/services/policer/ec.go | 2 +- pkg/services/policer/policer_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index bf67ec4d4..06282bd8d 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -110,7 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe requirements.needLocalCopy = true shortage-- - } else if nodes[i].IsMaintenance() { + } else if nodes[i].Status().IsMaintenance() { shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) } else { if status := checkedNodes.processStatus(nodes[i]); status.Processed() { diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index 61a65fc21..e822d1c09 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -106,7 +106,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n validPlacement: true, } } - if requiredNode.IsMaintenance() { + if requiredNode.Status().IsMaintenance() { // consider maintenance mode has object, but do not drop local copy p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) return ecChunkProcessResult{} diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index e353ea428..4e17e98a8 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -174,7 +174,7 @@ func TestProcessObject(t *testing.T) { nodes[i].SetPublicKey([]byte{byte(i)}) } for _, i := range ti.maintenanceNodes { - nodes[i].SetMaintenance() + nodes[i].SetStatus(netmap.Maintenance) } var policy netmap.PlacementPolicy From e5c8f7ff9f49b9e8d0f0a7ac4290aadeba356d6c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:22:38 +0300 Subject: [PATCH 003/591] [#1382] controlSvc: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- pkg/services/control/ir/server/calls.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 2447a8a74..642932c91 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -91,7 +91,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( if len(nodeInfo.PublicKey()) == 0 { return nil, status.Error(codes.NotFound, "no such node") } - if nodeInfo.IsOffline() { + if nodeInfo.Status().IsOffline() { return nil, status.Error(codes.FailedPrecondition, "node is already offline") } From d4be2f20d4a240dd5a46f09b07b432667ca52f24 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:23:46 +0300 Subject: [PATCH 004/591] [#1382] morph: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- pkg/morph/client/netmap/netmap.go | 6 +++--- 
pkg/morph/client/netmap/netmap_test.go | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index 61bbf5f17..f7b5c3ba4 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -136,11 +136,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error { default: return fmt.Errorf("unsupported state %v", node.State) case netmapcontract.NodeStateOnline: - dst.SetOnline() + dst.SetStatus(netmap.Online) case netmapcontract.NodeStateOffline: - dst.SetOffline() + dst.SetStatus(netmap.Offline) case netmapcontract.NodeStateMaintenance: - dst.SetMaintenance() + dst.SetStatus(netmap.Maintenance) } return nil diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go index a8a306197..e686e271e 100644 --- a/pkg/morph/client/netmap/netmap_test.go +++ b/pkg/morph/client/netmap/netmap_test.go @@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) { switch i % 3 { default: - expected[i].SetOffline() + expected[i].SetStatus(netmap.Offline) case int(netmapcontract.NodeStateOnline): - expected[i].SetOnline() + expected[i].SetStatus(netmap.Online) case int(netmapcontract.NodeStateMaintenance): - expected[i].SetMaintenance() + expected[i].SetStatus(netmap.Maintenance) } expected[i].SetPublicKey(pub) @@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) { var state int64 - switch { - case expected[i].IsOnline(): + switch expected[i].Status() { + case netmap.Online: state = int64(netmapcontract.NodeStateOnline) - case expected[i].IsOffline(): + case netmap.Offline: state = int64(netmapcontract.NodeStateOffline) - case expected[i].IsMaintenance(): + case netmap.Maintenance: state = int64(netmapcontract.NodeStateMaintenance) } From a603d14d080e2485fdedee4b92306b1ce4aee2b0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:24:53 +0300 Subject: [PATCH 005/591] [#1382] ir: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- pkg/innerring/processors/netmap/cleanup_table.go | 2 +- pkg/innerring/processors/netmap/cleanup_table_test.go | 2 +- pkg/innerring/processors/netmap/handlers_test.go | 2 +- .../processors/netmap/nodevalidation/state/validator.go | 4 ++-- .../netmap/nodevalidation/state/validator_test.go | 8 ++++---- pkg/innerring/processors/netmap/process_peers.go | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go index c18611569..abd5b089a 100644 --- a/pkg/innerring/processors/netmap/cleanup_table.go +++ b/pkg/innerring/processors/netmap/cleanup_table.go @@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) { } access.binNodeInfo = binNodeInfo - access.maintenance = nmNodes[i].IsMaintenance() + access.maintenance = nmNodes[i].Status().IsMaintenance() newMap[keyString] = access } diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go index ae5620733..208bd5496 100644 --- a/pkg/innerring/processors/netmap/cleanup_table_test.go +++ b/pkg/innerring/processors/netmap/cleanup_table_test.go @@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) { t.Run("skip maintenance nodes", func(t *testing.T) { cnt := 0 - infos[1].SetMaintenance() + infos[1].SetStatus(netmap.Maintenance) key := netmap.StringifyPublicKey(infos[1]) c.update(networkMap, 5) diff --git 
a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index b34abb78c..8875880bf 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -146,7 +146,7 @@ func TestAddPeer(t *testing.T) { require.Nil(t, nc.notaryInvokes, "invalid notary invokes") - node.SetOnline() + node.SetStatus(netmap.Online) ev = netmapEvent.AddPeer{ NodeBytes: node.Marshal(), Request: &payload.P2PNotaryRequest{ diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go index 4094e50a5..e5165f618 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go @@ -56,11 +56,11 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting // // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods. func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error { - if node.IsOnline() { + if node.Status().IsOnline() { return nil } - if node.IsMaintenance() { + if node.Status().IsMaintenance() { return x.netSettings.MaintenanceModeAllowed() } diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go index a557628f0..b81d7243b 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go @@ -41,22 +41,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { }, { name: "ONLINE", - preparer: (*netmap.NodeInfo).SetOnline, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) }, valid: true, }, { name: "OFFLINE", - preparer: (*netmap.NodeInfo).SetOffline, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) }, valid: false, }, { name: "MAINTENANCE/allowed", - preparer: (*netmap.NodeInfo).SetMaintenance, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, valid: true, }, { name: "MAINTENANCE/disallowed", - preparer: (*netmap.NodeInfo).SetMaintenance, + preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }, valid: false, validatorPreparer: func(v *state.NetMapCandidateValidator) { var s testNetworkSettings diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 9e6e8c283..c8c7928a3 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -62,7 +62,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // But there is no guarantee that code will be executed in the same order. // That is why we need to perform `addPeerIR` only in case when node is online, // because in scope of this method, contract set state `ONLINE` for the node. 
- if updated && nodeInfo.IsOnline() { + if updated && nodeInfo.Status().IsOnline() { np.log.Info(logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) From ac1eee091dfbb9193c407ac237cd53a26f4f83d9 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:27:10 +0300 Subject: [PATCH 006/591] [#1382] node: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 8 ++++++-- cmd/frostfs-node/netmap.go | 26 +++++++++++++++----------- cmd/internal/common/netmap.go | 8 ++++---- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index ed3a65c25..63f410b89 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1182,7 +1182,9 @@ func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error { // bootstrapOnline calls cfg.bootstrapWithState with "online" state. func bootstrapOnline(c *cfg) error { - return c.bootstrapWithState((*netmap.NodeInfo).SetOnline) + return c.bootstrapWithState(func(ni *netmap.NodeInfo) { + ni.SetStatus(netmap.Online) + }) } // bootstrap calls bootstrapWithState with: @@ -1193,7 +1195,9 @@ func (c *cfg) bootstrap() error { st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance) + return c.bootstrapWithState(func(ni *netmap.NodeInfo) { + ni.SetStatus(netmap.Maintenance) + }) } c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState, diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 8104b1dc1..c0b87492c 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -61,13 +61,15 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { if ni != nil { s.nodeInfo.Store(*ni) - switch { - case ni.IsOnline(): + switch ni.Status() { + case netmapSDK.Online: ctrlNetSt = control.NetmapStatus_ONLINE - case ni.IsOffline(): + case netmapSDK.Offline: ctrlNetSt = control.NetmapStatus_OFFLINE - case ni.IsMaintenance(): + case netmapSDK.Maintenance: ctrlNetSt = control.NetmapStatus_MAINTENANCE + case netmapSDK.UnspecifiedState: + ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED } } else { ctrlNetSt = control.NetmapStatus_OFFLINE @@ -78,7 +80,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { // nil ni means that the node is not included // in the netmap - niOld.SetOffline() + niOld.SetStatus(netmapSDK.Offline) s.nodeInfo.Store(niOld) } @@ -139,7 +141,7 @@ func initNetmapService(ctx context.Context, c *cfg) { network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo) c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes()) parseAttributes(c) - c.cfgNodeInfo.localInfo.SetOffline() + c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline) if c.cfgMorph.client == nil { initMorphComponents(ctx, c) @@ -252,7 +254,7 @@ func initNetmapState(c *cfg) { zap.String("state", stateWord), ) - if ni != nil && ni.IsMaintenance() { + if ni != nil && ni.Status().IsMaintenance() { c.isMaintenance.Store(true) } @@ -263,13 +265,15 @@ func initNetmapState(c *cfg) { func nodeState(ni *netmapSDK.NodeInfo) string { if ni != nil { - switch { - case ni.IsOnline(): + switch ni.Status() { + case netmapSDK.Online: return "online" - case ni.IsOffline(): + case netmapSDK.Offline: return "offline" - case ni.IsMaintenance(): + case netmapSDK.Maintenance: return "maintenance" + case netmapSDK.UnspecifiedState: + return "undefined" } 
} return "undefined" diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index 79b03a726..f550552d2 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -14,14 +14,14 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, ) { var strState string - switch { + switch node.Status() { default: strState = "STATE_UNSUPPORTED" - case node.IsOnline(): + case netmap.Online: strState = "ONLINE" - case node.IsOffline(): + case netmap.Offline: strState = "OFFLINE" - case node.IsMaintenance(): + case netmap.Maintenance: strState = "MAINTENANCE" } From 3441fff05dd61647e7bd069db34f320e7e9efe9a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 18 Sep 2024 12:27:39 +0300 Subject: [PATCH 007/591] [#1382] cli: Replace deprecated methods Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/modules/netmap/nodeinfo.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index b6ec48f35..ae4bb329a 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -49,14 +49,14 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("key:", hex.EncodeToString(i.PublicKey())) var stateWord string - switch { + switch i.Status() { default: stateWord = "" - case i.IsOnline(): + case netmap.Online: stateWord = "online" - case i.IsOffline(): + case netmap.Offline: stateWord = "offline" - case i.IsMaintenance(): + case netmap.Maintenance: stateWord = "maintenance" } From 61d5e140e051f92222fa9152d6bd807d505ca1e8 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 18 Sep 2024 12:13:15 +0300 Subject: [PATCH 008/591] [#1383] object: Add restrictions for `Patch` method * `Patch` can't be applied for non-regular type object (tombstones, locks etc.) * Complex object parts can't be patched. So, if an object has EC/Split header, it won't be patched. 
Signed-off-by: Airat Arifullin --- pkg/services/object/patch/streamer.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 85c28cda0..73def8c7c 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -57,12 +57,31 @@ func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart { return hs } +func isLinkObject(hdr *objectV2.HeaderWithSignature) bool { + split := hdr.GetHeader().GetSplit() + return len(split.GetChildren()) > 0 && split.GetParent() != nil +} + +func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool { + return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil +} + func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { hdrWithSig, addr, err := s.readHeader(ctx, req) if err != nil { return err } + if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular { + return errors.New("non-regular object can't be patched") + } + if isLinkObject(hdrWithSig) { + return errors.New("linking object can't be patched") + } + if isComplexObjectPart(hdrWithSig) { + return errors.New("complex object parts can't be patched") + } + commonPrm, err := util.CommonPrmFromV2(req) if err != nil { return err From 945b7c740b0deb4a2f16bb85f20efd8820762f53 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 18 Sep 2024 18:14:54 +0300 Subject: [PATCH 009/591] [#1372] adm/morph: Add delta flag to 'force-new-epoch' Signed-off-by: Alexander Chuprov --- cmd/frostfs-adm/internal/commonflags/flags.go | 1 + cmd/frostfs-adm/internal/modules/morph/helper/netmap.go | 8 ++++++-- cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++- cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 ++ cmd/frostfs-adm/internal/modules/morph/node/remove.go | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 81395edb0..b51d2e115 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -39,4 +39,5 @@ const ( CustomZoneFlag = "domain" AlphabetSizeFlag = "size" AllFlag = "all" + DeltaFlag = "delta" ) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index 7a778f8c3..fb8f03783 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -72,13 +72,17 @@ func InvalidConfigValueErr(key string) error { return fmt.Errorf("invalid %s config value from netmap contract", key) } -func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error { +func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error { + if countEpoch <= 0 { + return errors.New("number of epochs cannot be less than 1") + } + curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch")) if err != nil { return errors.New("can't fetch current epoch from the netmap contract") } - newEpoch := curr + 1 + newEpoch := curr + countEpoch wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch) // In NeoFS this is done via Notary contract. 
Here, however, we can form the diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go index df9a03fd1..5e4e9c725 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "github.com/nspcc-dev/neo-go/pkg/io" @@ -30,7 +31,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { } bw := io.NewBufBinWriter() - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil { + if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go index 31fda860e..0288bcdc5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go @@ -22,6 +22,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag)) }, RunE: ForceNewEpochCmd, } @@ -35,6 +36,7 @@ func initForceNewEpochCmd() { ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") + ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch") } func init() { diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go index 0a19102ba..e47451e0c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go +++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go @@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error { int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes()) } - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil { + if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil { return err } From 1361db91ee37d3da938dc5146cc3f15f9ee33517 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 16 Sep 2024 11:09:51 +0300 Subject: [PATCH 010/591] [#1301] adm/morph: Add flag -v to 'Tokens' Signed-off-by: Alexander Chuprov --- .../internal/modules/morph/nns/tokens.go | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go index 6e8ffb40a..3c7136e9d 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go @@ -1,15 +1,25 @@ package nns import ( + "math/big" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/spf13/cobra" ) +const ( + verboseDesc = "Include additional information about CNAME record." +) + func initTokensCmd() { Cmd.AddCommand(tokensCmd) tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc) } func listTokens(cmd *cobra.Command, _ []string) { @@ -18,7 +28,39 @@ func listTokens(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err) for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) { for _, token := range toks { - cmd.Println(string(token)) + output := string(token) + if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose { + cname, err := getCnameRecord(c, token) + commonCmd.ExitOnErr(cmd, "", err) + if cname != "" { + output += " (CNAME: " + cname + ")" + } + } + cmd.Println(output) } } } + +func getCnameRecord(c *client.Contract, token []byte) (string, error) { + items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME))) + + // GetRecords returns the error "not an array" if the domain does not contain records. + if err != nil && strings.Contains(err.Error(), "not an array") { + return "", nil + } + + if err != nil { + return "", err + } + + if len(items) == 0 { + return "", nil + } + + record, err := items[0].TryBytes() + if err != nil { + return "", err + } + + return string(record), nil +} From 5a53f9c4fd52243dd36c69e62d79f344342d4349 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 19 Sep 2024 14:19:16 +0300 Subject: [PATCH 011/591] [#1301] go.mod: Bump frostfs-contract Signed-off-by: Alexander Chuprov --- go.mod | 6 +++--- go.sum | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 78dce0131..502761866 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e - git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e + git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 @@ -28,7 +28,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-multiaddr v0.12.1 - github.com/nspcc-dev/neo-go v0.106.2 + github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 github.com/prometheus/client_golang v1.19.0 @@ -100,7 +100,7 @@ require ( github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect - github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect + github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect github.com/nspcc-dev/rfc6979 v0.2.1 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/go.sum b/go.sum index dd0e31088..85d9df443 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0= git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= @@ -188,8 +188,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk= github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= +github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q= +github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM= github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= From 53a90634fc0a55be636a220b461be731f8a91b3f Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 19 Sep 2024 14:19:41 +0300 Subject: [PATCH 012/591] [#1301] adm/morph: Add 'delete' domains Signed-off-by: Alexander Chuprov --- .../morph/nns/{register.go => domains.go} | 20 +++++++++++++++++++ .../internal/modules/morph/nns/root.go | 10 ++++++++++ 2 files changed, 30 insertions(+) rename cmd/frostfs-adm/internal/modules/morph/nns/{register.go => domains.go} (73%) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go similarity index 73% rename from cmd/frostfs-adm/internal/modules/morph/nns/register.go rename to cmd/frostfs-adm/internal/modules/morph/nns/domains.go index d05d9f171..3684db94a 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/nns/register.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -42,3 +42,23 @@ func registerDomain(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "register domain error: %w", err) cmd.Println("Domain registered successfully") } + +func initDeleteCmd() { + Cmd.AddCommand(deleteCmd) + deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + + _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) +} + +func deleteDomain(cmd *cobra.Command, _ []string) { + c, actor, _ := getRPCClient(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + h, vub, err := c.DeleteDomain(name) + + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) + cmd.Println("Domain deleted successfully") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index e528e4b7b..56774c292 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -42,6 +42,15 @@ var ( }, Run: registerDomain, } + deleteCmd = &cobra.Command{ + Use: "delete", + Short: "Delete a domain by name", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + }, + Run: deleteDomain, + } renewCmd = &cobra.Command{ Use: "renew", Short: "Increases domain expiration date", @@ -91,6 +100,7 @@ var ( func init() { initTokensCmd() initRegisterCmd() + initDeleteCmd() initRenewCmd() initUpdateCmd() initAddRecordCmd() From c290d079fd71ddc851cce8d06f496d27ceedc168 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 20 Sep 2024 10:53:02 +0300 Subject: [PATCH 013/591] [#1312] go.mod: Update sdk-go Signed-off-by: Aleksey Savchuk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 502761866..9817f8527 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 diff --git a/go.sum b/go.sum index 85d9df443..3c6dd9a99 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go 
v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= From c34b8acedde282bbe81efccea772a923ee570a8f Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 20 Sep 2024 10:58:22 +0300 Subject: [PATCH 014/591] [#1312] Drop handling of system attributes with NeoFS prefix Signed-off-by: Aleksey Savchuk --- cmd/frostfs-cli/modules/container/list_objects.go | 13 ++----------- pkg/core/object/fmt.go | 2 +- pkg/local_object_storage/metabase/put.go | 4 ---- pkg/local_object_storage/metabase/upgrade.go | 2 +- pkg/services/object_manager/tombstone/checker.go | 2 +- 5 files changed, 5 insertions(+), 18 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go index ff2f8cf45..d5850359d 100644 --- a/cmd/frostfs-cli/modules/container/list_objects.go +++ b/cmd/frostfs-cli/modules/container/list_objects.go @@ -1,9 +1,6 @@ package container import ( - "strings" - - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" @@ -67,14 +64,8 @@ var listContainerObjectsCmd = &cobra.Command{ resHead, err := internalclient.HeadObject(cmd.Context(), prmHead) if err == nil { - attrs := resHead.Header().Attributes() - for i := range attrs { - attrKey := attrs[i].Key() - if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) { - // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97 - // Use dedicated method to skip system attributes. 
- cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value()) - } + for _, attr := range resHead.Header().UserAttributes() { + cmd.Printf(" %s: %s\n", attr.Key(), attr.Value()) } } else { cmd.Printf(" failed to read attributes: %v\n", err) diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 96f721806..317d62cb0 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) { for _, a := range obj.Attributes() { - if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS { + if a.Key() != objectV2.SysAttributeExpEpoch { continue } diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index ff79a0387..087529895 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -387,10 +387,6 @@ func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) { attributes = ech.ParentAttributes() } for _, attr := range attributes { - if attr.Key() == objectV2.SysAttributeExpEpochNeoFS { - expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64) - return expEpoch, err == nil - } if attr.Key() == objectV2.SysAttributeExpEpoch { expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64) return expEpoch, err == nil diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index f677dcf8e..b5de430dc 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -252,7 +252,7 @@ func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, i continue } attributeKey := string(attrKey[1+cidSize:]) - if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch { + if attributeKey != objectV2.SysAttributeExpEpoch { continue } var containerID cid.ID diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index c3c810001..48a08b693 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -74,7 +74,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool { for _, atr := range ts.Attributes() { - if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS { + if atr.Key() == objectV2.SysAttributeExpEpoch { epoch, err := strconv.ParseUint(atr.Value(), 10, 64) if err != nil { g.log.Warn( From f71418b73cfb49306ec1a191621b954a75105b18 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Fri, 20 Sep 2024 10:24:40 +0000 Subject: [PATCH 015/591] [#1386] frostfs-adm: Add info to error messages These error messages bubble up to human users - adding more context helps to find the cause of the issue faster. 
Signed-off-by: Vitaliy Potyarkin --- .../modules/morph/initialize/initialize_roles.go | 10 ++++++++-- .../morph/initialize/initialize_transfer.go | 15 ++++++++++++++- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go index a6815ee13..05bc83a8b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go @@ -1,6 +1,8 @@ package initialize import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/io" @@ -29,10 +31,14 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error { callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs) if err := c.SendCommitteeTx(w.Bytes(), false); err != nil { - return err + return fmt.Errorf("send committee transaction: %w", err) } - return c.AwaitTx() + err := c.AwaitTx() + if err != nil { + err = fmt.Errorf("await committee transaction: %w", err) + } + return err } func setRolesFinished(c *helper.InitializeContext) (bool, error) { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index b7102fa13..d7b0ec86c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -3,6 +3,7 @@ package initialize import ( "fmt" "math/big" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" @@ -144,5 +145,17 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients if err != nil { return nil, fmt.Errorf("can't create actor: %w", err) } - return act.MakeRun(w.Bytes()) + tx, err := act.MakeRun(w.Bytes()) + if err != nil { + sum := make(map[util.Uint160]int64) + for _, recipient := range recipients { + sum[recipient.Token] += recipient.Amount + } + detail := make([]string, 0, len(sum)) + for _, value := range sum { + detail = append(detail, fmt.Sprintf("amount=%v", value)) + } + err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err) + } + return tx, err } From 0b87be804a63760fc7e43a51cc1b00c5aeedbb34 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 17 Sep 2024 11:24:48 +0300 Subject: [PATCH 016/591] [#1381] engine: Fix tests Drop not required `Eventually` calls. 
Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/delete_test.go | 14 ++++------- pkg/local_object_storage/shard/get_test.go | 23 ++++--------------- pkg/local_object_storage/shard/head_test.go | 19 ++------------- pkg/local_object_storage/shard/inhume_test.go | 2 +- 4 files changed, 11 insertions(+), 47 deletions(-) diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go index 9f205fa5d..574250a93 100644 --- a/pkg/local_object_storage/shard/delete_test.go +++ b/pkg/local_object_storage/shard/delete_test.go @@ -3,7 +3,6 @@ package shard import ( "context" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" @@ -58,19 +57,14 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) { _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) require.NoError(t, err) if hasWriteCache { - sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}) - require.Eventually(t, func() bool { - _, err = sh.Delete(context.Background(), delPrm) - return err == nil - }, 30*time.Second, 10*time.Millisecond) - } else { - _, err = sh.Delete(context.Background(), delPrm) - require.NoError(t, err) + require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})) } + _, err = sh.Delete(context.Background(), delPrm) + require.NoError(t, err) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err)) diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go index 8a7c6972d..d0eecf74e 100644 --- a/pkg/local_object_storage/shard/get_test.go +++ b/pkg/local_object_storage/shard/get_test.go @@ -5,11 +5,9 @@ import ( "context" "errors" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -49,7 +47,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(obj)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.Equal(t, obj, res.Object()) }) @@ -67,7 +65,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(obj)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.Equal(t, obj, res.Object()) }) @@ -95,13 +93,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) { getPrm.SetAddress(object.AddressOf(child)) - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := sh.Get(context.Background(), getPrm) require.NoError(t, err) require.True(t, binaryEqual(child, res.Object())) getPrm.SetAddress(object.AddressOf(parent)) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) var si *objectSDK.SplitInfoError require.True(t, errors.As(err, &si)) @@ -115,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) { }) } 
-func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) { - res, err := sh.Get(context.Background(), getPrm) - if hasWriteCache { - require.Eventually(t, func() bool { - if client.IsErrObjectNotFound(err) { - res, err = sh.Get(context.Background(), getPrm) - } - return !client.IsErrObjectNotFound(err) - }, time.Second, time.Millisecond*100) - } - return res, err -} - // binary equal is used when object contains empty lists in the structure and // requre.Equal fails on comparing and []{} lists. func binaryEqual(a, b *objectSDK.Object) bool { diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go index 1f4631993..c65bbb1e3 100644 --- a/pkg/local_object_storage/shard/head_test.go +++ b/pkg/local_object_storage/shard/head_test.go @@ -4,11 +4,9 @@ import ( "context" "errors" "testing" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" @@ -46,7 +44,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { headPrm.SetAddress(object.AddressOf(obj)) - res, err := testHead(t, sh, headPrm, hasWriteCache) + res, err := sh.Head(context.Background(), headPrm) require.NoError(t, err) require.Equal(t, obj.CutPayload(), res.Object()) }) @@ -74,7 +72,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) { var siErr *objectSDK.SplitInfoError - _, err = testHead(t, sh, headPrm, hasWriteCache) + _, err = sh.Head(context.Background(), headPrm) require.True(t, errors.As(err, &siErr)) headPrm.SetAddress(object.AddressOf(parent)) @@ -85,16 +83,3 @@ func testShardHead(t *testing.T, hasWriteCache bool) { require.Equal(t, parent.CutPayload(), head.Object()) }) } - -func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) { - res, err := sh.Head(context.Background(), headPrm) - if hasWriteCache { - require.Eventually(t, func() bool { - if client.IsErrObjectNotFound(err) { - res, err = sh.Head(context.Background(), headPrm) - } - return !client.IsErrObjectNotFound(err) - }, time.Second, time.Millisecond*100) - } - return res, err -} diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go index 82754568f..1353d5d94 100644 --- a/pkg/local_object_storage/shard/inhume_test.go +++ b/pkg/local_object_storage/shard/inhume_test.go @@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) { _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = sh.Get(context.Background(), getPrm) require.NoError(t, err) _, err = sh.Inhume(context.Background(), inhPrm) From d4493a6d082011cfa24df68b41d92c7b905fda27 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 15:07:10 +0300 Subject: [PATCH 017/591] [#1390] getSvc: Fix Head EC1.1 If local EC chunk found, but remote node is off, then `HEAD --raw` request returns object not found. 
Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/container.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index d22b14192..034768c81 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -26,8 +26,10 @@ func (r *request) executeOnContainer(ctx context.Context) { return } + localStatus := r.status + for { - if r.processCurrentEpoch(ctx) { + if r.processCurrentEpoch(ctx, localStatus) { break } @@ -43,7 +45,7 @@ func (r *request) executeOnContainer(ctx context.Context) { } } -func (r *request) processCurrentEpoch(ctx context.Context) bool { +func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool { r.log.Debug(logs.ProcessEpoch, zap.Uint64("number", r.curProcEpoch), ) @@ -56,7 +58,11 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool { ctx, cancel := context.WithCancel(ctx) defer cancel() - r.status = statusUndefined + if localStatus == statusEC { // possible only for raw == true and local == false + r.status = statusEC + } else { + r.status = statusUndefined + } for { addrs := traverser.Next() From 7c56564b81e4eb6f25418e7dbaf7ce534ac999ae Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 24 Sep 2024 11:46:15 +0300 Subject: [PATCH 018/591] [#1392] object: Fix target initialization within put streamer * Remove `relay` field from put streamer as it's no longer used; * Fix initialization of `Relay` object writer parameter. Signed-off-by: Airat Arifullin --- pkg/services/object/put/streamer.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index f3803d433..d08e7fafa 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" @@ -15,8 +14,6 @@ type Streamer struct { *objectwriter.Config target transformer.ChunkedObjectWriter - - relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error } var errNotInit = errors.New("stream not initialized") @@ -35,7 +32,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { Header: prm.hdr, Container: prm.cnr, TraverseOpts: prm.traverseOpts, - Relay: p.relay, + Relay: prm.relay, } var err error From 3bb65ba820274a2014b3abfe6e11a98047b2059f Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 24 Sep 2024 11:46:15 +0300 Subject: [PATCH 019/591] [#1392] object: Fix target initialization within put streamer * Remove `relay` field from put streamer as it's no longer used; * Fix initialization of `Relay` object writer parameter. 
Signed-off-by: Airat Arifullin --- pkg/services/object/put/streamer.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index f3803d433..d08e7fafa 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer" @@ -15,8 +14,6 @@ type Streamer struct { *objectwriter.Config target transformer.ChunkedObjectWriter - - relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error } var errNotInit = errors.New("stream not initialized") @@ -35,7 +32,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { Header: prm.hdr, Container: prm.cnr, TraverseOpts: prm.traverseOpts, - Relay: p.relay, + Relay: prm.relay, } var err error From 839dead226534887ffbb21b07327724e42cd2135 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 16 Sep 2024 12:38:45 +0300 Subject: [PATCH 020/591] [#1297] getSvc: Return AccessDenied instead of ObjectNotFound Do not replace the access denied error if it was received earlier. Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/remote.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index 163767c43..f2639f8e6 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -41,7 +41,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { r.status = statusUndefined if errors.As(err, &errAccessDenied) { r.err = err - } else { + } else if r.err == nil || !errors.As(r.err, &errAccessDenied) { r.err = new(apistatus.ObjectNotFound) } } From bdf386366c4e268d9f151c38c4eb5c837a49ab25 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 16 Sep 2024 12:40:12 +0300 Subject: [PATCH 021/591] [#1297] dev: Bump neo-go version Signed-off-by: Dmitrii Stepanov --- dev/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml index 9d026797c..be1956e65 100644 --- a/dev/docker-compose.yml +++ b/dev/docker-compose.yml @@ -3,7 +3,7 @@ version: "2.4" services: neo-go: - image: nspccdev/neo-go:0.105.0 + image: nspccdev/neo-go:0.106.0 container_name: neo-go command: ["node", "--config-path", "/config", "--privnet", "--debug"] stop_signal: SIGKILL From 34e6a309c6b1cdd4e277f76b63a6b5d01b094115 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Wed, 18 Sep 2024 12:15:32 +0300 Subject: [PATCH 022/591] [#1356] engine: Evacuate object from shards concurrently Signed-off-by: Anton Nikiforov --- cmd/frostfs-cli/modules/control/evacuation.go | 15 +- pkg/local_object_storage/engine/evacuate.go | 270 +++++++++++------- .../engine/evacuate_test.go | 41 ++- pkg/local_object_storage/metabase/list.go | 167 +++++++++++ pkg/local_object_storage/shard/list.go | 65 +++++ pkg/services/control/server/evacuate_async.go | 14 +- pkg/services/control/service.proto | 4 + pkg/services/control/service_frostfs.pb.go | 68 ++++- 8 files changed, 533 insertions(+), 111 deletions(-) diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 
6fa5ed75c..04a67e5b5 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -21,6 +21,9 @@ const ( noProgressFlag = "no-progress" scopeFlag = "scope" + containerWorkerCountFlag = "container-worker-count" + objectWorkerCountFlag = "object-worker-count" + scopeAll = "all" scopeObjects = "objects" scopeTrees = "trees" @@ -64,12 +67,16 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) + containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag) + objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag) req := &control.StartShardEvacuationRequest{ Body: &control.StartShardEvacuationRequest_Body{ - Shard_ID: getShardIDList(cmd), - IgnoreErrors: ignoreErrors, - Scope: getEvacuationScope(cmd), + Shard_ID: getShardIDList(cmd), + IgnoreErrors: ignoreErrors, + Scope: getEvacuationScope(cmd), + ContainerWorkerCount: containerWorkerCount, + ObjectWorkerCount: objectWorkerCount, }, } @@ -371,6 +378,8 @@ func initControlStartEvacuationShardCmd() { flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll)) flags.Bool(awaitFlag, false, "Block execution until evacuation is completed") flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag)) + flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers") + flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers") startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 7bef6edfb..3db556a8f 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -24,6 +23,16 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +const ( + // containerWorkerCountDefault is a default value of the count of + // concurrent container evacuation workers. + containerWorkerCountDefault = 10 + // objectWorkerCountDefault is a default value of the count of + // concurrent object evacuation workers. + objectWorkerCountDefault = 10 ) var ( @@ -79,6 +88,9 @@ type EvacuateShardPrm struct { IgnoreErrors bool Async bool Scope EvacuateScope + + ContainerWorkerCount uint32 + ObjectWorkerCount uint32 } // EvacuateShardRes represents result of the EvacuateShard operation. 
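The two fields just added to `EvacuateShardPrm` let callers cap container-level and object-level concurrency; when they are zero, the defaults of 10 (`containerWorkerCountDefault`, `objectWorkerCountDefault`) declared earlier in this patch apply. The rest of the patch wires them into a three-level errgroup fan-out over shards, their containers, and the objects inside them. A compact, self-contained sketch of that layering is shown below; the names and the fake shard/container data are illustrative, not the engine's real types.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// evacuate sketches the fan-out: shards run concurrently, while container and
// object tasks are bounded by the two worker-count knobs.
func evacuate(ctx context.Context, shards map[string][]string, containerWorkers, objectWorkers int) error {
	opCtx, cancel := context.WithCancelCause(ctx)
	defer cancel(nil)

	egShard, _ := errgroup.WithContext(opCtx)
	egContainer, _ := errgroup.WithContext(opCtx)
	egContainer.SetLimit(containerWorkers)
	egObject, _ := errgroup.WithContext(opCtx)
	egObject.SetLimit(objectWorkers)

	for shardID, containers := range shards {
		shardID, containers := shardID, containers
		egShard.Go(func() error {
			for _, cnr := range containers {
				cnr := cnr
				egContainer.Go(func() error {
					for obj := 0; obj < 3; obj++ { // pretend each container holds three objects
						obj := obj
						egObject.Go(func() error {
							select {
							case <-opCtx.Done():
								return context.Cause(opCtx)
							default:
							}
							fmt.Printf("evacuated %s/%s/obj-%d\n", shardID, cnr, obj)
							return nil
						})
					}
					return nil
				})
			}
			return nil
		})
	}

	// Wait in the same order the engine does: shard tasks first (after this no
	// new container tasks appear), then container tasks, then object tasks.
	err := egShard.Wait()
	if cErr := egContainer.Wait(); err == nil {
		err = cErr
	}
	if oErr := egObject.Wait(); err == nil {
		err = oErr
	}
	return err
}

func main() {
	shards := map[string][]string{"shard-1": {"cnr-a", "cnr-b"}, "shard-2": {"cnr-c"}}
	if err := evacuate(context.Background(), shards, 10, 10); err != nil {
		fmt.Println("evacuation failed:", err)
	}
}

The ordering of the three Wait calls matters: each outer group finishing guarantees that no more tasks will be submitted to the next inner group. In the real patch each task additionally calls the shared cancel function on failure so that one error stops all three levels; the sketch omits that for brevity.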
@@ -189,8 +201,6 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { return res } -const defaultEvacuateBatchSize = 100 - type pooledShard struct { hashedShard pool util.WorkerPool @@ -242,8 +252,16 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev return nil, err } + var mtx sync.RWMutex + copyShards := func() []pooledShard { + mtx.RLock() + defer mtx.RUnlock() + t := make([]pooledShard, len(shards)) + copy(t, shards) + return t + } eg.Go(func() error { - return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate) + return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate) }) if prm.Async { @@ -261,7 +279,7 @@ func ctxOrBackground(ctx context.Context, background bool) context.Context { } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { var err error ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", @@ -287,13 +305,39 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p return err } - for _, shardID := range shardIDs { - if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { - e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) - return err + ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm) + continueLoop := true + for i := 0; continueLoop && i < len(shardIDs); i++ { + select { + case <-ctx.Done(): + continueLoop = false + default: + egShard.Go(func() error { + err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject) + if err != nil { + cancel(err) + } + return err + }) } } + err = egShard.Wait() + if err != nil { + err = fmt.Errorf("shard error: %w", err) + } + errContainer := egContainer.Wait() + errObject := egObject.Wait() + if errContainer != nil { + err = errors.Join(err, fmt.Errorf("container error: %w", errContainer)) + } + if errObject != nil { + err = errors.Join(err, fmt.Errorf("object error: %w", errObject)) + } + if err != nil { + e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + return err + } e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation, zap.Strings("shard_ids", shardIDs), @@ -309,6 +353,27 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p return nil } +func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) ( + context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group, +) { + operationCtx, cancel := context.WithCancelCause(ctx) + egObject, _ := errgroup.WithContext(operationCtx) + objectWorkerCount := prm.ObjectWorkerCount + if objectWorkerCount == 0 { + objectWorkerCount = objectWorkerCountDefault + } + egObject.SetLimit(int(objectWorkerCount)) + egContainer, _ := errgroup.WithContext(operationCtx) + containerWorkerCount := prm.ContainerWorkerCount + if containerWorkerCount == 0 { + containerWorkerCount = 
containerWorkerCountDefault + } + egContainer.SetLimit(int(containerWorkerCount)) + egShard, _ := errgroup.WithContext(operationCtx) + + return operationCtx, cancel, egShard, egContainer, egObject +} + func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals") defer span.End() @@ -335,8 +400,9 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha return nil } -func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + egContainer *errgroup.Group, egObject *errgroup.Group, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", trace.WithAttributes( @@ -345,11 +411,10 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E defer span.End() if prm.Scope.WithObjects() { - if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { + if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil { return err } } - if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() { if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil { return err @@ -359,44 +424,60 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E return nil } -func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, + shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + egContainer *errgroup.Group, egObject *errgroup.Group, ) error { - var listPrm shard.ListWithCursorPrm - listPrm.WithCount(defaultEvacuateBatchSize) - sh := shardsToEvacuate[shardID] - sh.SetEvacuationInProgress(true) - - var c *meta.Cursor - for { - listPrm.WithCursor(c) - - // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes - // because ListWithCursor works only with the metabase. 
- listRes, err := sh.ListWithCursor(ctx, listPrm) - if err != nil { - if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) { - break + var cntPrm shard.IterateOverContainersPrm + cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error { + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } + egContainer.Go(func() error { + var objPrm shard.IterateOverObjectsInContainerPrm + objPrm.BucketName = name + objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error { + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } + egObject.Go(func() error { + err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate) + if err != nil { + cancel(err) + } + return err + }) + return nil + } + err := sh.IterateOverObjectsInContainer(ctx, objPrm) + if err != nil { + cancel(err) } - e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err - } - - if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil { - return err - } - - c = listRes.Cursor() + }) + return nil } - return nil + + sh.SetEvacuationInProgress(true) + err := sh.IterateOverContainers(ctx, cntPrm) + if err != nil { + cancel(err) + e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + } + return err } func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { sh := shardsToEvacuate[shardID] + shards := getShards() var listPrm pilorama.TreeListTreesPrm first := true @@ -637,68 +718,65 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) return shards, nil } -func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, +func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, + getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, ) error { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects", - trace.WithAttributes( - attribute.Int("objects_count", len(toEvacuate)), - )) + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() - for i := range toEvacuate { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - addr := toEvacuate[i].Address + select { + case <-ctx.Done(): + return context.Cause(ctx) + default: + } - var getPrm shard.GetPrm - getPrm.SetAddress(addr) - getPrm.SkipEvacCheck(true) + shards := getShards() + addr := objInfo.Address - getRes, err := sh.Get(ctx, getPrm) - if err != nil { - if prm.IgnoreErrors { - res.objFailed.Add(1) - continue - } - e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return err - } + var getPrm shard.GetPrm + 
getPrm.SetAddress(addr) + getPrm.SkipEvacCheck(true) - evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res) - if err != nil { - return err - } - - if evacuatedLocal { - continue - } - - if prm.ObjectsHandler == nil { - // Do not check ignoreErrors flag here because - // ignoring errors on put make this command kinda useless. - return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i]) - } - - moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) - if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - return err - } - if moved { - res.objEvacuated.Add(1) - } else if prm.IgnoreErrors { + getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm) + if err != nil { + if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } else { - return fmt.Errorf("object %s was not replicated", addr) + return nil } + e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + return err + } + + evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res) + if err != nil { + return err + } + + if evacuatedLocal { + return nil + } + + if prm.ObjectsHandler == nil { + // Do not check ignoreErrors flag here because + // ignoring errors on put make this command kinda useless. 
+ return fmt.Errorf("%w: %s", errPutShard, objInfo) + } + + moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) + if err != nil { + e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + return err + } + if moved { + res.objEvacuated.Add(1) + } else if prm.IgnoreErrors { + res.objFailed.Add(1) + e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + } else { + return fmt.Errorf("object %s was not replicated", addr) } return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 28529fab9..f72333399 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -6,6 +6,8 @@ import ( "fmt" "path/filepath" "strconv" + "sync" + "sync/atomic" "testing" "time" @@ -174,13 +176,13 @@ func TestEvacuateObjectsNetwork(t *testing.T) { errReplication := errors.New("handler error") acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) { - var n uint64 + var n atomic.Uint64 return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { - if n == max { + if n.Load() == max { return false, errReplication } - n++ + n.Add(1) for i := range objects { if addr == objectCore.AddressOf(objects[i]) { require.Equal(t, objects[i], obj) @@ -314,6 +316,36 @@ func TestEvacuateCancellation(t *testing.T) { require.Equal(t, uint64(0), res.ObjectsEvacuated()) } +func TestEvacuateCancellationByError(t *testing.T) { + t.Parallel() + e, ids, _ := newEngineEvacuate(t, 2, 10) + defer func() { + require.NoError(t, e.Close(context.Background())) + }() + + require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + + var prm EvacuateShardPrm + prm.ShardID = ids[1:2] + var once atomic.Bool + prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) { + var err error + flag := true + if once.CompareAndSwap(false, true) { + err = errors.New("test error") + flag = false + } + return flag, err + } + prm.Scope = EvacuateScopeObjects + prm.ObjectWorkerCount = 2 + prm.ContainerWorkerCount = 2 + + _, err := e.Evacuate(context.Background(), prm) + require.ErrorContains(t, err, "test error") +} + func TestEvacuateSingleProcess(t *testing.T) { e, ids, _ := newEngineEvacuate(t, 2, 3) defer func() { @@ -531,6 +563,7 @@ func TestEvacuateTreesRemote(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + mutex := sync.Mutex{} evacuatedTreeOps := make(map[string][]*pilorama.Move) var prm EvacuateShardPrm prm.ShardID = ids @@ -545,7 +578,9 @@ func TestEvacuateTreesRemote(t *testing.T) { if op.Time == 0 { return true, "", nil } + mutex.Lock() evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op) + mutex.Unlock() height = op.Time + 1 } } diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index b4326a92c..5943be7f4 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -1,6 +1,7 @@ package meta import ( + 
"bytes" "context" "time" @@ -61,6 +62,20 @@ func (l ListRes) Cursor() *Cursor { return l.cursor } +// IterateOverContainersPrm contains parameters for IterateOverContainers operation. +type IterateOverContainersPrm struct { + // Handler function executed upon containers in db. + Handler func(context.Context, []byte, cid.ID) error +} + +// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type IterateOverObjectsInContainerPrm struct { + // BucketName container's bucket name. + BucketName []byte + // Handler function executed upon objects in db. + Handler func(context.Context, *objectcore.Info) error +} + // ListWithCursor lists physical objects available in metabase starting from // cursor. Includes objects of all types. Does not include inhumed objects. // Use cursor value from response for consecutive requests. @@ -259,3 +274,155 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte) return rawID, name[0] } + +// IterateOverContainers lists physical containers available in metabase starting from first. +func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return ErrDegradedMode + } + + err := db.boltDB.View(func(tx *bbolt.Tx) error { + return db.iterateOverContainers(ctx, tx, prm) + }) + success = err == nil + return metaerr.Wrap(err) +} + +func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error { + var containerID cid.ID + for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} { + c := tx.Cursor() + for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() { + cidRaw, _ := parseContainerIDWithPrefix(&containerID, name) + if cidRaw == nil { + continue + } + + bktName := make([]byte, len(name)) + copy(bktName, name) + var cnt cid.ID + copy(cnt[:], containerID[:]) + err := prm.Handler(ctx, bktName, cnt) + if err != nil { + return err + } + } + } + + return nil +} + +// IterateOverObjectsInContainer iterate over physical objects available in metabase starting from first. 
+func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return ErrDegradedMode + } + + var containerID cid.ID + cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName) + if cidRaw == nil { + return nil + } + err := db.boltDB.View(func(tx *bbolt.Tx) error { + return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm) + }) + success = err == nil + return metaerr.Wrap(err) +} + +func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte, + containerID cid.ID, prm IterateOverObjectsInContainerPrm, +) error { + bkt := tx.Bucket(prm.BucketName) + if bkt == nil { + return nil + } + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) + c := bkt.Cursor() + k, v := c.First() + + var objType objectSDK.Type + + switch prefix { + case primaryPrefix: + objType = objectSDK.TypeRegular + case lockersPrefix: + objType = objectSDK.TypeLock + case tombstonePrefix: + objType = objectSDK.TypeTombstone + default: + return nil + } + + for ; k != nil; k, v = c.Next() { + var obj oid.ID + if err := obj.Decode(k); err != nil { + break + } + + if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { + continue + } + + var isLinkingObj bool + var ecInfo *objectcore.ECInfo + if objType == objectSDK.TypeRegular { + var o objectSDK.Object + if err := o.Unmarshal(v); err != nil { + return err + } + isLinkingObj = isLinkObject(&o) + ecHeader := o.ECHeader() + if ecHeader != nil { + ecInfo = &objectcore.ECInfo{ + ParentID: ecHeader.Parent(), + Index: ecHeader.Index(), + Total: ecHeader.Total(), + } + } + } + + var a oid.Address + a.SetContainer(containerID) + a.SetObject(obj) + objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo} + err := prm.Handler(ctx, &objInfo) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 08ea81a0c..9f56ec750 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -34,6 +34,20 @@ func (r ListContainersRes) Containers() []cid.ID { return r.containers } +// IterateOverContainersPrm contains parameters for IterateOverContainers operation. +type IterateOverContainersPrm struct { + // Handler function executed upon containers in db. + Handler func(context.Context, []byte, cid.ID) error +} + +// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type IterateOverObjectsInContainerPrm struct { + // BucketName container's bucket name. + BucketName []byte + // Handler function executed upon containers in db. + Handler func(context.Context, *objectcore.Info) error +} + // ListWithCursorPrm contains parameters for ListWithCursor operation. 
type ListWithCursorPrm struct { count uint32 @@ -164,3 +178,54 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List cursor: res.Cursor(), }, nil } + +// IterateOverContainers lists physical containers presented in shard. +func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error { + _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + + var metaPrm meta.IterateOverContainersPrm + metaPrm.Handler = prm.Handler + err := s.metaBase.IterateOverContainers(ctx, metaPrm) + if err != nil { + return fmt.Errorf("could not iterate over containers: %w", err) + } + + return nil +} + +// IterateOverObjectsInContainer lists physical objects presented in shard for provided container's bucket name. +func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error { + _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer", + trace.WithAttributes( + attribute.Bool("has_handler", prm.Handler != nil), + )) + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + + var metaPrm meta.IterateOverObjectsInContainerPrm + metaPrm.BucketName = prm.BucketName + metaPrm.Handler = prm.Handler + err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + if err != nil { + return fmt.Errorf("could not iterate over objects: %w", err) + } + + return nil +} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index aacebe9e3..bdc6f7c38 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -23,12 +23,14 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha } prm := engine.EvacuateShardPrm{ - ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - ObjectsHandler: s.replicateObject, - TreeHandler: s.replicateTree, - Async: true, - Scope: engine.EvacuateScope(req.GetBody().GetScope()), + ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), + IgnoreErrors: req.GetBody().GetIgnoreErrors(), + ObjectsHandler: s.replicateObject, + TreeHandler: s.replicateTree, + Async: true, + Scope: engine.EvacuateScope(req.GetBody().GetScope()), + ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), + ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), } _, err = s.s.Evacuate(ctx, prm) diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 04994328a..88a06de22 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -394,6 +394,10 @@ message StartShardEvacuationRequest { bool ignore_errors = 2; // Evacuation scope. uint32 scope = 3; + // Count of concurrent container evacuation workers. + uint32 container_worker_count = 4; + // Count of concurrent object evacuation workers. 
+ uint32 object_worker_count = 5; } Body body = 1; diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 019cac290..e92a8acd1 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -6511,9 +6511,11 @@ func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool { } type StartShardEvacuationRequest_Body struct { - Shard_ID [][]byte `json:"shardID"` - IgnoreErrors bool `json:"ignoreErrors"` - Scope uint32 `json:"scope"` + Shard_ID [][]byte `json:"shardID"` + IgnoreErrors bool `json:"ignoreErrors"` + Scope uint32 `json:"scope"` + ContainerWorkerCount uint32 `json:"containerWorkerCount"` + ObjectWorkerCount uint32 `json:"objectWorkerCount"` } var ( @@ -6533,6 +6535,8 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) { size += proto.RepeatedBytesSize(1, x.Shard_ID) size += proto.BoolSize(2, x.IgnoreErrors) size += proto.UInt32Size(3, x.Scope) + size += proto.UInt32Size(4, x.ContainerWorkerCount) + size += proto.UInt32Size(5, x.ObjectWorkerCount) return size } @@ -6558,6 +6562,12 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar if x.Scope != 0 { mm.AppendUint32(3, x.Scope) } + if x.ContainerWorkerCount != 0 { + mm.AppendUint32(4, x.ContainerWorkerCount) + } + if x.ObjectWorkerCount != 0 { + mm.AppendUint32(5, x.ObjectWorkerCount) + } } // UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. @@ -6587,6 +6597,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er return fmt.Errorf("cannot unmarshal field %s", "Scope") } x.Scope = data + case 4: // ContainerWorkerCount + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount") + } + x.ContainerWorkerCount = data + case 5: // ObjectWorkerCount + data, ok := fc.Uint32() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount") + } + x.ObjectWorkerCount = data } } return nil @@ -6618,6 +6640,24 @@ func (x *StartShardEvacuationRequest_Body) GetScope() uint32 { func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) { x.Scope = v } +func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 { + if x != nil { + return x.ContainerWorkerCount + } + return 0 +} +func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) { + x.ContainerWorkerCount = v +} +func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 { + if x != nil { + return x.ObjectWorkerCount + } + return 0 +} +func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) { + x.ObjectWorkerCount = v +} // MarshalJSON implements the json.Marshaler interface. 
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { @@ -6653,6 +6693,16 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) out.RawString(prefix) out.Uint32(x.Scope) } + { + const prefix string = ",\"containerWorkerCount\":" + out.RawString(prefix) + out.Uint32(x.ContainerWorkerCount) + } + { + const prefix string = ",\"objectWorkerCount\":" + out.RawString(prefix) + out.Uint32(x.ObjectWorkerCount) + } out.RawByte('}') } @@ -6706,6 +6756,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { f = in.Uint32() x.Scope = f } + case "containerWorkerCount": + { + var f uint32 + f = in.Uint32() + x.ContainerWorkerCount = f + } + case "objectWorkerCount": + { + var f uint32 + f = in.Uint32() + x.ObjectWorkerCount = f + } } in.WantComma() } From 8434f3dbfc850839c759430cea9640f3c87e5f95 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 19 Sep 2024 17:00:58 +0300 Subject: [PATCH 023/591] [#1385] metabase: Use `Batch` for delete-related operations Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/counter.go | 4 ++-- pkg/local_object_storage/metabase/delete.go | 2 +- pkg/local_object_storage/metabase/graveyard.go | 2 +- pkg/local_object_storage/metabase/inhume.go | 2 +- pkg/local_object_storage/metabase/lock.go | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index 275099ff2..3ead0d9a0 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrReadOnlyMode } - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { b := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) @@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrReadOnlyMode } - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { b := tx.Bucket(containerCounterBucketName) key := make([]byte, cidSize) diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 00c8d06e0..e5e9840a0 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -112,7 +112,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { var err error var res DeleteRes - err = db.boltDB.Update(func(tx *bbolt.Tx) error { + err = db.boltDB.Batch(func(tx *bbolt.Tx) error { res, err = db.deleteGroup(tx, prm.addrs) return err }) diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go index 80d40fb78..31f95d6ed 100644 --- a/pkg/local_object_storage/metabase/graveyard.go +++ b/pkg/local_object_storage/metabase/graveyard.go @@ -282,7 +282,7 @@ func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error { buf := make([]byte, addressKeySize) - return db.boltDB.Update(func(tx *bbolt.Tx) error { + return db.boltDB.Batch(func(tx *bbolt.Tx) error { bkt := tx.Bucket(graveyardBucketName) if bkt == nil { return nil diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index b62accc43..3aae15061 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -181,7 
+181,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { inhumedByCnrID: make(map[cid.ID]ObjectCounters), } currEpoch := db.epochState.CurrentEpoch() - err := db.boltDB.Update(func(tx *bbolt.Tx) error { + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { return db.inhumeTx(tx, currEpoch, prm, &res) }) success = err == nil diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index 732ba426d..6b78ef392 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error { } key := make([]byte, cidSize) - return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error { + return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error { if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular { return logicerr.Wrap(new(apistatus.LockNonRegularObject)) } @@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { var unlockedObjects []oid.Address - if err := db.boltDB.Update(func(tx *bbolt.Tx) error { + if err := db.boltDB.Batch(func(tx *bbolt.Tx) error { for i := range lockers { unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object()) if err != nil { From 76268e3ea2a73072119ea1963f914646c029e08a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 20 Sep 2024 13:28:21 +0300 Subject: [PATCH 024/591] [#1385] metabase: Validate that tombstone and target have the same container ID Target container ID is taken from tombstone: cmd/frostfs-node/object.go:507 Also object of type `TOMBSTONE` contains objectID, so tombstone and tombstoned object must have the same containerID. Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/lock_test.go | 8 +++-- .../metabase/containers_test.go | 4 +-- .../metabase/control_test.go | 2 +- .../metabase/counter_test.go | 30 ++++++++++++------- .../metabase/delete_test.go | 6 ++-- .../metabase/exists_test.go | 2 +- pkg/local_object_storage/metabase/get_test.go | 3 +- .../metabase/graveyard_test.go | 27 ++++++++++------- pkg/local_object_storage/metabase/inhume.go | 18 +++++++++++ .../metabase/inhume_test.go | 21 +++++++++---- .../metabase/iterators_test.go | 6 ++++ .../metabase/list_test.go | 2 +- .../metabase/lock_test.go | 10 +++++-- .../metabase/select_test.go | 6 +--- .../metabase/storage_id_test.go | 2 +- .../shard/metrics_test.go | 13 ++++---- 16 files changed, 108 insertions(+), 52 deletions(-) diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 7fa7c27ef..9e6758fb4 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -199,7 +199,9 @@ func TestLockExpiration(t *testing.T) { require.NoError(t, err) var inhumePrm InhumePrm - inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) + tombAddr := oidtest.Address() + tombAddr.SetContainer(cnr) + inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked _, err = e.Inhume(context.Background(), inhumePrm) @@ -209,7 +211,9 @@ func TestLockExpiration(t *testing.T) { e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1) // 4. 
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) + tombAddr = oidtest.Address() + tombAddr.SetContainer(cnr) + inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) require.Eventually(t, func() bool { _, err = e.Inhume(context.Background(), inhumePrm) diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 8b1874458..110be68ad 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) { assertContains(cnrs, cnr) - require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address())) + require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID())) cnrs, err = db.Containers(context.Background()) require.NoError(t, err) @@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) { require.NoError(t, metaInhume( db, object.AddressOf(obj), - oidtest.Address(), + oidtest.ID(), )) volume -= int(obj.PayloadSize()) diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go index 0354a5eb6..2a64881cb 100644 --- a/pkg/local_object_storage/metabase/control_test.go +++ b/pkg/local_object_storage/metabase/control_test.go @@ -41,7 +41,7 @@ func TestReset(t *testing.T) { err = putBig(db, obj) require.NoError(t, err) - err = metaInhume(db, addrToInhume, oidtest.Address()) + err = metaInhume(db, addrToInhume, oidtest.ID()) require.NoError(t, err) assertExists(addr, true, nil) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index d1f808a63..dccccd456 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -156,13 +156,18 @@ func TestCounters(t *testing.T) { } var prm meta.InhumePrm - prm.SetTombstoneAddress(oidtest.Address()) - prm.SetAddresses(inhumedObjs...) + for _, o := range inhumedObjs { + tombAddr := oidtest.Address() + tombAddr.SetContainer(o.Container()) - res, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed()) - require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed()) + prm.SetTombstoneAddress(tombAddr) + prm.SetAddresses(o) + + res, err := db.Inhume(context.Background(), prm) + require.NoError(t, err) + require.Equal(t, uint64(1), res.LogicInhumed()) + require.Equal(t, uint64(1), res.UserInhumed()) + } c, err := db.ObjectCounters() require.NoError(t, err) @@ -296,11 +301,16 @@ func TestCounters(t *testing.T) { } var prm meta.InhumePrm - prm.SetTombstoneAddress(oidtest.Address()) - prm.SetAddresses(inhumedObjs...) 
+ for _, o := range inhumedObjs { + tombAddr := oidtest.Address() + tombAddr.SetContainer(o.Container()) - _, err := db.Inhume(context.Background(), prm) - require.NoError(t, err) + prm.SetTombstoneAddress(tombAddr) + prm.SetAddresses(o) + + _, err := db.Inhume(context.Background(), prm) + require.NoError(t, err) + } c, err := db.ObjectCounters() require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index cb85157e7..fe5f7833b 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) { // inhume parent and child so they will be on graveyard ts := testutil.GenerateObjectWithCID(cnr) - err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object()) require.NoError(t, err) ts = testutil.GenerateObjectWithCID(cnr) - err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object()) require.NoError(t, err) // delete object @@ -108,7 +108,7 @@ func TestGraveOnlyDelete(t *testing.T) { addr := oidtest.Address() // inhume non-existent object by address - require.NoError(t, metaInhume(db, addr, oidtest.Address())) + require.NoError(t, metaInhume(db, addr, oidtest.ID())) // delete the object data require.NoError(t, metaDelete(db, addr)) diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go index 0087c1e31..1e4148eba 100644 --- a/pkg/local_object_storage/metabase/exists_test.go +++ b/pkg/local_object_storage/metabase/exists_test.go @@ -37,7 +37,7 @@ func TestDB_Exists(t *testing.T) { require.True(t, exists) t.Run("removed object", func(t *testing.T) { - err := metaInhume(db, object.AddressOf(regular), oidtest.Address()) + err := metaInhume(db, object.AddressOf(regular), oidtest.ID()) require.NoError(t, err) exists, err := metaExists(db, object.AddressOf(regular)) diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index 7654d2cd8..f0caaea70 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -150,9 +150,8 @@ func TestDB_Get(t *testing.T) { t.Run("get removed object", func(t *testing.T) { obj := oidtest.Address() - ts := oidtest.Address() - require.NoError(t, metaInhume(db, obj, ts)) + require.NoError(t, metaInhume(db, obj, oidtest.ID())) _, err := metaGet(db, obj, false) require.True(t, client.IsErrObjectAlreadyRemoved(err)) diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index 75c7e2852..b9c6ce28c 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" @@ -114,11 +115,12 @@ func TestDB_IterateDeletedObjects(t *testing.T) { db := newDB(t) defer 
func() { require.NoError(t, db.Close()) }() + cnr := cidtest.ID() // generate and put 4 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - obj3 := testutil.GenerateObject() - obj4 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) + obj3 := testutil.GenerateObjectWithCID(cnr) + obj4 := testutil.GenerateObjectWithCID(cnr) var err error @@ -138,6 +140,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() + addrTombstone.SetContainer(cnr) inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) inhumePrm.SetTombstoneAddress(addrTombstone) @@ -201,11 +204,12 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close()) }() + cnr := cidtest.ID() // generate and put 4 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() - obj3 := testutil.GenerateObject() - obj4 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) + obj3 := testutil.GenerateObjectWithCID(cnr) + obj4 := testutil.GenerateObjectWithCID(cnr) var err error @@ -223,6 +227,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() + addrTombstone.SetContainer(cnr) var inhumePrm meta.InhumePrm inhumePrm.SetAddresses( @@ -392,9 +397,10 @@ func TestDB_DropGraves(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close()) }() + cnr := cidtest.ID() // generate and put 2 objects - obj1 := testutil.GenerateObject() - obj2 := testutil.GenerateObject() + obj1 := testutil.GenerateObjectWithCID(cnr) + obj2 := testutil.GenerateObjectWithCID(cnr) var err error @@ -406,6 +412,7 @@ func TestDB_DropGraves(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() + addrTombstone.SetContainer(cnr) var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 3aae15061..77bb84af1 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -143,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() { p.forceRemoval = true } +func (p *InhumePrm) validate() error { + if p == nil { + return nil + } + if p.tomb != nil { + for _, addr := range p.target { + if addr.Container() != p.tomb.Container() { + return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb) + } + } + } + return nil +} + var errBreakBucketForEach = errors.New("bucket ForEach break") // ErrLockObjectRemoval is returned when inhume operation is being @@ -171,6 +185,10 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() + if err := prm.validate(); err != nil { + return InhumeRes{}, err + } + if db.mode.NoMetabase() { return InhumeRes{}, ErrDegradedMode } else if db.mode.ReadOnly() { diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index 163fbec2a..277316f7b 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -9,6 +9,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" @@ -21,12 +22,10 @@ func TestDB_Inhume(t *testing.T) { raw := testutil.GenerateObject() testutil.AddAttribute(raw, "foo", "bar") - tombstoneID := oidtest.Address() - err := putBig(db, raw) require.NoError(t, err) - err = metaInhume(db, object.AddressOf(raw), tombstoneID) + err = metaInhume(db, object.AddressOf(raw), oidtest.ID()) require.NoError(t, err) _, err = metaExists(db, object.AddressOf(raw)) @@ -43,13 +42,20 @@ func TestInhumeTombOnTomb(t *testing.T) { var ( err error + cnr = cidtest.ID() addr1 = oidtest.Address() addr2 = oidtest.Address() addr3 = oidtest.Address() + addr4 = oidtest.Address() inhumePrm meta.InhumePrm existsPrm meta.ExistsPrm ) + addr1.SetContainer(cnr) + addr2.SetContainer(cnr) + addr3.SetContainer(cnr) + addr4.SetContainer(cnr) + inhumePrm.SetAddresses(addr1) inhumePrm.SetTombstoneAddress(addr2) @@ -84,7 +90,7 @@ func TestInhumeTombOnTomb(t *testing.T) { require.True(t, client.IsErrObjectAlreadyRemoved(err)) inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + inhumePrm.SetTombstoneAddress(addr4) // try to inhume addr1 (which is already a tombstone in graveyard) _, err = db.Inhume(context.Background(), inhumePrm) @@ -117,10 +123,13 @@ func TestInhumeLocked(t *testing.T) { require.ErrorAs(t, err, &e) } -func metaInhume(db *meta.DB, target, tomb oid.Address) error { +func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error { var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(target) - inhumePrm.SetTombstoneAddress(tomb) + var tombAddr oid.Address + tombAddr.SetContainer(target.Container()) + tombAddr.SetObject(tomb) + inhumePrm.SetTombstoneAddress(tombAddr) _, err := db.Inhume(context.Background(), inhumePrm) return err diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 54d56d923..777a94a6f 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -9,6 +9,7 @@ import ( object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -71,11 +72,16 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close()) }() + cnr := cidtest.ID() ts := oidtest.Address() protected1 := oidtest.Address() protected2 := oidtest.Address() protectedLocked := oidtest.Address() garbage := oidtest.Address() + ts.SetContainer(cnr) + protected1.SetContainer(cnr) + protected2.SetContainer(cnr) + protectedLocked.SetContainer(cnr) var prm meta.InhumePrm var err error diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 6207497b1..bc1726bd6 100644 --- 
a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -110,7 +110,7 @@ func TestLisObjectsWithCursor(t *testing.T) { err = putBig(db, obj) require.NoError(t, err) ts := testutil.GenerateObjectWithCID(containerID) - err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts)) + err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object()) require.NoError(t, err) // add one child object (do not include parent into expected) diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 62a109b02..9601cb2be 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -73,7 +73,9 @@ func TestDB_Lock(t *testing.T) { _, err := db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + tombAddr := oidtest.Address() + tombAddr.SetContainer(objAddr.Container()) + inhumePrm.SetTombstoneAddress(tombAddr) _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) @@ -89,7 +91,9 @@ func TestDB_Lock(t *testing.T) { _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) - inhumePrm.SetTombstoneAddress(oidtest.Address()) + tombAddr = oidtest.Address() + tombAddr.SetContainer(objAddr.Container()) + inhumePrm.SetTombstoneAddress(tombAddr) _, err = db.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) }) @@ -103,7 +107,7 @@ func TestDB_Lock(t *testing.T) { var objLockedErr *apistatus.ObjectLocked // try to inhume locked object using tombstone - err := metaInhume(db, objAddr, lockAddr) + err := metaInhume(db, objAddr, lockAddr.Object()) require.ErrorAs(t, err, &objLockedErr) // free locked object diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 6469bbdbc..fcd5d3a90 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -352,11 +352,7 @@ func TestDB_SelectInhume(t *testing.T) { object.AddressOf(raw2), ) - var tombstone oid.Address - tombstone.SetContainer(cnr) - tombstone.SetObject(oidtest.ID()) - - err = metaInhume(db, object.AddressOf(raw2), tombstone) + err = metaInhume(db, object.AddressOf(raw2), oidtest.ID()) require.NoError(t, err) fs = objectSDK.SearchFilters{} diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go index aaf6480ab..a86e42bd2 100644 --- a/pkg/local_object_storage/metabase/storage_id_test.go +++ b/pkg/local_object_storage/metabase/storage_id_test.go @@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) { cnrID, ok := deleted.ContainerID() require.True(t, ok) ts := testutil.GenerateObjectWithCID(cnrID) - require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts))) + require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object())) // check StorageID for object without storageID fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2)) diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 01a85da97..56622326a 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -17,6 +17,7 @@ import ( cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" ) @@ -308,17 +309,19 @@ func TestCounters(t *testing.T) { t.Run("inhume_TS", func(t *testing.T) { var prm InhumePrm - ts := objectcore.AddressOf(testutil.GenerateObject()) phy := mm.getObjectCounter(physical) logic := mm.getObjectCounter(logical) custom := mm.getObjectCounter(user) inhumedNumber := int(phy / 4) - prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...) - - _, err := sh.Inhume(context.Background(), prm) - require.NoError(t, err) + for _, o := range addrFromObjs(oo[:inhumedNumber]) { + ts := oidtest.Address() + ts.SetContainer(o.Container()) + prm.SetTarget(ts, o) + _, err := sh.Inhume(context.Background(), prm) + require.NoError(t, err) + } for i := range inhumedNumber { cid, ok := oo[i].ContainerID() From fd18aa363b7b33f8b662f9b4bffaf9f3099216a6 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 20 Sep 2024 13:32:05 +0300 Subject: [PATCH 025/591] [#1385] metabase: Optimize `isTomb` check As tombstone and target must have the same containerID, do not iterate other containers. Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/inhume.go | 29 ++++++++------------- 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 77bb84af1..12f27d330 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -377,11 +377,8 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck return targetBucket, value, nil } -func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) { - targetIsTomb, err := isTomb(graveyardBKT, key) - if err != nil { - return false, err - } +func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) { + targetIsTomb := isTomb(graveyardBKT, addressKey) // do not add grave if target is a tombstone if targetIsTomb { @@ -390,7 +387,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool // if tombstone appears object must be // additionally marked with GC - return false, garbageBKT.Put(key, zeroValue) + return false, garbageBKT.Put(addressKey, zeroValue) } func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error { @@ -410,25 +407,21 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc return nil } -func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) { +func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool { targetIsTomb := false // iterate over graveyard and check if target address // is the address of tombstone in graveyard. - err := graveyardBucket.ForEach(func(_, v []byte) error { + // tombstone must have the same container ID as key. 
+ c := graveyardBucket.Cursor() + containerPrefix := addressKey[:cidSize] + for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() { // check if graveyard has record with key corresponding // to tombstone address (at least one) - targetIsTomb = bytes.Equal(v, key) - + targetIsTomb = bytes.Equal(v, addressKey) if targetIsTomb { - // break bucket iterator - return errBreakBucketForEach + break } - - return nil - }) - if err != nil && !errors.Is(err, errBreakBucketForEach) { - return false, err } - return targetIsTomb, nil + return targetIsTomb } From 95597d34371db6555739c4e92640cd8f8862ee7e Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 08:56:02 +0300 Subject: [PATCH 026/591] [#1388] golangci: Make `unused` linter stricter Add additional checks. The most important false positive - structs used as map keys. Signed-off-by: Dmitrii Stepanov --- .golangci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 971f0d0e7..33cf88d8a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -38,6 +38,10 @@ linters-settings: alias: pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object alias: objectSDK + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false custom: truecloudlab-linters: path: bin/linters/external_linters.so From 2bd560e52846b77d2902370cfaa80d54fcd77c46 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 08:57:18 +0300 Subject: [PATCH 027/591] [#1388] cli: Drop unused flag/parameter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/internal/client/client.go | 7 ------- cmd/frostfs-cli/modules/object/head.go | 3 --- 2 files changed, 10 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 03a987a57..dcd67f0d9 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -565,13 +565,6 @@ type HeadObjectPrm struct { commonObjectPrm objectAddressPrm rawPrm - - mainOnly bool -} - -// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API. -func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) { - x.mainOnly = v } // HeadObjectRes groups the resulting values of HeadObject operation. diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 14797dc41..cf2e2d5e6 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -38,7 +38,6 @@ func initObjectHeadCmd() { _ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag) flags.String(fileFlag, "", "File to write header to.
Default: stdout.") - flags.Bool("main-only", false, "Return only main fields") flags.Bool(commonflags.JSON, false, "Marshal output in JSON") flags.Bool("proto", false, "Marshal output in Protobuf") flags.Bool(rawFlag, false, rawFlagDesc) @@ -49,7 +48,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var obj oid.ID objAddr := readObjectAddress(cmd, &cnr, &obj) - mainOnly, _ := cmd.Flags().GetBool("main-only") pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) @@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { raw, _ := cmd.Flags().GetBool(rawFlag) prm.SetRawFlag(raw) prm.SetAddress(objAddr) - prm.SetMainOnlyFlag(mainOnly) res, err := internalclient.HeadObject(cmd.Context(), prm) if err != nil { From b69e07da7af2c8167e02585a723008fa2753f848 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:02:00 +0300 Subject: [PATCH 028/591] [#1388] metrics: Mark nolint:unused metrics Although these fields could be deleted, I annotated them so that all the metrics used would be defined in one place. Signed-off-by: Dmitrii Stepanov --- internal/metrics/innerring.go | 3 ++- internal/metrics/node.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go index f6b14a632..f3f529d05 100644 --- a/internal/metrics/innerring.go +++ b/internal/metrics/innerring.go @@ -17,7 +17,8 @@ type InnerRingServiceMetrics struct { eventDuration *prometheus.HistogramVec morphCacheMetrics *morphCacheMetrics logMetrics logger.LogMetrics - appInfo *ApplicationInfo + // nolint: unused + appInfo *ApplicationInfo } // NewInnerRingMetrics returns new instance of metrics collectors for inner ring. diff --git a/internal/metrics/node.go b/internal/metrics/node.go index d9e401446..711387875 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -25,7 +25,8 @@ type NodeMetrics struct { morphClient *morphClientMetrics morphCache *morphCacheMetrics log logger.LogMetrics - appInfo *ApplicationInfo + // nolint: unused + appInfo *ApplicationInfo } func NewNodeMetrics() *NodeMetrics { From aedb55f913d151669885aa6bc8ea5e83269a60b0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:06:19 +0300 Subject: [PATCH 029/591] [#1388] governance: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/innerring/initialization.go | 1 - .../processors/governance/handlers_test.go | 21 ------------------- .../processors/governance/processor.go | 3 --- 3 files changed, 25 deletions(-) diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 7da0a9794..c4aaeda56 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -163,7 +163,6 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli Log: s.log, Metrics: s.irMetrics, FrostFSClient: frostfsCli, - NetmapClient: s.netmapClient, AlphabetState: s, EpochState: s, Voter: s, diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index b73e24318..87040bdef 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" @@ -38,7 +37,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { alphabetKeys: testKeys.mainnetKeys, } f := &testFrostFSClient{} - nm := &testNetmapClient{} proc, err := New( &Params{ @@ -50,7 +48,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { MorphClient: m, MainnetClient: mn, FrostFSClient: f, - NetmapClient: nm, }, ) @@ -73,10 +70,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { }, }, v.votes, "invalid vote calls") - var irUpdateExp []nmClient.UpdateIRPrm - - require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates") - var expAlphabetUpdate client.UpdateAlphabetListPrm expAlphabetUpdate.SetHash(ev.txHash) expAlphabetUpdate.SetList(testKeys.newInnerRingExp) @@ -119,7 +112,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { alphabetKeys: testKeys.mainnetKeys, } f := &testFrostFSClient{} - nm := &testNetmapClient{} proc, err := New( &Params{ @@ -131,7 +123,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { MorphClient: m, MainnetClient: mn, FrostFSClient: f, - NetmapClient: nm, }, ) @@ -155,9 +146,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { }, }, v.votes, "invalid vote calls") - var irUpdatesExp []nmClient.UpdateIRPrm - require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates") - var alpabetUpdExp client.UpdateAlphabetListPrm alpabetUpdExp.SetList(testKeys.newInnerRingExp) alpabetUpdExp.SetHash(ev.TxHash) @@ -293,12 +281,3 @@ func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) c.updates = append(c.updates, p) return nil } - -type testNetmapClient struct { - updates []nmClient.UpdateIRPrm -} - -func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error { - c.updates = append(c.updates, p) - return nil -} diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index fa267eade..6daea417e 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -79,7 +79,6 @@ type ( metrics metrics.Register pool *ants.Pool frostfsClient FrostFSClient - netmapClient NetmapClient alphabetState AlphabetState epochState EpochState @@ -105,7 +104,6 @@ type ( MorphClient MorphClient MainnetClient MainnetClient FrostFSClient FrostFSClient - NetmapClient NetmapClient } ) @@ -146,7 +144,6 @@ func New(p *Params) (*Processor, error) { metrics: metricsRegister, pool: pool, frostfsClient: p.FrostFSClient, - netmapClient: p.NetmapClient, alphabetState: p.AlphabetState, epochState: p.EpochState, voter: p.Voter, From e319bf403e7ddd24d9527829a9d5863643635ff8 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:08:11 +0300 Subject: [PATCH 030/591] [#1388] apeSvc: Drop unused and make annotations Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 - cmd/frostfs-node/object.go | 1 - cmd/frostfs-node/policy_engine.go | 4 +++- pkg/ape/chainbase/option.go | 10 ---------- pkg/services/object/ape/service.go | 6 +----- 5 files changed, 4 insertions(+), 18 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 63f410b89..0ffa8c45b 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1082,7 +1082,6 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() } else { 
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( - chainbase.WithLogger(c.log), chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()), chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()), chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()), diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 610e2c363..9d4e35ca8 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -473,7 +473,6 @@ func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFe func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( - c.log, objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go index 22fda2b4c..55f76cc76 100644 --- a/cmd/frostfs-node/policy_engine.go +++ b/cmd/frostfs-node/policy_engine.go @@ -21,7 +21,9 @@ type accessPolicyEngine struct { var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil) type morphAPEChainCacheKey struct { - name chain.Name + // nolint:unused + name chain.Name + // nolint:unused target engine.Target } diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go index e547701fb..590b7a885 100644 --- a/pkg/ape/chainbase/option.go +++ b/pkg/ape/chainbase/option.go @@ -5,9 +5,7 @@ import ( "os" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.etcd.io/bbolt" - "go.uber.org/zap" ) type Option func(*cfg) @@ -18,7 +16,6 @@ type cfg struct { noSync bool maxBatchDelay time.Duration maxBatchSize int - log *logger.Logger } func defaultCfg() *cfg { @@ -26,7 +23,6 @@ func defaultCfg() *cfg { perm: os.ModePerm, maxBatchDelay: bbolt.DefaultMaxBatchDelay, maxBatchSize: bbolt.DefaultMaxBatchSize, - log: &logger.Logger{Logger: zap.L()}, } } @@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option { c.maxBatchSize = maxBatchSize } } - -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index a1634e7c5..6eedaf99e 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -12,7 +12,6 @@ import ( objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -24,8 +23,6 @@ import ( var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") type Service struct { - log *logger.Logger - apeChecker Checker next objectSvc.ServiceServer @@ -67,9 +64,8 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { return &Service{ - log: log, apeChecker: apeChecker, next: next, } From 580cd551807cea0ad2b9dfe9fbd21da0b55d6282 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:10:03 
+0300 Subject: [PATCH 031/591] [#1388] getSvc: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/assembleec.go | 2 +- pkg/services/object/get/assemblerec.go | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go index a58602bf7..03f913bbf 100644 --- a/pkg/services/object/get/assembleec.go +++ b/pkg/services/object/get/assembleec.go @@ -43,7 +43,7 @@ func (r *request) assembleEC(ctx context.Context) { } r.prm.common = r.prm.common.WithLocalOnly(false) - assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch) + assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch) r.log.Debug(logs.GetAssemblingECObject, zap.Uint64("range_offset", r.ctxRange().GetOffset()), diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index dde0d7dad..44d9af3a2 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -34,7 +34,6 @@ type assemblerec struct { rng *objectSDK.Range remoteStorage ecRemoteStorage localStorage localStorage - cs container.Source log *logger.Logger head bool traverserGenerator traverserGenerator @@ -47,7 +46,6 @@ func newAssemblerEC( rng *objectSDK.Range, remoteStorage ecRemoteStorage, localStorage localStorage, - cs container.Source, log *logger.Logger, head bool, tg traverserGenerator, @@ -59,7 +57,6 @@ func newAssemblerEC( ecInfo: ecInfo, remoteStorage: remoteStorage, localStorage: localStorage, - cs: cs, log: log, head: head, traverserGenerator: tg, From 63a567a1de8d40b87c5e0cfcb99235eb4079f059 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:10:38 +0300 Subject: [PATCH 032/591] [#1388] engine: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/control.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 4778cf539..80fb3f9ed 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -249,23 +249,9 @@ func (e *StorageEngine) ResumeExecution() error { } type ReConfiguration struct { - errorsThreshold uint32 - shardPoolSize uint32 - shards map[string][]shard.Option // meta path -> shard opts } -// SetErrorsThreshold sets a size amount of errors after which -// shard is moved to read-only mode. -func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) { - rCfg.errorsThreshold = errorsThreshold -} - -// SetShardPoolSize sets a size of worker pool for each shard. -func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) { - rCfg.shardPoolSize = shardPoolSize -} - // AddShard adds a shard for the reconfiguration. // Shard identifier is calculated from paths used in blobstor. 
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) { From 004ff9e9bf68174fbb64df6cbc81f98ced8755d3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:11:15 +0300 Subject: [PATCH 033/591] [#1388] blobstor: Drop unused Signed-off-by: Dmitrii Stepanov --- .../blobstor/memstore/control.go | 16 ++++++++-------- .../blobstor/memstore/memstore_test.go | 2 -- .../blobstor/memstore/option.go | 15 +-------------- 3 files changed, 9 insertions(+), 24 deletions(-) diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 449d4352a..83da52eb7 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -10,11 +10,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error { return nil } -func (s *memstoreImpl) Init() error { return nil } -func (s *memstoreImpl) Close() error { return nil } -func (s *memstoreImpl) Type() string { return Type } -func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } -func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f } -func (s *memstoreImpl) SetParentID(string) {} +func (s *memstoreImpl) Init() error { return nil } +func (s *memstoreImpl) Close() error { return nil } +func (s *memstoreImpl) Type() string { return Type } +func (s *memstoreImpl) Path() string { return s.rootPath } +func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } +func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {} +func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go index 8d1480dff..dd130e5f9 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/stretchr/testify/require" ) @@ -16,7 +15,6 @@ import ( func TestSimpleLifecycle(t *testing.T) { s := New( WithRootPath("memstore"), - WithLogger(test.NewLogger(t)), ) defer func() { require.NoError(t, s.Close()) }() require.NoError(t, s.Open(mode.ComponentReadWrite)) diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 3d67b1e9c..97a03993d 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -2,33 +2,20 @@ package memstore import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) type cfg struct { - log *logger.Logger rootPath string readOnly bool compression *compression.Config - reportError func(string, error) } func defaultConfig() *cfg { 
- return &cfg{ - log: &logger.Logger{Logger: zap.L()}, - reportError: func(string, error) {}, - } + return &cfg{} } type Option func(*cfg) -func WithLogger(l *logger.Logger) Option { - return func(c *cfg) { - c.log = l - } -} - func WithRootPath(p string) Option { return func(c *cfg) { c.rootPath = p From 401c398704f15c1d516fbcc04f842d9d3fb8c2d3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:12:01 +0300 Subject: [PATCH 034/591] [#1388] metabase: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/delete.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index e5e9840a0..4ad11164f 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) { type referenceNumber struct { all, cur int - addr oid.Address - obj *objectSDK.Object } @@ -295,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter nRef, ok := refCounter[k] if !ok { nRef = &referenceNumber{ - all: parentLength(tx, parAddr), - addr: parAddr, - obj: parent, + all: parentLength(tx, parAddr), + obj: parent, } refCounter[k] = nRef From d1d6e3471c2e902c29480a091545f09c4daaf335 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:12:32 +0300 Subject: [PATCH 035/591] [#1388] signSvc: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/services/object/sign.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 35367aafe..f5ae97b62 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -10,8 +10,6 @@ import ( ) type SignService struct { - key *ecdsa.PrivateKey - sigSvc *util.SignService svc ServiceServer @@ -48,7 +46,6 @@ type getRangeStreamSigner struct { func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService { return &SignService{ - key: key, sigSvc: util.NewUnarySignService(key), svc: svc, } From bdd57c8b6b03f78ed74c31db41f5bbd0f3c84beb Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:12:54 +0300 Subject: [PATCH 036/591] [#1388] sessionSvc: Add nolint annotations Used as map key. 
Signed-off-by: Dmitrii Stepanov --- pkg/services/session/storage/temporary/storage.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go index ee93dee71..9ae9db9dc 100644 --- a/pkg/services/session/storage/temporary/storage.go +++ b/pkg/services/session/storage/temporary/storage.go @@ -9,7 +9,9 @@ import ( ) type key struct { + // nolint:unused tokenID string + // nolint:unused ownerID string } From a2ab6d4942046c3bca59addd2b73ce3b58251b84 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 09:13:27 +0300 Subject: [PATCH 037/591] [#1388] node: Drop unused Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 - cmd/frostfs-node/container.go | 6 ------ cmd/frostfs-node/netmap.go | 1 - 3 files changed, 8 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 0ffa8c45b..c625b575f 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -602,7 +602,6 @@ type cfgNetmap struct { needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime - startEpoch uint64 // epoch number when application is started } type cfgNodeInfo struct { diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 5a29aac76..6733140d2 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -128,9 +128,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c cnrRdr.lister = client cnrRdr.eacl = c.cfgObject.eaclSource cnrRdr.src = c.cfgObject.cnrSource - - cnrWrt.cacheEnabled = true - cnrWrt.eacls = cachedEACLStorage } return cnrRdr, cnrWrt @@ -247,9 +244,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { type morphContainerWriter struct { neoClient *cntClient.Client - - cacheEnabled bool - eacls ttlEACLStorage } func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) { diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index c0b87492c..5e4585f85 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -259,7 +259,6 @@ func initNetmapState(c *cfg) { } c.cfgNetmap.state.setCurrentEpoch(epoch) - c.cfgNetmap.startEpoch = epoch c.setContractNodeInfo(ni) } From 29e4cf7ba1c88552172bdbb19dade34ea9ff5ba2 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 23 Sep 2024 10:51:05 +0300 Subject: [PATCH 038/591] [#1388] ir: Annotate cmode as nolint Signed-off-by: Dmitrii Stepanov --- pkg/innerring/innerring.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 50a37845b..53a07e36c 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -103,6 +103,8 @@ type ( // to the application. runners []func(chan<- error) error + // cmode used for upgrade scenario. 
+ // nolint:unused cmode *atomic.Bool } From 4fbfffd44c4e0f4aa7bc88052eff8400a0421f7c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 24 Sep 2024 12:13:11 +0300 Subject: [PATCH 039/591] [#1388] putSvc: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/services/object/put/prm.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go index 0c8f12b45..52a7c102c 100644 --- a/pkg/services/object/put/prm.go +++ b/pkg/services/object/put/prm.go @@ -2,7 +2,6 @@ package putsvc import ( "context" - "crypto/ecdsa" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" @@ -21,8 +20,6 @@ type PutInitPrm struct { traverseOpts []placement.Option relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error - - privateKey *ecdsa.PrivateKey } type PutChunkPrm struct { @@ -68,11 +65,3 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm { return p } - -func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm { - if p != nil { - p.privateKey = v - } - - return p -} From 772b471aab53774e1d2cf11ae7db28166a47ec45 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 24 Sep 2024 15:58:52 +0300 Subject: [PATCH 040/591] [#1388] lens: Add nolint annotations Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-lens/internal/schema/common/raw.go | 2 ++ cmd/frostfs-lens/internal/schema/writecache/types.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go index 0990e24c3..55051554c 100644 --- a/cmd/frostfs-lens/internal/schema/common/raw.go +++ b/cmd/frostfs-lens/internal/schema/common/raw.go @@ -7,6 +7,8 @@ import ( ) type RawEntry struct { + // key and value used for record dump. + // nolint:unused key, value []byte } diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go index 3f71c5366..11e6f3fcd 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/types.go +++ b/cmd/frostfs-lens/internal/schema/writecache/types.go @@ -16,6 +16,8 @@ type ( DefaultRecord struct { addr oid.Address + // data used for record dump. 
+ // nolint:unused data []byte } ) From a5e1aa22c963fe612d6d2d3316ee7ca0482f0d09 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 25 Sep 2024 17:15:03 +0300 Subject: [PATCH 041/591] [#1394] putSvc: Fix relay Signed-off-by: Dmitrii Stepanov --- pkg/services/object/common/target/target.go | 9 +++++---- pkg/services/object/patch/streamer.go | 2 +- pkg/services/object/put/streamer.go | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index 980c4c6bd..a2d6b4d39 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -13,16 +13,16 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) -func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { // prepare needed put parameters - if err := preparePrm(prm); err != nil { + if err := preparePrm(&prm); err != nil { return nil, fmt.Errorf("could not prepare put parameters: %w", err) } if prm.Header.Signature() != nil { - return newUntrustedTarget(prm) + return newUntrustedTarget(&prm) } - return newTrustedTarget(prm) + return newTrustedTarget(&prm) } func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { @@ -49,6 +49,7 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit } func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + prm.Relay = nil // do not relay request without signature maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 73def8c7c..c8ed6fdbf 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { } oV2.GetHeader().SetOwnerID(ownerID) - target, err := target.New(&objectwriter.Params{ + target, err := target.New(objectwriter.Params{ Config: s.Config, Common: commonPrm, Header: objectSDK.NewFromV2(oV2), diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index d08e7fafa..f71309d31 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -26,7 +26,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { } // initialize destination target - prmTarget := &objectwriter.Params{ + prmTarget := objectwriter.Params{ Config: p.Config, Common: prm.common, Header: prm.hdr, From 5f22ba6f380fd9d41be070f000b10cc4432981b9 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Fri, 27 Sep 2024 13:45:57 +0300 Subject: [PATCH 042/591] [#1397] object: Correctly set namespace before APE check Signed-off-by: Airat Arifullin --- pkg/services/object/ape/checker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index 3688638d0..3f6cc7c20 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -108,7 +108,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { return c.checkerCore.CheckAPE(checkercore.CheckPrm{ Request: r, PublicKey: pub, - Namespace: prm.Method, + Namespace: prm.Namespace, Container: prm.Container, ContainerOwner: 
prm.ContainerOwner, BearerToken: prm.BearerToken, From d0ed29b3c73626f6bf881090f86bdc834d81acc1 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 3 Sep 2024 15:42:38 +0300 Subject: [PATCH 043/591] [#1350] node: Add ability to evacuate objects from `REP 1` only Signed-off-by: Anton Nikiforov --- cmd/frostfs-cli/modules/control/evacuation.go | 4 + docs/evacuation.md | 7 +- pkg/local_object_storage/engine/evacuate.go | 39 +++- .../engine/evacuate_test.go | 181 +++++++++++++++++- pkg/local_object_storage/metabase/list.go | 51 +++++ pkg/local_object_storage/shard/list.go | 30 ++- pkg/services/control/server/evacuate_async.go | 1 + pkg/services/control/service.proto | 2 + pkg/services/control/service_frostfs.pb.go | 31 +++ 9 files changed, 340 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 04a67e5b5..fffc5e33e 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -20,6 +20,7 @@ const ( awaitFlag = "await" noProgressFlag = "no-progress" scopeFlag = "scope" + repOneOnlyFlag = "rep-one-only" containerWorkerCountFlag = "container-worker-count" objectWorkerCountFlag = "object-worker-count" @@ -69,6 +70,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) { ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag) containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag) objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag) + repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag) req := &control.StartShardEvacuationRequest{ Body: &control.StartShardEvacuationRequest_Body{ @@ -77,6 +79,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) { Scope: getEvacuationScope(cmd), ContainerWorkerCount: containerWorkerCount, ObjectWorkerCount: objectWorkerCount, + RepOneOnly: repOneOnly, }, } @@ -380,6 +383,7 @@ func initControlStartEvacuationShardCmd() { flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag)) flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers") flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers") + flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'") startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) } diff --git a/docs/evacuation.md b/docs/evacuation.md index 885ce169a..d47d56d15 100644 --- a/docs/evacuation.md +++ b/docs/evacuation.md @@ -20,7 +20,12 @@ Because it is necessary to prevent removing by policer objects with policy `REP ## Commands -`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`). +`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. +By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`). +To evacuate objects only from containers with policy `REP 1` use option `--rep-one-only`. 
+To adjust resource consumption required for evacuation use options: + - `--container-worker-count` count of concurrent container evacuation workers + - `--object-worker-count` count of concurrent object evacuation workers `frostfs-cli control shards evacuation stop` stops running evacuation process. diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 3db556a8f..a618ff274 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" @@ -16,6 +17,7 @@ import ( tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -88,6 +90,7 @@ type EvacuateShardPrm struct { IgnoreErrors bool Async bool Scope EvacuateScope + RepOneOnly bool ContainerWorkerCount uint32 ObjectWorkerCount uint32 @@ -288,6 +291,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), + attribute.Bool("repOneOnly", prm.RepOneOnly), )) defer func() { @@ -430,13 +434,34 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context ) error { sh := shardsToEvacuate[shardID] var cntPrm shard.IterateOverContainersPrm - cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error { + cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error { select { case <-ctx.Done(): return context.Cause(ctx) default: } egContainer.Go(func() error { + var skip bool + c, err := e.containerSource.Load().cs.Get(cnt) + if err != nil { + if client.IsErrContainerNotFound(err) { + skip = true + } else { + return err + } + } + if !skip && prm.RepOneOnly { + skip = e.isNotRepOne(c) + } + if skip { + countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name} + count, err := sh.CountAliveObjectsInBucket(ctx, countPrm) + if err != nil { + return err + } + res.objSkipped.Add(count) + return nil + } var objPrm shard.IterateOverObjectsInContainerPrm objPrm.BucketName = name objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error { @@ -454,7 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context }) return nil } - err := sh.IterateOverObjectsInContainer(ctx, objPrm) + err = sh.IterateOverObjectsInContainer(ctx, objPrm) if err != nil { cancel(err) } @@ -781,6 +806,16 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI return nil } +func (e *StorageEngine) isNotRepOne(c *container.Container) bool { + p := c.Value.PlacementPolicy() + for i := range p.NumberOfReplicas() { + if p.ReplicaDescriptor(i).NumberOfObjects() > 1 { + return true + } + } + return false +} + func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr 
oid.Address, object *objectSDK.Object, sh *shard.Shard, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, ) (bool, error) { diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index f72333399..8498c9245 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -20,14 +21,38 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) +type containerStorage struct { + cntmap map[cid.ID]*container.Container + latency time.Duration +} + +func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { + time.Sleep(cs.latency) + v, ok := cs.cntmap[id] + if !ok { + return nil, new(apistatus.ContainerNotFound) + } + coreCnt := coreContainer.Container{ + Value: *v, + } + return &coreCnt, nil +} + +func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { + return nil, nil +} + func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) { dir := t.TempDir() @@ -61,10 +86,15 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng {Key: pilorama.AttributeVersion, Value: []byte("XXX")}, {Key: pilorama.AttributeFilename, Value: []byte("file.txt")}, } - + cnrMap := make(map[cid.ID]*container.Container) for _, sh := range ids { - for range objPerShard { + for i := range objPerShard { + // Create dummy container + cnr1 := container.Container{} + cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i)) contID := cidtest.ID() + cnrMap[contID] = &cnr1 + obj := testutil.GenerateObjectWithCID(contID) objects = append(objects, obj) @@ -78,6 +108,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng require.NoError(t, err) } } + e.SetContainerSource(&containerStorage{cntmap: cnrMap}) return e, ids, objects } @@ -177,7 +208,10 @@ func TestEvacuateObjectsNetwork(t *testing.T) { acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) { var n atomic.Uint64 + var mtx sync.Mutex return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { + mtx.Lock() + defer mtx.Unlock() if n.Load() == max { return false, errReplication } @@ -640,3 +674,146 @@ func TestEvacuateTreesRemote(t *testing.T) { require.Equal(t, expectedTreeOps, evacuatedTreeOps) } + +func 
TestEvacuateShardObjectsRepOneOnly(t *testing.T) { + e, ids, _ := newEngineEvacuate(t, 2, 0) + defer func() { + require.NoError(t, e.Close(context.Background())) + }() + + // Create container with policy REP 2 + cnr1 := container.Container{} + p1 := netmap.PlacementPolicy{} + p1.SetContainerBackupFactor(1) + x1 := netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(2) + p1.AddReplicas(x1) + x1 = netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(1) + p1.AddReplicas(x1) + cnr1.SetPlacementPolicy(p1) + cnr1.SetAttribute("cnr", "cnr1") + + var idCnr1 cid.ID + container.CalculateID(&idCnr1, cnr1) + + cnrmap := make(map[cid.ID]*container.Container) + var cids []cid.ID + cnrmap[idCnr1] = &cnr1 + cids = append(cids, idCnr1) + + // Create container with policy REP 1 + cnr2 := container.Container{} + p2 := netmap.PlacementPolicy{} + p2.SetContainerBackupFactor(1) + x2 := netmap.ReplicaDescriptor{} + x2.SetNumberOfObjects(1) + p2.AddReplicas(x2) + x2 = netmap.ReplicaDescriptor{} + x2.SetNumberOfObjects(1) + p2.AddReplicas(x2) + cnr2.SetPlacementPolicy(p2) + cnr2.SetAttribute("cnr", "cnr2") + + var idCnr2 cid.ID + container.CalculateID(&idCnr2, cnr2) + cnrmap[idCnr2] = &cnr2 + cids = append(cids, idCnr2) + + // Create container for simulate removing + cnr3 := container.Container{} + p3 := netmap.PlacementPolicy{} + p3.SetContainerBackupFactor(1) + x3 := netmap.ReplicaDescriptor{} + x3.SetNumberOfObjects(1) + p3.AddReplicas(x3) + cnr3.SetPlacementPolicy(p3) + cnr3.SetAttribute("cnr", "cnr3") + + var idCnr3 cid.ID + container.CalculateID(&idCnr3, cnr3) + cids = append(cids, idCnr3) + + e.SetContainerSource(&containerStorage{cntmap: cnrmap}) + + for _, sh := range ids { + for j := range 3 { + for range 4 { + obj := testutil.GenerateObjectWithCID(cids[j]) + var putPrm shard.PutPrm + putPrm.SetObject(obj) + _, err := e.shards[sh.String()].Put(context.Background(), putPrm) + require.NoError(t, err) + } + } + } + + var prm EvacuateShardPrm + prm.ShardID = ids[0:1] + prm.Scope = EvacuateScopeObjects + prm.RepOneOnly = true + + require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + + res, err := e.Evacuate(context.Background(), prm) + require.NoError(t, err) + require.Equal(t, uint64(4), res.ObjectsEvacuated()) + require.Equal(t, uint64(8), res.ObjectsSkipped()) + require.Equal(t, uint64(0), res.ObjectsFailed()) +} + +func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { + t.Skip() + e, ids, _ := newEngineEvacuate(t, 2, 0) + defer func() { + require.NoError(t, e.Close(context.Background())) + }() + + cnrmap := make(map[cid.ID]*container.Container) + var cids []cid.ID + // Create containers with policy REP 1 + for i := range 10_000 { + cnr1 := container.Container{} + p1 := netmap.PlacementPolicy{} + p1.SetContainerBackupFactor(1) + x1 := netmap.ReplicaDescriptor{} + x1.SetNumberOfObjects(2) + p1.AddReplicas(x1) + cnr1.SetPlacementPolicy(p1) + cnr1.SetAttribute("i", strconv.Itoa(i)) + + var idCnr1 cid.ID + container.CalculateID(&idCnr1, cnr1) + + cnrmap[idCnr1] = &cnr1 + cids = append(cids, idCnr1) + } + + e.SetContainerSource(&containerStorage{ + cntmap: cnrmap, + latency: time.Millisecond * 100, + }) + + for _, cnt := range cids { + for range 1 { + obj := testutil.GenerateObjectWithCID(cnt) + var putPrm shard.PutPrm + putPrm.SetObject(obj) + _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm) + require.NoError(t, err) + } + } + + var prm EvacuateShardPrm + prm.ShardID = ids[0:1] + prm.Scope = EvacuateScopeObjects + prm.RepOneOnly = true + prm.ContainerWorkerCount = 10 + 
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + + start := time.Now() + _, err := e.Evacuate(context.Background(), prm) + t.Logf("evacuate took %v\n", time.Since(start)) + require.NoError(t, err) +} diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 5943be7f4..44f25246e 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -76,6 +76,12 @@ type IterateOverObjectsInContainerPrm struct { Handler func(context.Context, *objectcore.Info) error } +// CountAliveObjectsInBucketPrm contains parameters for IterateOverObjectsInContainer operation. +type CountAliveObjectsInBucketPrm struct { + // BucketName container's bucket name. + BucketName []byte +} + // ListWithCursor lists physical objects available in metabase starting from // cursor. Includes objects of all types. Does not include inhumed objects. // Use cursor value from response for consecutive requests. @@ -426,3 +432,48 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c } return nil } + +// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage. +func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success) + }() + _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket") + defer span.End() + + db.modeMtx.RLock() + defer db.modeMtx.RUnlock() + + if db.mode.NoMetabase() { + return 0, ErrDegradedMode + } + + cidRaw := prm.BucketName[1:bucketKeySize] + if cidRaw == nil { + return 0, nil + } + var count uint64 + err := db.boltDB.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(prm.BucketName) + if bkt == nil { + return nil + } + graveyardBkt := tx.Bucket(graveyardBucketName) + garbageBkt := tx.Bucket(garbageBucketName) + c := bkt.Cursor() + k, _ := c.First() + for ; k != nil; k, _ = c.Next() { + if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { + continue + } + count++ + } + return nil + }) + success = err == nil + return count, metaerr.Wrap(err) +} diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 9f56ec750..f5d633b77 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -44,10 +44,16 @@ type IterateOverContainersPrm struct { type IterateOverObjectsInContainerPrm struct { // BucketName container's bucket name. BucketName []byte - // Handler function executed upon containers in db. + // Handler function executed upon objects in db. Handler func(context.Context, *objectcore.Info) error } +// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation. +type CountAliveObjectsInBucketPrm struct { + // BucketName container's bucket name. + BucketName []byte +} + // ListWithCursorPrm contains parameters for ListWithCursor operation. type ListWithCursorPrm struct { count uint32 @@ -229,3 +235,25 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return nil } + +// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage. 
+func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) { + _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket") + defer span.End() + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { + return 0, ErrDegradedMode + } + + var metaPrm meta.CountAliveObjectsInBucketPrm + metaPrm.BucketName = prm.BucketName + count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm) + if err != nil { + return 0, fmt.Errorf("could not count alive objects in bucket: %w", err) + } + + return count, nil +} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index bdc6f7c38..146ac7e16 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -31,6 +31,7 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha Scope: engine.EvacuateScope(req.GetBody().GetScope()), ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), + RepOneOnly: req.GetBody().GetRepOneOnly(), } _, err = s.s.Evacuate(ctx, prm) diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 88a06de22..ae1939e13 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -398,6 +398,8 @@ message StartShardEvacuationRequest { uint32 container_worker_count = 4; // Count of concurrent object evacuation workers. uint32 object_worker_count = 5; + // Choose for evacuation objects in `REP 1` containers only. + bool rep_one_only = 6; } Body body = 1; diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index e92a8acd1..e16f082b1 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -6516,6 +6516,7 @@ type StartShardEvacuationRequest_Body struct { Scope uint32 `json:"scope"` ContainerWorkerCount uint32 `json:"containerWorkerCount"` ObjectWorkerCount uint32 `json:"objectWorkerCount"` + RepOneOnly bool `json:"repOneOnly"` } var ( @@ -6537,6 +6538,7 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) { size += proto.UInt32Size(3, x.Scope) size += proto.UInt32Size(4, x.ContainerWorkerCount) size += proto.UInt32Size(5, x.ObjectWorkerCount) + size += proto.BoolSize(6, x.RepOneOnly) return size } @@ -6568,6 +6570,9 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar if x.ObjectWorkerCount != 0 { mm.AppendUint32(5, x.ObjectWorkerCount) } + if x.RepOneOnly { + mm.AppendBool(6, x.RepOneOnly) + } } // UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
@@ -6609,6 +6614,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount") } x.ObjectWorkerCount = data + case 6: // RepOneOnly + data, ok := fc.Bool() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly") + } + x.RepOneOnly = data } } return nil @@ -6658,6 +6669,15 @@ func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 { func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) { x.ObjectWorkerCount = v } +func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool { + if x != nil { + return x.RepOneOnly + } + return false +} +func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) { + x.RepOneOnly = v +} // MarshalJSON implements the json.Marshaler interface. func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) { @@ -6703,6 +6723,11 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) out.RawString(prefix) out.Uint32(x.ObjectWorkerCount) } + { + const prefix string = ",\"repOneOnly\":" + out.RawString(prefix) + out.Bool(x.RepOneOnly) + } out.RawByte('}') } @@ -6768,6 +6793,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { f = in.Uint32() x.ObjectWorkerCount = f } + case "repOneOnly": + { + var f bool + f = in.Bool() + x.RepOneOnly = f + } } in.WantComma() } From 7f8a1dcf8e238a08af84a1ef9e180541f783b71f Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 30 Sep 2024 14:15:13 +0300 Subject: [PATCH 044/591] [#1400] adm: Support flag `alphabet-wallets` for commands `proxy-add/remove-account` Signed-off-by: Anton Nikiforov --- cmd/frostfs-adm/internal/modules/morph/proxy/root.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index 082bc57d1..1854c8d2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -30,11 +30,13 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func init() { From a13219808a42f30839fe87ba3ea88a8fdd54f0ac Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Fri, 27 Sep 2024 12:39:43 +0300 Subject: [PATCH 045/591] [#1375] node: Configure of the container cache size Signed-off-by: Alexander Chuprov --- cmd/frostfs-node/cache.go | 8 ++-- cmd/frostfs-node/config.go | 2 + cmd/frostfs-node/config/morph/config.go | 15 ++++++ cmd/frostfs-node/container.go | 63 +++++++++++++------------ cmd/frostfs-node/morph.go | 1 + config/example/node.yaml | 1 + 6 files changed, 55 insertions(+), 35 deletions(-) diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 57f65d873..06142a46c 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -165,13 +165,11 @@ type ttlContainerStorage 
struct { delInfoCache *ttlNetCache[cid.ID, *container.DelInfo] } -func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage { - const containerCacheSize = 100 - - lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) { +func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { + lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) { return v.Get(id) }, metrics.NewCacheMetrics("container")) - lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) { + lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) { return v.DeletionInfo(id) }, metrics.NewCacheMetrics("container_deletion_info")) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index c625b575f..58a96879f 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -570,6 +570,8 @@ type cfgMorph struct { // TTL of Sidechain cached values. Non-positive value disables caching. cacheTTL time.Duration + containerCacheSize uint32 + proxyScriptHash neogoutil.Uint160 } diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index 1c536a0e2..d089870ea 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -30,6 +30,9 @@ const ( // FrostfsIDCacheSizeDefault is a default value of APE chain cache. FrostfsIDCacheSizeDefault = 10_000 + + // ContainerCacheSizeDefault represents the default size for the container cache. + ContainerCacheSizeDefault = 100 ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -103,6 +106,18 @@ func CacheTTL(c *config.Config) time.Duration { return CacheTTLDefault } +// ContainerCacheSize returns the value of "container_cache_size" config parameter +// from "morph" section. +// +// Returns 0 if the value is not positive integer. +// Returns ContainerCacheSizeDefault if the value is missing. +func ContainerCacheSize(c *config.Config) uint32 { + if c.Sub(subsection).Value("container_cache_size") == nil { + return ContainerCacheSizeDefault + } + return config.Uint32Safe(c.Sub(subsection), "container_cache_size") +} + // SwitchInterval returns the value of "switch_interval" config parameter // from "morph" section. // diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 6733140d2..729fcb8af 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -87,43 +87,46 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c cnrRdr.lister = client } else { // use RPC node as source of Container contract items (with caching) - cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL) - cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL) + c.cfgObject.cnrSource = cnrSrc + if c.cfgMorph.containerCacheSize > 0 { + containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize) - subscribeToContainerCreation(c, func(e event.Event) { - ev := e.(containerEvent.PutSuccess) + subscribeToContainerCreation(c, func(e event.Event) { + ev := e.(containerEvent.PutSuccess) - // read owner of the created container in order to update the reading cache. 
- // TODO: use owner directly from the event after neofs-contract#256 will become resolved - // but don't forget about the profit of reading the new container and caching it: - // creation success are most commonly tracked by polling GET op. - cnr, err := cnrSrc.Get(ev.ID) - if err == nil { - cachedContainerStorage.containerCache.set(ev.ID, cnr, nil) - } else { - // unlike removal, we expect successful receive of the container - // after successful creation, so logging can be useful - c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + // read owner of the created container in order to update the reading cache. + // TODO: use owner directly from the event after neofs-contract#256 will become resolved + // but don't forget about the profit of reading the new container and caching it: + // creation success are most commonly tracked by polling GET op. + cnr, err := cnrSrc.Get(ev.ID) + if err == nil { + containerCache.containerCache.set(ev.ID, cnr, nil) + } else { + // unlike removal, we expect successful receive of the container + // after successful creation, so logging can be useful + c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + zap.Stringer("id", ev.ID), + zap.Error(err), + ) + } + + c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), - zap.Error(err), ) - } + }) - c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) - - subscribeToContainerRemoval(c, func(e event.Event) { - ev := e.(containerEvent.DeleteSuccess) - cachedContainerStorage.handleRemoval(ev.ID) - c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, - zap.Stringer("id", ev.ID), - ) - }) + subscribeToContainerRemoval(c, func(e event.Event) { + ev := e.(containerEvent.DeleteSuccess) + containerCache.handleRemoval(ev.ID) + c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, + zap.Stringer("id", ev.ID), + ) + }) + c.cfgObject.cnrSource = containerCache + } + cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL) c.cfgObject.eaclSource = cachedEACLStorage - c.cfgObject.cnrSource = cachedContainerStorage cnrRdr.lister = client cnrRdr.eacl = c.cfgObject.eaclSource diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 7178cd97d..1bfcb8ac9 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -90,6 +90,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { var netmapSource netmap.Source + c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg) c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) if c.cfgMorph.cacheTTL == 0 { diff --git a/config/example/node.yaml b/config/example/node.yaml index 86be35ba8..2a80fba18 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -81,6 +81,7 @@ morph: cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. # Default value: block time. It is recommended to have this value less or equal to block time. # Cached entities: containers, container lists, eACL tables. + container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache. 
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success - address: wss://rpc1.morph.frostfs.info:40341/ws From 54eb0058229965b7ddd704fe4da2e24f41c20f3f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Oct 2024 14:39:36 +0300 Subject: [PATCH 046/591] [#1404] go.mod: Update api-go Fix #1398 Fix #1399 Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9817f8527..1023948bc 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e + git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d diff --git a/go.sum b/go.sum index 3c6dd9a99..5d719a027 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= From 9c5ddc4dfeb6447ae7d9cc0d74db551271ac6eb1 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 2 Oct 2024 10:09:10 +0300 Subject: [PATCH 047/591] [#1407] tree: Set `ContainerOwner` in parameter for `CheckAPE` Signed-off-by: Airat Arifullin --- pkg/services/tree/ape.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 693b16e60..69cf59405 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -75,12 +75,13 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, } return s.apeChecker.CheckAPE(checkercore.CheckPrm{ - Request: request, - Namespace: namespace, - Container: cid, - PublicKey: publicKey, - BearerToken: bt, - SoftAPECheck: false, + Request: request, + Namespace: namespace, + Container: cid, + ContainerOwner: container.Value.Owner(), + PublicKey: publicKey, + BearerToken: bt, + SoftAPECheck: false, }) } From 57c31e9802ad19b8d64388315cd53a05515e353e Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 1 Oct 2024 16:09:05 +0300 Subject: [PATCH 048/591] [#1306] node: Allow tombstone_lifetime config to be loaded on the fly Signed-off-by: Ekaterina 
Lebedeva --- cmd/frostfs-node/config.go | 17 +++++++++++++++-- cmd/frostfs-node/object.go | 6 ++---- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 58a96879f..4ad9ec6c6 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -105,6 +105,10 @@ type applicationConfiguration struct { timestamp bool } + ObjectCfg struct { + tombstoneLifetime uint64 + } + EngineCfg struct { errorThreshold uint32 shardPoolSize uint32 @@ -223,6 +227,10 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) + // Object + + a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) + // Storage Engine a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) @@ -624,7 +632,7 @@ type cfgObject struct { cfgLocalStorage cfgLocalStorage - tombstoneLifetime uint64 + tombstoneLifetime *atomic.Uint64 skipSessionTokenIssuerVerification bool } @@ -815,9 +823,11 @@ func initCfgGRPC() cfgGRPC { } func initCfgObject(appCfg *config.Config) cfgObject { + var tsLifetime atomic.Uint64 + tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg)) return cfgObject{ pool: initObjectPool(appCfg), - tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg), + tombstoneLifetime: &tsLifetime, skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(), } } @@ -1296,6 +1306,9 @@ func (c *cfg) reloadConfig(ctx context.Context) { components := c.getComponents(ctx, logPrm) + // Object + c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) + // Storage Engine var rcfg engine.ReConfiguration diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 9d4e35ca8..47649c88b 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -109,13 +109,12 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe type delNetInfo struct { netmap.State - tsLifetime uint64 cfg *cfg } func (i *delNetInfo) TombstoneLifetime() (uint64, error) { - return i.tsLifetime, nil + return i.cfg.cfgObject.tombstoneLifetime.Load(), nil } // returns node owner ID calculated from configured private key. @@ -424,8 +423,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi sSearch, sPut, &delNetInfo{ - State: c.cfgNetmap.state, - tsLifetime: c.cfgObject.tombstoneLifetime, + State: c.cfgNetmap.state, cfg: c, }, From f45e75e3eb781662abbbc4f6820076c492fc1f0d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 2 Oct 2024 11:18:35 +0300 Subject: [PATCH 049/591] [#1409] adm: Do not bind DeltaFlag to viper We bind flag that could be specified in config. This is not a config flag, just a command option. 
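A minimal sketch of the distinction (illustrative only: apart from the `delta` flag name, nothing below is frostfs-adm code):

```
// Illustrative sketch, not frostfs-adm code; only the "delta" flag name is
// taken from this patch.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "force-new-epoch",
		RunE: func(cmd *cobra.Command, _ []string) error {
			// A plain command option is read from cobra directly.
			// Binding it to viper (viper.BindPFlag) would also expose it to
			// config files and environment variables, which is what this
			// change avoids.
			delta, _ := cmd.Flags().GetInt64("delta")
			fmt.Println("delta =", delta)
			return nil
		},
	}
	cmd.Flags().Int64("delta", 1, "number of epochs to increase the current epoch")
	_ = cmd.Execute()
}
```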
Also fix TestInitialize failures: ``` Error: Received unexpected error: number of epochs cannot be less than 1 Test: TestInitialize/16_nodes/force-new-epoch ``` Refs #1372 (945b7c740b0deb4) Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++- cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go index 5e4e9c725..5c5fa9988 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go @@ -31,7 +31,8 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { } bw := io.NewBufBinWriter() - if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil { + delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag) + if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go index 0288bcdc5..3300db36a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go @@ -22,7 +22,6 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag)) }, RunE: ForceNewEpochCmd, } From 62028cd7ee0b5d825b71cfa11d1d87369b1da23d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 2 Oct 2024 11:20:09 +0300 Subject: [PATCH 050/591] [#1409] adm: Uncommonize DeltaFlag It is used only in `force-new-epoch`, it is not _common_ between multiple commands. 
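A sketch of the resulting layout (constant and package names as in this patch):

```
// The flag name has a single consumer, so it is declared next to that
// consumer instead of in the shared commonflags package.
package netmap

// deltaFlag is read only by force-new-epoch.
const deltaFlag = "delta"
```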
Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/commonflags/flags.go | 1 - cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 5 +++-- cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index b51d2e115..81395edb0 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -39,5 +39,4 @@ const ( CustomZoneFlag = "domain" AlphabetSizeFlag = "size" AllFlag = "all" - DeltaFlag = "delta" ) diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go index 5c5fa9988..94223dbd0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "github.com/nspcc-dev/neo-go/pkg/io" @@ -13,6 +12,8 @@ import ( "github.com/spf13/viper" ) +const deltaFlag = "delta" + func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { @@ -31,7 +32,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error { } bw := io.NewBufBinWriter() - delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag) + delta, _ := cmd.Flags().GetInt64(deltaFlag) if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go index 3300db36a..55b7e64f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go @@ -35,7 +35,7 @@ func initForceNewEpochCmd() { ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file") - ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch") + ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch") } func init() { From f83f7feb8caa0ef5ab9a952a6a6d3e2f12a63486 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 2 Oct 2024 11:01:22 +0300 Subject: [PATCH 051/591] [#1391] adm: Properly check whether transfers were made Signed-off-by: Evgenii Stratonikov --- .../morph/initialize/initialize_transfer.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index d7b0ec86c..7f1bfee2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -27,12 +27,12 @@ const ( initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. 
initialProxyGASAmount = 50_000 * native.GASFactor - // alphabetGasRatio is a coefficient that defines the threshold below which - // the balance of the alphabet node is considered not replenished. The value - // of this coefficient is determined empirically. - alphabetGasRatio = 5 ) +func initialCommitteeGASAmount(c *helper.InitializeContext) int64 { + return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +} + func transferFunds(c *helper.InitializeContext) error { ok, err := transferFundsFinished(c) if ok || err != nil { @@ -59,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2, + Amount: initialCommitteeGASAmount(c), }, transferTarget{ Token: neo.Hash, @@ -80,12 +80,19 @@ func transferFunds(c *helper.InitializeContext) error { return c.AwaitTx() } +// transferFundsFinished checks balances of accounts we transfer GAS to. +// The stage is considered finished if the balance is greater than the half of what we need to transfer. func transferFundsFinished(c *helper.InitializeContext) (bool, error) { acc := c.Accounts[0] r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) res, err := r.BalanceOf(acc.Contract.ScriptHash()) - return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err + if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 { + return false, err + } + + res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { From 434048e8d959b29375c0d63a112b8eb8df8792d8 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 2 Oct 2024 11:28:00 +0300 Subject: [PATCH 052/591] [#1408] metabase: Fix EC search with slow and fast filters Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/select.go | 35 ++++++++++++- .../metabase/select_test.go | 50 +++++++++++++++++++ 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index ed43fc41f..85d1b08ba 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -389,8 +389,7 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc return result, true } - buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, true, false, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch) if err != nil { return result, false } @@ -401,17 +400,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc case v2object.FilterHeaderVersion: data = []byte(obj.Version().String()) case v2object.FilterHeaderHomomorphicHash: + if isECChunk { + return result, false // EC chunk and EC parent hashes are incomparable + } cs, _ := obj.PayloadHomomorphicHash() data = cs.Value() case v2object.FilterHeaderCreationEpoch: data = make([]byte, 8) binary.LittleEndian.PutUint64(data, obj.CreationEpoch()) case v2object.FilterHeaderPayloadLength: + if isECChunk { + return result, false // EC chunk and EC parent payload lengths are incomparable + } data = make([]byte, 8) binary.LittleEndian.PutUint64(data, obj.PayloadSize()) case v2object.FilterHeaderOwnerID: data = []byte(obj.OwnerID().EncodeToString()) case v2object.FilterHeaderPayloadHash: + if 
isECChunk { + return result, false // EC chunk and EC parent payload hashes are incomparable + } cs, _ := obj.PayloadChecksum() data = cs.Value() default: // user attribute @@ -439,6 +447,29 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc return result, true } +func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { + buf := make([]byte, addressKeySize) + obj, err := db.get(tx, addr, buf, true, false, currEpoch) + if err != nil { + var ecInfoError *objectSDK.ECInfoError + if errors.As(err, &ecInfoError) { + for _, chunk := range ecInfoError.ECInfo().Chunks { + var objID oid.ID + if err = objID.ReadFromV2(chunk.ID); err != nil { + continue + } + addr.SetObject(objID) + obj, err = db.get(tx, addr, buf, true, false, currEpoch) + if err == nil { + return obj, true, nil + } + } + } + return nil, false, err + } + return obj, false, nil +} + func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) { objectAttributes := obj.Attributes() if ech := obj.ECHeader(); ech != nil { diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index fcd5d3a90..0c6ebc863 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -762,6 +762,56 @@ func TestDB_SelectOwnerID(t *testing.T) { }) } +func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { + t.Parallel() + + db := newDB(t) + defer func() { require.NoError(t, db.Close()) }() + + cnr := cidtest.ID() + ecChunk1 := oidtest.ID() + ecChunk2 := oidtest.ID() + ecParent := oidtest.ID() + var ecParentAddr oid.Address + ecParentAddr.SetContainer(cnr) + ecParentAddr.SetObject(ecParent) + var ecParentAttr []objectSDK.Attribute + var attr objectSDK.Attribute + attr.SetKey(objectSDK.AttributeFilePath) + attr.SetValue("/1/2/3") + ecParentAttr = append(ecParentAttr, attr) + + chunkObj := testutil.GenerateObjectWithCID(cnr) + chunkObj.SetContainerID(cnr) + chunkObj.SetID(ecChunk1) + chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) + chunkObj.SetPayloadSize(uint64(5)) + chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0)) + + chunkObj2 := testutil.GenerateObjectWithCID(cnr) + chunkObj2.SetContainerID(cnr) + chunkObj2.SetID(ecChunk2) + chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) + chunkObj2.SetPayloadSize(uint64(10)) + chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0)) + + // put object with EC + + var prm meta.PutPrm + prm.SetObject(chunkObj) + _, err := db.Put(context.Background(), prm) + require.NoError(t, err) + + prm.SetObject(chunkObj2) + _, err = db.Put(context.Background(), prm) + require.NoError(t, err) + + fs := objectSDK.SearchFilters{} + fs.AddRootFilter() + fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix) + testSelect(t, db, cnr, fs, ecParentAddr) +} + type testTarget struct { objects []*objectSDK.Object } From 01e3944b31e7daed8ca855244b833302daabe9cc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 2 Oct 2024 14:36:10 +0300 Subject: [PATCH 053/591] [#1408] metabase: Fix tests No need to specify container ID for objects created with `testutil.GenerateObjectWithCID`. 
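A minimal sketch of the simplification (a test fragment mirroring the touched files; imports are the ones already used there):

```
cnr := cidtest.ID()
obj := testutil.GenerateObjectWithCID(cnr) // the helper already sets the container ID
// obj.SetContainerID(cnr)                 // redundant call, removed by this patch
obj.SetID(oidtest.ID())
```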
Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/delete_ec_test.go | 1 - pkg/local_object_storage/metabase/inhume_ec_test.go | 2 -- pkg/local_object_storage/metabase/select_test.go | 2 -- 3 files changed, 5 deletions(-) diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go index 66c79ecd7..a25627990 100644 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ b/pkg/local_object_storage/metabase/delete_ec_test.go @@ -39,7 +39,6 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { tombstoneID := oidtest.ID() chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetContainerID(cnr) chunkObj.SetID(ecChunk) chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) chunkObj.SetPayloadSize(uint64(10)) diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go index c3b1e72da..32e412c79 100644 --- a/pkg/local_object_storage/metabase/inhume_ec_test.go +++ b/pkg/local_object_storage/metabase/inhume_ec_test.go @@ -35,14 +35,12 @@ func TestInhumeECObject(t *testing.T) { tombstoneID := oidtest.ID() chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetContainerID(cnr) chunkObj.SetID(ecChunk) chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) chunkObj.SetPayloadSize(uint64(5)) chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0)) chunkObj2 := testutil.GenerateObjectWithCID(cnr) - chunkObj2.SetContainerID(cnr) chunkObj2.SetID(ecChunk2) chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) chunkObj2.SetPayloadSize(uint64(10)) diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 0c6ebc863..bee778e2b 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -782,14 +782,12 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { ecParentAttr = append(ecParentAttr, attr) chunkObj := testutil.GenerateObjectWithCID(cnr) - chunkObj.SetContainerID(cnr) chunkObj.SetID(ecChunk1) chunkObj.SetPayload([]byte{0, 1, 2, 3, 4}) chunkObj.SetPayloadSize(uint64(5)) chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0)) chunkObj2 := testutil.GenerateObjectWithCID(cnr) - chunkObj2.SetContainerID(cnr) chunkObj2.SetID(ecChunk2) chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) chunkObj2.SetPayloadSize(uint64(10)) From 6c46044c9cba5f2e20e105b3efa7abe166fbf577 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 3 Oct 2024 10:19:26 +0300 Subject: [PATCH 054/591] [#1410] shard: Move MetricsWriter interface to a separate file Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/shard/metrics.go | 47 +++++++++++++++++++++++ pkg/local_object_storage/shard/shard.go | 44 --------------------- 2 files changed, 47 insertions(+), 44 deletions(-) create mode 100644 pkg/local_object_storage/shard/metrics.go diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go new file mode 100644 index 000000000..568c0de5e --- /dev/null +++ b/pkg/local_object_storage/shard/metrics.go @@ -0,0 +1,47 @@ +package shard + +import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + +// MetricsWriter is an interface that must store shard's metrics. 
+type MetricsWriter interface { + // SetObjectCounter must set object counter taking into account object type. + SetObjectCounter(objectType string, v uint64) + // AddToObjectCounter must update object counter taking into account object + // type. + // Negative parameter must decrease the counter. + AddToObjectCounter(objectType string, delta int) + // AddToContainerSize must add a value to the container size. + // Value can be negative. + AddToContainerSize(cnr string, value int64) + // AddToPayloadSize must add a value to the payload size. + // Value can be negative. + AddToPayloadSize(value int64) + // IncObjectCounter must increment shard's object counter taking into account + // object type. + IncObjectCounter(objectType string) + // SetShardID must set (update) the shard identifier that will be used in + // metrics. + SetShardID(id string) + // SetReadonly must set shard mode. + SetMode(mode mode.Mode) + // IncErrorCounter increment error counter. + IncErrorCounter() + // ClearErrorCounter clear error counter. + ClearErrorCounter() + // DeleteShardMetrics deletes shard metrics from registry. + DeleteShardMetrics() + // SetContainerObjectsCount sets container object count. + SetContainerObjectsCount(cnrID string, objectType string, value uint64) + // IncContainerObjectsCount increments container object count. + IncContainerObjectsCount(cnrID string, objectType string) + // SubContainerObjectsCount subtracts container object count. + SubContainerObjectsCount(cnrID string, objectType string, value uint64) + // IncRefillObjectsCount increments refill objects count. + IncRefillObjectsCount(path string, size int, success bool) + // SetRefillPercent sets refill percent. + SetRefillPercent(path string, percent uint32) + // SetRefillStatus sets refill status. + SetRefillStatus(path string, status string) + // SetEvacuationInProgress sets evacuation status + SetEvacuationInProgress(value bool) +} diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 7496fc352..f5317b16c 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -57,50 +57,6 @@ type DeletedLockCallback func(context.Context, []oid.Address) // EmptyContainersCallback is a callback hanfling list of zero-size and zero-count containers. type EmptyContainersCallback func(context.Context, []cid.ID) -// MetricsWriter is an interface that must store shard's metrics. -type MetricsWriter interface { - // SetObjectCounter must set object counter taking into account object type. - SetObjectCounter(objectType string, v uint64) - // AddToObjectCounter must update object counter taking into account object - // type. - // Negative parameter must decrease the counter. - AddToObjectCounter(objectType string, delta int) - // AddToContainerSize must add a value to the container size. - // Value can be negative. - AddToContainerSize(cnr string, value int64) - // AddToPayloadSize must add a value to the payload size. - // Value can be negative. - AddToPayloadSize(value int64) - // IncObjectCounter must increment shard's object counter taking into account - // object type. - IncObjectCounter(objectType string) - // SetShardID must set (update) the shard identifier that will be used in - // metrics. - SetShardID(id string) - // SetReadonly must set shard mode. - SetMode(mode mode.Mode) - // IncErrorCounter increment error counter. - IncErrorCounter() - // ClearErrorCounter clear error counter. 
- ClearErrorCounter() - // DeleteShardMetrics deletes shard metrics from registry. - DeleteShardMetrics() - // SetContainerObjectsCount sets container object count. - SetContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncContainerObjectsCount increments container object count. - IncContainerObjectsCount(cnrID string, objectType string) - // SubContainerObjectsCount subtracts container object count. - SubContainerObjectsCount(cnrID string, objectType string, value uint64) - // IncRefillObjectsCount increments refill objects count. - IncRefillObjectsCount(path string, size int, success bool) - // SetRefillPercent sets refill percent. - SetRefillPercent(path string, percent uint32) - // SetRefillStatus sets refill status. - SetRefillStatus(path string, status string) - // SetEvacuationInProgress sets evacuation status - SetEvacuationInProgress(value bool) -} - type cfg struct { m sync.RWMutex From 9206ce5cd2ea973feef6a53ae0453736efacbe11 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 3 Oct 2024 10:23:59 +0300 Subject: [PATCH 055/591] [#1410] shard: Provide the default implementation for MetricsWriter Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/shard/id.go | 4 +- pkg/local_object_storage/shard/metrics.go | 22 ++++++++++ pkg/local_object_storage/shard/mode.go | 4 +- pkg/local_object_storage/shard/shard.go | 49 +++++++++-------------- 4 files changed, 42 insertions(+), 37 deletions(-) diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 2fe68d270..a72313498 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -45,9 +45,7 @@ func (s *Shard) UpdateID() (err error) { } shardID := s.info.ID.String() - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.SetShardID(shardID) - } + s.cfg.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go index 568c0de5e..6bf198048 100644 --- a/pkg/local_object_storage/shard/metrics.go +++ b/pkg/local_object_storage/shard/metrics.go @@ -45,3 +45,25 @@ type MetricsWriter interface { // SetEvacuationInProgress sets evacuation status SetEvacuationInProgress(value bool) } + +type noopMetrics struct{} + +var _ MetricsWriter = noopMetrics{} + +func (noopMetrics) SetObjectCounter(string, uint64) {} +func (noopMetrics) AddToObjectCounter(string, int) {} +func (noopMetrics) AddToContainerSize(string, int64) {} +func (noopMetrics) AddToPayloadSize(int64) {} +func (noopMetrics) IncObjectCounter(string) {} +func (noopMetrics) SetShardID(string) {} +func (noopMetrics) SetMode(mode.Mode) {} +func (noopMetrics) IncErrorCounter() {} +func (noopMetrics) ClearErrorCounter() {} +func (noopMetrics) DeleteShardMetrics() {} +func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {} +func (noopMetrics) IncContainerObjectsCount(string, string) {} +func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {} +func (noopMetrics) IncRefillObjectsCount(string, int, bool) {} +func (noopMetrics) SetRefillPercent(string, uint32) {} +func (noopMetrics) SetRefillStatus(string, string) {} +func (noopMetrics) SetEvacuationInProgress(bool) {} diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index 1bab57448..d90a5f4b6 100644 --- a/pkg/local_object_storage/shard/mode.go +++ 
b/pkg/local_object_storage/shard/mode.go @@ -65,9 +65,7 @@ func (s *Shard) setMode(m mode.Mode) error { } s.info.Mode = m - if s.metricsWriter != nil { - s.metricsWriter.SetMode(s.info.Mode) - } + s.metricsWriter.SetMode(s.info.Mode) s.log.Info(logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index f5317b16c..a57b548be 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -105,6 +105,7 @@ func defaultCfg() *cfg { reportErrorFunc: func(string, string, error) {}, zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, + metricsWriter: noopMetrics{}, } } @@ -384,7 +385,7 @@ const ( ) func (s *Shard) updateMetrics(ctx context.Context) { - if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() { + if s.GetMode().NoMetabase() { return } @@ -439,35 +440,29 @@ func (s *Shard) updateMetrics(ctx context.Context) { // incObjectCounter increment both physical and logical object // counters. func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.IncObjectCounter(physical) - s.cfg.metricsWriter.IncObjectCounter(logical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) - if isUser { - s.cfg.metricsWriter.IncObjectCounter(user) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) - } + s.cfg.metricsWriter.IncObjectCounter(physical) + s.cfg.metricsWriter.IncObjectCounter(logical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + if isUser { + s.cfg.metricsWriter.IncObjectCounter(user) + s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { + if v > 0 { s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { + if v > 0 { s.cfg.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { - if s.cfg.metricsWriter == nil { - return - } - for cnrID, count := range byCnr { if count.Phy > 0 { s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) @@ -482,46 +477,38 @@ func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) } func (s *Shard) addToContainerSize(cnr string, size int64) { - if s.cfg.metricsWriter != nil && size != 0 { + if size != 0 { s.cfg.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { - if s.cfg.metricsWriter != nil && size != 0 { + if size != 0 { s.cfg.metricsWriter.AddToPayloadSize(size) } } func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) { - if s.cfg.metricsWriter != nil && v > 0 { + if v > 0 { s.metricsWriter.SetContainerObjectsCount(cnr, typ, v) } } func (s *Shard) IncErrorCounter() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.IncErrorCounter() - } + s.cfg.metricsWriter.IncErrorCounter() } func (s *Shard) ClearErrorCounter() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.ClearErrorCounter() - } + 
s.cfg.metricsWriter.ClearErrorCounter() } func (s *Shard) DeleteShardMetrics() { - if s.cfg.metricsWriter != nil { - s.cfg.metricsWriter.DeleteShardMetrics() - } + s.cfg.metricsWriter.DeleteShardMetrics() } func (s *Shard) SetEvacuationInProgress(val bool) { s.m.Lock() defer s.m.Unlock() s.info.EvacuationInProgress = val - if s.metricsWriter != nil { - s.metricsWriter.SetEvacuationInProgress(val) - } + s.metricsWriter.SetEvacuationInProgress(val) } From 9a87acb87ad243fcdd932e764a3f5f8d9c5c6657 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 3 Oct 2024 10:40:56 +0300 Subject: [PATCH 056/591] [#1410] engine: Provide the default implementation to MetricsRegister Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/engine/container.go | 8 +--- pkg/local_object_storage/engine/delete.go | 4 +- pkg/local_object_storage/engine/engine.go | 1 + pkg/local_object_storage/engine/get.go | 4 +- pkg/local_object_storage/engine/head.go | 4 +- pkg/local_object_storage/engine/inhume.go | 4 +- pkg/local_object_storage/engine/metrics.go | 45 +++++++++++++++++++ pkg/local_object_storage/engine/put.go | 4 +- pkg/local_object_storage/engine/range.go | 4 +- pkg/local_object_storage/engine/select.go | 8 +--- pkg/local_object_storage/engine/shards.go | 46 +++++++++----------- 11 files changed, 77 insertions(+), 55 deletions(-) diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index e45f502ac..6def02f12 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -68,9 +68,7 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { } func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { - if e.metrics != nil { - defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)() - } + defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)() e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm @@ -116,9 +114,7 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { } func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { - if e.metrics != nil { - defer elapsed("ListContainers", e.metrics.AddMethodDuration)() - } + defer elapsed("ListContainers", e.metrics.AddMethodDuration)() uniqueIDs := make(map[string]cid.ID) diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 318f938fb..61cb6832d 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -68,9 +68,7 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe } func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { - if e.metrics != nil { - defer elapsed("Delete", e.metrics.AddMethodDuration)() - } + defer elapsed("Delete", e.metrics.AddMethodDuration)() var locked struct { is bool diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 5e883a641..13efdcb84 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -219,6 +219,7 @@ func defaultCfg() *cfg { res := &cfg{ log: &logger.Logger{Logger: zap.L()}, shardPoolSize: 20, + metrics: noopMetrics{}, } res.containerSource.Store(&containerSource{}) return res diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 
253256c34..4a9199be7 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -66,9 +66,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er } func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { - if e.metrics != nil { - defer elapsed("Get", e.metrics.AddMethodDuration)() - } + defer elapsed("Get", e.metrics.AddMethodDuration)() errNotFound := new(apistatus.ObjectNotFound) diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index 6857a3631..d2e3cfd99 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -68,9 +68,7 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head") defer span.End() - if e.metrics != nil { - defer elapsed("Head", e.metrics.AddMethodDuration)() - } + defer elapsed("Head", e.metrics.AddMethodDuration)() var ( head *objectSDK.Object diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 683713f94..35ce50f65 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -80,9 +80,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe } func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { - if e.metrics != nil { - defer elapsed("Inhume", e.metrics.AddMethodDuration)() - } + defer elapsed("Inhume", e.metrics.AddMethodDuration)() var shPrm shard.InhumePrm if prm.forceRemoval { diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go index 1c088c754..75936206d 100644 --- a/pkg/local_object_storage/engine/metrics.go +++ b/pkg/local_object_storage/engine/metrics.go @@ -68,3 +68,48 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) { m.storage.AddInhumedObjectCount(m.shardID, count, objectType) } + +type ( + noopMetrics struct{} + noopWriteCacheMetrics struct{} + noopGCMetrics struct{} +) + +var ( + _ MetricRegister = noopMetrics{} + _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{} + _ metrics.GCMetrics = noopGCMetrics{} +) + +func (noopMetrics) AddMethodDuration(string, time.Duration) {} +func (noopMetrics) SetObjectCounter(string, string, uint64) {} +func (noopMetrics) AddToObjectCounter(string, string, int) {} +func (noopMetrics) SetMode(string, mode.Mode) {} +func (noopMetrics) AddToContainerSize(string, int64) {} +func (noopMetrics) DeleteContainerSize(string) {} +func (noopMetrics) DeleteContainerCount(string) {} +func (noopMetrics) AddToPayloadCounter(string, int64) {} +func (noopMetrics) IncErrorCounter(string) {} +func (noopMetrics) ClearErrorCounter(string) {} +func (noopMetrics) DeleteShardMetrics(string) {} +func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {} +func (noopMetrics) IncContainerObjectCounter(string, string, string) {} +func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {} +func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} +func (noopMetrics) SetRefillPercent(string, string, uint32) {} +func (noopMetrics) SetRefillStatus(string, string, string) {} +func (noopMetrics) SetEvacuationInProgress(string, bool) {} 
+func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} } +func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} } + +func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} +func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} +func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {} +func (noopWriteCacheMetrics) SetMode(string, string) {} +func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {} +func (noopWriteCacheMetrics) Close(string, string) {} + +func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {} +func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {} +func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {} +func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {} diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index f92d83745..bf86402a7 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -72,9 +72,7 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { } func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { - if e.metrics != nil { - defer elapsed("Put", e.metrics.AddMethodDuration)() - } + defer elapsed("Put", e.metrics.AddMethodDuration)() addr := object.AddressOf(prm.obj) diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index cbf26ff4e..498674fd2 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -82,9 +82,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error )) defer span.End() - if e.metrics != nil { - defer elapsed("GetRange", e.metrics.AddMethodDuration)() - } + defer elapsed("GetRange", e.metrics.AddMethodDuration)() var shPrm shard.RngPrm shPrm.SetAddress(prm.addr) diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 6a8c9fab9..972a4f52a 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -59,9 +59,7 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe } func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { - if e.metrics != nil { - defer elapsed("Search", e.metrics.AddMethodDuration)() - } + defer elapsed("Search", e.metrics.AddMethodDuration)() addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) @@ -108,9 +106,7 @@ func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, } func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { - if e.metrics != nil { - defer elapsed("ListObjects", e.metrics.AddMethodDuration)() - } + defer elapsed("ListObjects", e.metrics.AddMethodDuration)() addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 2ad6859e4..96f54369b 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -116,9 +116,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err) } - if e.cfg.metrics != nil { - 
e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) - } + e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } @@ -152,28 +150,26 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard e.mtx.RLock() defer e.mtx.RUnlock() - if e.metrics != nil { - opts = append(opts, - shard.WithMetricsWriter( - &metricsWithID{ - id: id.String(), - mw: e.metrics, - }, - ), - shard.WithWriteCacheMetrics( - &writeCacheMetrics{ - shardID: id.String(), - metrics: e.metrics.WriteCache(), - }, - ), - shard.WithGCMetrics( - &gcMetrics{ - storage: e.metrics.GC(), - shardID: id.String(), - }, - ), - ) - } + opts = append(opts, + shard.WithMetricsWriter( + &metricsWithID{ + id: id.String(), + mw: e.metrics, + }, + ), + shard.WithWriteCacheMetrics( + &writeCacheMetrics{ + shardID: id.String(), + metrics: e.metrics.WriteCache(), + }, + ), + shard.WithGCMetrics( + &gcMetrics{ + storage: e.metrics.GC(), + shardID: id.String(), + }, + ), + ) return opts } From 963faa615ab0a70964821b3a3725c27ed5d7f60e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Oct 2024 14:58:45 +0300 Subject: [PATCH 057/591] [#1413] engine: Cleanup shard error reporting - `reportShardErrorBackground()` no longer differs from `reportShardError()`, reflect this in its name; - reuse common pieces of code to make it simpler. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/engine/engine.go | 28 ++++------------------- pkg/local_object_storage/engine/shards.go | 2 +- 2 files changed, 5 insertions(+), 25 deletions(-) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 13efdcb84..f40c9cc04 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -115,10 +115,8 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) } -// reportShardErrorBackground increases shard error counter and logs an error. -// It is intended to be used from background workers and -// doesn't change shard mode because of possible deadlocks. -func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) { +// reportShardErrorByID increases shard error counter and logs an error. +func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) { e.mtx.RLock() sh, ok := e.shards[id] e.mtx.RUnlock() @@ -127,16 +125,7 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er return } - if isLogical(err) { - e.log.Warn(msg, - zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) - return - } - - errCount := sh.errorCount.Add(1) - sh.Shard.IncErrorCounter() - e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err) + e.reportShardError(sh, msg, err) } // reportShardError checks that the amount of errors doesn't exceed the configured threshold. @@ -156,16 +145,7 @@ func (e *StorageEngine) reportShardError( errCount := sh.errorCount.Add(1) sh.Shard.IncErrorCounter() - e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...) 
-} -func (e *StorageEngine) reportShardErrorWithFlags( - sh *shard.Shard, - errCount uint32, - msg string, - err error, - fields ...zap.Field, -) { sid := sh.ID() e.log.Warn(msg, append([]zap.Field{ zap.Stringer("shard_id", sid), @@ -179,7 +159,7 @@ func (e *StorageEngine) reportShardErrorWithFlags( req := setModeRequest{ errorCount: errCount, - sh: sh, + sh: sh.Shard, isMeta: errors.As(err, new(metaerr.Error)), } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 96f54369b..c3ccb5276 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -134,7 +134,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh shard.WithExpiredTombstonesCallback(e.processExpiredTombstones), shard.WithExpiredLocksCallback(e.processExpiredLocks), shard.WithDeletedLockCallback(e.processDeletedLocks), - shard.WithReportErrorFunc(e.reportShardErrorBackground), + shard.WithReportErrorFunc(e.reportShardErrorByID), shard.WithZeroSizeCallback(e.processZeroSizeContainers), shard.WithZeroCountCallback(e.processZeroCountContainers), )...) From 4dc9a1b300b2f22dbd9628713917347bc0d3a6ee Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Oct 2024 15:07:20 +0300 Subject: [PATCH 058/591] [#1413] engine: Remove error counting methods from Shard All error counting and hangling logic is present on the engine level. Currently, we pass engine metrics with shard ID metric to shard, then export 3 methods to manipulate these metrics. In this commits all methods are removed and error counter is tracked on the engine level exlusively. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/engine/engine.go | 2 +- pkg/local_object_storage/engine/shards.go | 6 +++--- pkg/local_object_storage/shard/metrics.go | 9 --------- pkg/local_object_storage/shard/shard.go | 12 ------------ 4 files changed, 4 insertions(+), 25 deletions(-) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index f40c9cc04..3183d6137 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -144,7 +144,7 @@ func (e *StorageEngine) reportShardError( } errCount := sh.errorCount.Add(1) - sh.Shard.IncErrorCounter() + e.metrics.IncErrorCounter(sh.ID().String()) sid := sh.ID() e.log.Warn(msg, append([]zap.Field{ diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index c3ccb5276..aab2c423c 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -217,7 +217,7 @@ func (e *StorageEngine) removeShards(ids ...string) { continue } - sh.DeleteShardMetrics() + e.metrics.DeleteShardMetrics(id) ss = append(ss, sh) delete(e.shards, id) @@ -318,7 +318,7 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte if id.String() == shID { if resetErrorCounter { sh.errorCount.Store(0) - sh.Shard.ClearErrorCounter() + e.metrics.ClearErrorCounter(shID) } return sh.SetMode(m) } @@ -422,7 +422,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { for _, sh := range ss { idStr := sh.ID().String() - sh.DeleteShardMetrics() + e.metrics.DeleteShardMetrics(idStr) delete(e.shards, idStr) diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go index 6bf198048..91bf8d0ae 100644 --- a/pkg/local_object_storage/shard/metrics.go +++ 
b/pkg/local_object_storage/shard/metrics.go @@ -24,12 +24,6 @@ type MetricsWriter interface { SetShardID(id string) // SetReadonly must set shard mode. SetMode(mode mode.Mode) - // IncErrorCounter increment error counter. - IncErrorCounter() - // ClearErrorCounter clear error counter. - ClearErrorCounter() - // DeleteShardMetrics deletes shard metrics from registry. - DeleteShardMetrics() // SetContainerObjectsCount sets container object count. SetContainerObjectsCount(cnrID string, objectType string, value uint64) // IncContainerObjectsCount increments container object count. @@ -57,9 +51,6 @@ func (noopMetrics) AddToPayloadSize(int64) {} func (noopMetrics) IncObjectCounter(string) {} func (noopMetrics) SetShardID(string) {} func (noopMetrics) SetMode(mode.Mode) {} -func (noopMetrics) IncErrorCounter() {} -func (noopMetrics) ClearErrorCounter() {} -func (noopMetrics) DeleteShardMetrics() {} func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {} func (noopMetrics) IncContainerObjectsCount(string, string) {} func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {} diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index a57b548be..d7e723733 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -494,18 +494,6 @@ func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) { } } -func (s *Shard) IncErrorCounter() { - s.cfg.metricsWriter.IncErrorCounter() -} - -func (s *Shard) ClearErrorCounter() { - s.cfg.metricsWriter.ClearErrorCounter() -} - -func (s *Shard) DeleteShardMetrics() { - s.cfg.metricsWriter.DeleteShardMetrics() -} - func (s *Shard) SetEvacuationInProgress(val bool) { s.m.Lock() defer s.m.Unlock() From 2f710d8f945f90c5d65e4c9a0c53f0dfdcc4f291 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Fri, 4 Oct 2024 15:23:22 +0300 Subject: [PATCH 059/591] [#1414] metabase: Check parameter for `CountAliveObjectsInBucket` Signed-off-by: Anton Nikiforov --- pkg/local_object_storage/metabase/list.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 44f25246e..74a529809 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -452,10 +452,11 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec return 0, ErrDegradedMode } - cidRaw := prm.BucketName[1:bucketKeySize] - if cidRaw == nil { + if len(prm.BucketName) != bucketKeySize { return 0, nil } + + cidRaw := prm.BucketName[1:bucketKeySize] var count uint64 err := db.boltDB.View(func(tx *bbolt.Tx) error { bkt := tx.Bucket(prm.BucketName) From fc032838c037c7c649f80181ca71d8c9f6847e7d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 7 Oct 2024 11:50:47 +0300 Subject: [PATCH 060/591] [#1215] blobstor/test: Cover iteration behaviour Signed-off-by: Evgenii Stratonikov --- .../blobstor/iterate_test.go | 172 ++++++------------ 1 file changed, 59 insertions(+), 113 deletions(-) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 079728380..195d0bd31 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -3,10 +3,13 @@ package blobstor import ( "context" "encoding/binary" + "errors" "os" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" 
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -90,117 +93,60 @@ func TestIterateObjects(t *testing.T) { } func TestIterate_IgnoreErrors(t *testing.T) { - t.Skip() - // dir := t.TempDir() - // - // const ( - // smallSize = 512 - // objCount = 5 - // ) - // bsOpts := []Option{ - // WithCompressObjects(true), - // WithRootPath(dir), - // WithSmallSizeLimit(smallSize * 2), // + header - // WithBlobovniczaOpenedCacheSize(1), - // WithBlobovniczaShallowWidth(1), - // WithBlobovniczaShallowDepth(1)} - // bs := New(bsOpts...) - // require.NoError(t, bs.Open(false)) - // require.NoError(t, bs.Init()) - // - // addrs := make([]oid.Address, objCount) - // for i := range addrs { - // addrs[i] = oidtest.Address() - // - // obj := object.New() - // obj.SetContainerID(addrs[i].Container()) - // obj.SetID(addrs[i].Object()) - // obj.SetPayload(make([]byte, smallSize<<(i%2))) - // - // objData, err := obj.Marshal() - // require.NoError(t, err) - // - // _, err = bs.PutRaw(addrs[i], objData, true) - // require.NoError(t, err) - // } - // - // // Construct corrupted compressed object. - // buf := bytes.NewBuffer(nil) - // badObject := make([]byte, smallSize/2+1) - // enc, err := zstd.NewWriter(buf) - // require.NoError(t, err) - // rawData := enc.EncodeAll(badObject, nil) - // for i := 4; /* magic size */ i < len(rawData); i += 2 { - // rawData[i] ^= 0xFF - // } - // // Will be put uncompressed but fetched as compressed because of magic. - // _, err = bs.PutRaw(oidtest.Address(), rawData, false) - // require.NoError(t, err) - // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData)) - // - // require.NoError(t, bs.Close()) - // - // // Increase width to have blobovnicza which is definitely empty. - // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...) 
- // require.NoError(t, b.Open(false)) - // require.NoError(t, b.Init()) - // - // var p string - // for i := 0; i < 2; i++ { - // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10)) - // if _, ok := bs.blobovniczas.opened.Get(bp); !ok { - // p = bp - // break - // } - // } - // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache") - // require.NoError(t, os.Chmod(p, 0)) - // - // require.NoError(t, b.Close()) - // require.NoError(t, bs.Open(false)) - // require.NoError(t, bs.Init()) - // - // var prm IteratePrm - // prm.SetIterationHandler(func(e IterationElement) error { - // return nil - // }) - // _, err = bs.Iterate(prm) - // require.Error(t, err) - // - // prm.IgnoreErrors() - // - // t.Run("skip invalid objects", func(t *testing.T) { - // actual := make([]oid.Address, 0, len(addrs)) - // prm.SetIterationHandler(func(e IterationElement) error { - // obj := object.New() - // err := obj.Unmarshal(e.data) - // if err != nil { - // return err - // } - // - // var addr oid.Address - // cnr, _ := obj.ContainerID() - // addr.SetContainer(cnr) - // id, _ := obj.ID() - // addr.SetObject(id) - // actual = append(actual, addr) - // return nil - // }) - // - // _, err := bs.Iterate(prm) - // require.NoError(t, err) - // require.ElementsMatch(t, addrs, actual) - // }) - // t.Run("return errors from handler", func(t *testing.T) { - // n := 0 - // expectedErr := errors.New("expected error") - // prm.SetIterationHandler(func(e IterationElement) error { - // if n++; n == objCount/2 { - // return expectedErr - // } - // return nil - // }) - // _, err := bs.Iterate(prm) - // require.ErrorIs(t, err, expectedErr) - // }) + ctx := context.Background() + + myErr := errors.New("unique error") + nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil } + panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") } + errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr } + + var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error) + st1 := teststore.New( + teststore.WithSubstorage(memstore.New()), + teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { + return s1iter(prm) + })) + st2 := teststore.New( + teststore.WithSubstorage(memstore.New()), + teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) { + return s2iter(prm) + })) + + bsOpts := []Option{WithStorages([]SubStorage{ + {Storage: st1}, + {Storage: st2}, + })} + bs := New(bsOpts...) 
+ require.NoError(t, bs.Open(ctx, mode.ReadWrite)) + require.NoError(t, bs.Init()) + + nopHandler := func(e common.IterationElement) error { + return nil + } + + t.Run("no errors", func(t *testing.T) { + s1iter = nopIter + s2iter = nopIter + _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) + require.NoError(t, err) + }) + t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) { + s1iter = errIter + s2iter = panicIter + _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler}) + require.ErrorIs(t, err, myErr) + }) + + t.Run("ignore errors, storage 1", func(t *testing.T) { + s1iter = errIter + s2iter = nopIter + _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) + require.NoError(t, err) + }) + t.Run("ignore errors, storage 2", func(t *testing.T) { + s1iter = nopIter + s2iter = errIter + _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler}) + require.NoError(t, err) + }) } From a5de74a2492deb9e969c1d722c82bb8b050130df Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 7 Oct 2024 15:13:44 +0300 Subject: [PATCH 061/591] [#1418] go.mod: Update api-go Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1023948bc..f81ba9cf7 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f + git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d diff --git a/go.sum b/go.sum index 5d719a027..8aa087de4 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4= +git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA= git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= From 5fbb2657ca9ca9bbc3aa2ca9239fbb55ea47cdc3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 8 Oct 2024 10:02:14 +0300 Subject: [PATCH 062/591] [#1419] mod: Bump sdk-go version Signed-off-by: Dmitrii Stepanov --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index f81ba9cf7..91cc55a36 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 @@ -60,7 +60,7 @@ require ( require ( git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect diff --git a/go.sum b/go.sum index 8aa087de4..728592ea5 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= @@ -27,8 +27,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go. 
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc= github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= From 0c49bca19c82d574c9a93681bda77362edd5b88c Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 7 Oct 2024 18:32:26 +0300 Subject: [PATCH 063/591] [#1415] lens/explorer: Add timeout for opening database Signed-off-by: Aleksey Savchuk --- cmd/frostfs-lens/internal/blobovnicza/tui.go | 13 +------------ cmd/frostfs-lens/internal/meta/tui.go | 13 +------------ cmd/frostfs-lens/internal/tui/util.go | 13 +++++++++++++ cmd/frostfs-lens/internal/writecache/tui.go | 13 +------------ 4 files changed, 16 insertions(+), 36 deletions(-) diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go index eb4a5ff59..4aa281616 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/tui.go +++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" - "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) { } func runTUI(cmd *cobra.Command) error { - db, err := openDB(false) + db, err := tui.OpenDB(vPath, false) if err != nil { return fmt.Errorf("couldn't open database: %w", err) } @@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } - -func openDB(writable bool) (*bbolt.DB, error) { - db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{ - ReadOnly: !writable, - }) - if err != nil { - return nil, err - } - return db, nil -} diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go index 00e8bf117..5a41f945c 100644 --- a/cmd/frostfs-lens/internal/meta/tui.go +++ b/cmd/frostfs-lens/internal/meta/tui.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" - "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -44,7 +43,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) { } func runTUI(cmd *cobra.Command) error { - db, err := openDB(false) + db, err := tui.OpenDB(vPath, false) if err != nil { return fmt.Errorf("couldn't open database: %w", err) } @@ -70,13 +69,3 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } - -func openDB(writable bool) (*bbolt.DB, error) { - db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{ - ReadOnly: !writable, - }) - if err != nil { - return nil, err - } - return db, nil -} diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go index d4e13b2a9..2d1ab3e33 100644 --- 
a/cmd/frostfs-lens/internal/tui/util.go +++ b/cmd/frostfs-lens/internal/tui/util.go @@ -3,12 +3,25 @@ package tui import ( "errors" "strings" + "time" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/mr-tron/base58" + "go.etcd.io/bbolt" ) +func OpenDB(path string, writable bool) (*bbolt.DB, error) { + db, err := bbolt.Open(path, 0o600, &bbolt.Options{ + ReadOnly: !writable, + Timeout: 100 * time.Millisecond, + }) + if err != nil { + return nil, err + } + return db, nil +} + func CIDParser(s string) (any, error) { data, err := base58.Decode(s) if err != nil { diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go index 6b7532b08..b7e4d7c96 100644 --- a/cmd/frostfs-lens/internal/writecache/tui.go +++ b/cmd/frostfs-lens/internal/writecache/tui.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" - "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) { } func runTUI(cmd *cobra.Command) error { - db, err := openDB(false) + db, err := tui.OpenDB(vPath, false) if err != nil { return fmt.Errorf("couldn't open database: %w", err) } @@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } - -func openDB(writable bool) (*bbolt.DB, error) { - db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{ - ReadOnly: !writable, - }) - if err != nil { - return nil, err - } - return db, nil -} From 899cd55c277b04b974e67df29b81146528d5c293 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Oct 2024 13:28:46 +0300 Subject: [PATCH 064/591] [#1412] engine: PutPrm refactoring Use fields instead of methods. Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/list_test.go | 6 +---- pkg/local_object_storage/engine/put.go | 24 ++++++-------------- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index 11a6c7841..d683b5475 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ b/pkg/local_object_storage/engine/list_test.go @@ -82,11 +82,7 @@ func TestListWithCursor(t *testing.T) { for range tt.objectNum { containerID := cidtest.ID() obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) - - var prm PutPrm - prm.WithObject(obj) - - err := e.Put(context.Background(), prm) + err := e.Put(context.Background(), PutPrm{Object: obj}) require.NoError(t, err) expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)}) } diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index bf86402a7..9ce31e791 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -22,7 +22,7 @@ import ( // PutPrm groups the parameters of Put operation. type PutPrm struct { - obj *objectSDK.Object + Object *objectSDK.Object } var errPutShard = errors.New("could not put object to any shard") @@ -41,13 +41,6 @@ type putToShardRes struct { err error } -// WithObject is a Put option to set object to save. -// -// Option is required. -func (p *PutPrm) WithObject(obj *objectSDK.Object) { - p.obj = obj -} - // Put saves the object to local storage. 
// // Returns any error encountered that @@ -59,7 +52,7 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) { func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put", trace.WithAttributes( - attribute.String("address", object.AddressOf(prm.obj).EncodeToString()), + attribute.String("address", object.AddressOf(prm.Object).EncodeToString()), )) defer span.End() @@ -74,13 +67,13 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { defer elapsed("Put", e.metrics.AddMethodDuration)() - addr := object.AddressOf(prm.obj) + addr := object.AddressOf(prm.Object) // In #1146 this check was parallelized, however, it became // much slower on fast machines for 4 shards. var parent oid.Address - if prm.obj.ECHeader() != nil { - parent.SetObject(prm.obj.ECHeader().Parent()) + if prm.Object.ECHeader() != nil { + parent.SetObject(prm.Object.ECHeader().Parent()) parent.SetContainer(addr.Container()) } var shPrm shard.ExistsPrm @@ -113,7 +106,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.obj) + shRes = e.putToShard(ctx, sh, pool, addr, prm.Object) return shRes.status != putToShardUnknown }) switch shRes.status { @@ -202,8 +195,5 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti // Put writes provided object to local storage. func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error { - var putPrm PutPrm - putPrm.WithObject(obj) - - return storage.Put(ctx, putPrm) + return storage.Put(ctx, PutPrm{Object: obj}) } From 1b520f79733e3628af5d47b597b5baff60f3f36a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Oct 2024 15:27:06 +0300 Subject: [PATCH 065/591] [#1412] engine: Add `IsIndexedContainer` flag Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/object.go | 4 +- pkg/core/container/info.go | 103 ++++++++++++++++++ pkg/core/container/util.go | 12 ++ .../engine/control_test.go | 2 +- .../engine/delete_test.go | 8 +- .../engine/engine_test.go | 2 +- .../engine/inhume_test.go | 2 +- pkg/local_object_storage/engine/lock_test.go | 14 +-- pkg/local_object_storage/engine/put.go | 7 +- pkg/local_object_storage/engine/tree_test.go | 2 +- pkg/services/object/common/writer/ec.go | 3 +- pkg/services/object/common/writer/local.go | 9 +- pkg/services/object/common/writer/writer.go | 3 +- pkg/services/object/put/single.go | 11 +- pkg/services/policer/check.go | 2 +- pkg/services/policer/ec.go | 39 ++++--- pkg/services/replicator/pull.go | 3 +- pkg/services/replicator/put.go | 3 +- pkg/services/replicator/task.go | 3 + 19 files changed, 182 insertions(+), 50 deletions(-) create mode 100644 pkg/core/container/info.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 47649c88b..5c322886b 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock) } -func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error { - return engine.Put(ctx, e.engine, o) +func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error { + return engine.Put(ctx, e.engine, o, 
indexableContainer) } diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go new file mode 100644 index 000000000..62cc21553 --- /dev/null +++ b/pkg/core/container/info.go @@ -0,0 +1,103 @@ +package container + +import ( + "sync" + + utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" +) + +type Info struct { + Indexed bool + Removed bool +} + +type infoValue struct { + info Info + err error +} + +type InfoProvider interface { + Info(id cid.ID) (Info, error) +} + +type infoProvider struct { + mtx *sync.RWMutex + cache map[cid.ID]infoValue + kl *utilSync.KeyLocker[cid.ID] + + source Source + sourceErr error + sourceOnce *sync.Once + sourceFactory func() (Source, error) +} + +func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { + return &infoProvider{ + mtx: &sync.RWMutex{}, + cache: make(map[cid.ID]infoValue), + sourceOnce: &sync.Once{}, + kl: utilSync.NewKeyLocker[cid.ID](), + sourceFactory: sourceFactory, + } +} + +func (r *infoProvider) Info(id cid.ID) (Info, error) { + v, found := r.tryGetFromCache(id) + if found { + return v.info, v.err + } + + return r.getFromSource(id) +} + +func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + value, found := r.cache[id] + return value, found +} + +func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { + r.kl.Lock(id) + defer r.kl.Unlock(id) + + if v, ok := r.tryGetFromCache(id); ok { + return v.info, v.err + } + + r.sourceOnce.Do(func() { + r.source, r.sourceErr = r.sourceFactory() + }) + if r.sourceErr != nil { + return Info{}, r.sourceErr + } + + cnr, err := r.source.Get(id) + var civ infoValue + if err != nil { + if client.IsErrContainerNotFound(err) { + removed, err := WasRemoved(r.source, id) + if err != nil { + civ.err = err + } else { + civ.info.Removed = removed + } + } else { + civ.err = err + } + } else { + civ.info.Indexed = IsIndexedContainer(cnr.Value) + } + r.putToCache(id, civ) + return civ.info, civ.err +} + +func (r *infoProvider) putToCache(id cid.ID, ct infoValue) { + r.mtx.Lock() + defer r.mtx.Unlock() + + r.cache[id] = ct +} diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index 98919284e..d27556807 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -4,6 +4,7 @@ import ( "errors" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" ) @@ -20,3 +21,14 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) { } return false, err } + +// IsIndexedContainer returns True if container attributes should be indexed. 
+func IsIndexedContainer(cnr containerSDK.Container) bool { + var isS3Container bool + cnr.IterateAttributes(func(key, _ string) { + if key == ".s3-location-constraint" { + isS3Container = true + } + }) + return !isS3Container +} diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index 2de92ae84..83babeca3 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -171,7 +171,7 @@ func TestExecBlocks(t *testing.T) { addr := object.AddressOf(obj) - require.NoError(t, Put(context.Background(), e, obj)) + require.NoError(t, Put(context.Background(), e, obj, false)) // block executions errBlock := errors.New("block exec err") diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 4a6758012..0904c9820 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -58,9 +58,9 @@ func TestDeleteBigObject(t *testing.T) { defer e.Close(context.Background()) for i := range children { - require.NoError(t, Put(context.Background(), e, children[i])) + require.NoError(t, Put(context.Background(), e, children[i], false)) } - require.NoError(t, Put(context.Background(), e, link)) + require.NoError(t, Put(context.Background(), e, link, false)) addrParent := object.AddressOf(parent) checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) @@ -126,9 +126,9 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { defer e.Close(context.Background()) for i := range children { - require.NoError(t, Put(context.Background(), e, children[i])) + require.NoError(t, Put(context.Background(), e, children[i], false)) } - require.NoError(t, Put(context.Background(), e, link)) + require.NoError(t, Put(context.Background(), e, link, false)) addrParent := object.AddressOf(parent) checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 525e17f34..88c523b76 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -54,7 +54,7 @@ func benchmarkExists(b *testing.B, shardNum int) { addr := oidtest.Address() for range 100 { obj := testutil.GenerateObjectWithCID(cidtest.ID()) - err := Put(context.Background(), e, obj) + err := Put(context.Background(), e, obj, false) if err != nil { b.Fatal(err) } diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 9daa113f8..f87679253 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -40,7 +40,7 @@ func TestStorageEngine_Inhume(t *testing.T) { e := testNewEngine(t).setShardsNum(t, 1).engine defer e.Close(context.Background()) - err := Put(context.Background(), e, parent) + err := Put(context.Background(), e, parent, false) require.NoError(t, err) var inhumePrm InhumePrm diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 9e6758fb4..3702f567f 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -97,7 +97,7 @@ func TestLockUserScenario(t *testing.T) { id, _ := obj.ID() objAddr.SetObject(id) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. 
@@ -105,7 +105,7 @@ func TestLockUserScenario(t *testing.T) { locker.WriteMembers([]oid.ID{id}) objectSDK.WriteLock(lockerObj, locker) - err = Put(context.Background(), e, lockerObj) + err = Put(context.Background(), e, lockerObj, false) require.NoError(t, err) err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id}) @@ -124,7 +124,7 @@ func TestLockUserScenario(t *testing.T) { tombObj.SetID(tombForLockID) tombObj.SetAttributes(a) - err = Put(context.Background(), e, tombObj) + err = Put(context.Background(), e, tombObj, false) require.NoError(t, err) inhumePrm.WithTarget(tombForLockAddr, lockerAddr) @@ -177,7 +177,7 @@ func TestLockExpiration(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. @@ -189,7 +189,7 @@ func TestLockExpiration(t *testing.T) { lock.SetType(objectSDK.TypeLock) lock.SetAttributes(a) - err = Put(context.Background(), e, lock) + err = Put(context.Background(), e, lock, false) require.NoError(t, err) id, _ := obj.ID() @@ -254,14 +254,14 @@ func TestLockForceRemoval(t *testing.T) { // 1. obj := testutil.GenerateObjectWithCID(cnr) - err = Put(context.Background(), e, obj) + err = Put(context.Background(), e, obj, false) require.NoError(t, err) // 2. lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(objectSDK.TypeLock) - err = Put(context.Background(), e, lock) + err = Put(context.Background(), e, lock, false) require.NoError(t, err) id, _ := obj.ID() diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 9ce31e791..41e566560 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -22,7 +22,8 @@ import ( // PutPrm groups the parameters of Put operation. type PutPrm struct { - Object *objectSDK.Object + Object *objectSDK.Object + IsIndexedContainer bool } var errPutShard = errors.New("could not put object to any shard") @@ -194,6 +195,6 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti } // Put writes provided object to local storage. 
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error { - return storage.Put(ctx, PutPrm{Object: obj}) +func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error { + return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer}) } diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 6f694f082..21fcce415 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -37,7 +37,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { for i := range objCount { obj := testutil.GenerateObjectWithCID(cid) testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) - err := Put(context.Background(), te.ng, obj) + err := Put(context.Background(), te.ng, obj, false) if err != nil { b.Fatal(err) } diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index fb0a8e4e5..6b6a14cc0 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -310,7 +310,8 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { var err error localTarget := LocalTarget{ - Storage: e.Config.LocalStore, + Storage: e.Config.LocalStore, + Container: e.Container, } completed := make(chan interface{}) if poolErr := e.Config.LocalPool.Submit(func() { diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go index 02fd25b9e..e219b44dd 100644 --- a/pkg/services/object/common/writer/local.go +++ b/pkg/services/object/common/writer/local.go @@ -4,7 +4,9 @@ import ( "context" "fmt" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) @@ -13,7 +15,7 @@ import ( type ObjectStorage interface { // Put must save passed object // and return any appeared error. - Put(context.Context, *objectSDK.Object) error + Put(context.Context, *objectSDK.Object, bool) error // Delete must delete passed objects // and return any appeared error. 
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error @@ -25,7 +27,8 @@ type ObjectStorage interface { } type LocalTarget struct { - Storage ObjectStorage + Storage ObjectStorage + Container containerSDK.Container } func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error { @@ -44,7 +47,7 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met // objects that do not change meta storage } - if err := t.Storage.Put(ctx, obj); err != nil { + if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil { return fmt.Errorf("(%T) could not put object to local storage: %w", t, err) } return nil diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go index 3d50da988..0e4c4d9c6 100644 --- a/pkg/services/object/common/writer/writer.go +++ b/pkg/services/object/common/writer/writer.go @@ -150,7 +150,8 @@ func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.Object nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget { if node.Local { return LocalTarget{ - Storage: prm.Config.LocalStore, + Storage: prm.Config.LocalStore, + Container: prm.Container, } } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 9b4163268..5f9b5d110 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -177,7 +177,7 @@ func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlac } return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error { - return s.saveToPlacementNode(ctx, &nd, obj, signer, meta) + return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container) }) } @@ -263,10 +263,10 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb } func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object, - signer *putSingleRequestSigner, meta object.ContentMeta, + signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container, ) error { if nodeDesc.Local { - return s.saveLocal(ctx, obj, meta) + return s.saveLocal(ctx, obj, meta, container) } var info client.NodeInfo @@ -281,9 +281,10 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite return s.redirectPutSingleRequest(ctx, signer, obj, info, c) } -func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error { +func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { localTarget := &objectwriter.LocalTarget{ - Storage: s.Config.LocalStore, + Storage: s.Config.LocalStore, + Container: container, } return localTarget.WriteObject(ctx, obj, meta) } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 06282bd8d..401977f66 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -37,7 +37,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er policy := cnr.Value.PlacementPolicy() if policycore.IsECPlacement(policy) { - return p.processECContainerObject(ctx, objInfo, policy) + return p.processECContainerObject(ctx, objInfo, cnr.Value) } return p.processRepContainerObject(ctx, objInfo, policy) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index 
e822d1c09..6d2c153c9 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -10,6 +10,7 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" @@ -27,11 +28,11 @@ type ecChunkProcessResult struct { var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node") -func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { +func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { if objInfo.ECInfo == nil { - return p.processECContainerRepObject(ctx, objInfo, policy) + return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy()) } - return p.processECContainerECObject(ctx, objInfo, policy) + return p.processECContainerECObject(ctx, objInfo, cnr) } // processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects. @@ -67,8 +68,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec return nil } -func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy) +func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { + nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -85,9 +86,9 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object res := p.processECChunk(ctx, objInfo, nn[0]) if !res.validPlacement { // drop local chunk only if all required chunks are in place - res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0]) + res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr) } - p.adjustECPlacement(ctx, objInfo, nn[0], policy) + p.adjustECPlacement(ctx, objInfo, nn[0], cnr) if res.removeLocal { p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) @@ -138,7 +139,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n } } -func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool { +func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool { var parentAddress oid.Address parentAddress.SetContainer(objInfo.Address.Container()) parentAddress.SetObject(objInfo.ECInfo.ParentID) @@ -169,8 +170,9 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I addr.SetContainer(objInfo.Address.Container()) addr.SetObject(indexToObjectID[index]) p.replicator.HandlePullTask(ctx, replicator.Task{ - Addr: addr, - Nodes: candidates, + Addr: addr, + Nodes: 
candidates, + Container: cnr, }) } // there was some missing chunks, it's not ok @@ -245,7 +247,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A return true } -func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) { +func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) { var parentAddress oid.Address parentAddress.SetContainer(objInfo.Address.Container()) parentAddress.SetObject(objInfo.ECInfo.ParentID) @@ -292,7 +294,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total { return } - if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() { + if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() { var found []uint32 for i := range resolved { found = append(found, i) @@ -300,11 +302,13 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) return } - p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy) + p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr) } -func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) { - c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount())) +func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, + cnr containerSDK.Container, +) { + c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount())) if err != nil { p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return @@ -339,8 +343,9 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, targetNode := nodes[idx%len(nodes)] if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ - Addr: addr, - Obj: part, + Addr: addr, + Obj: part, + Container: cnr, }) } else { p.replicator.HandleReplicationTask(ctx, replicator.Task{ diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index d178700f6..7e7090237 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -5,6 +5,7 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" @@ -62,7 +63,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { return } - err := engine.Put(ctx, p.localStorage, obj) + err := engine.Put(ctx, 
p.localStorage, obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go index c06ec3f65..537833516 100644 --- a/pkg/services/replicator/put.go +++ b/pkg/services/replicator/put.go @@ -5,6 +5,7 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -37,7 +38,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { return } - err := engine.Put(ctx, p.localStorage, task.Obj) + err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go index d2b5b2506..a03f8dcaa 100644 --- a/pkg/services/replicator/task.go +++ b/pkg/services/replicator/task.go @@ -1,6 +1,7 @@ package replicator import ( + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -16,4 +17,6 @@ type Task struct { Obj *objectSDK.Object // Nodes is a list of potential object holders. Nodes []netmap.NodeInfo + + Container containerSDK.Container } From be744ae3e6eadb5b02952cbb110ef59f33f799bc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Oct 2024 18:19:12 +0300 Subject: [PATCH 066/591] [#1412] metabase: Index attributes for indexed containers Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/object.go | 4 +- pkg/local_object_storage/engine/evacuate.go | 11 +-- pkg/local_object_storage/engine/put.go | 5 +- pkg/local_object_storage/metabase/delete.go | 45 ++++++++++ .../metabase/delete_meta_test.go | 85 +++++++++++++++++++ pkg/local_object_storage/metabase/put.go | 80 ++++++++++++++++- pkg/local_object_storage/metabase/util.go | 8 +- pkg/local_object_storage/shard/put.go | 8 +- 8 files changed, 231 insertions(+), 15 deletions(-) create mode 100644 pkg/local_object_storage/metabase/delete_meta_test.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 5c322886b..f2c4bff1d 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock) } -func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error { - return engine.Put(ctx, e.engine, o, indexableContainer) +func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error { + return engine.Put(ctx, e.engine, o, indexedContainer) } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index a618ff274..1baf237f9 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -18,6 +18,7 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -471,7 +472,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context default: } egObject.Go(func() error { - err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate) + err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value) if err != nil { cancel(err) } @@ -744,7 +745,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) } func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() @@ -773,7 +774,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI return err } - evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res) + evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr) if err != nil { return err } @@ -817,7 +818,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool { } func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, + shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, ) (bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) for j := range shards { @@ -830,7 +831,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } - switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status { + switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) e.log.Debug(logs.EngineObjectIsMovedToAnotherShard, diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 41e566560..a50d80b75 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -107,7 +107,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.Object) + shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown }) switch shRes.status { @@ -125,7 +125,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. // Return putToShardStatus and error if it is necessary to propagate an error upper. 
func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, - addr oid.Address, obj *objectSDK.Object, + addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { exitCh := make(chan struct{}) @@ -158,6 +158,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti var putPrm shard.PutPrm putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) _, err = sh.Put(ctx, putPrm) if err != nil { diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 4ad11164f..3add1f268 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -338,6 +338,11 @@ func (db *DB) deleteObject( return fmt.Errorf("can't remove list indexes: %w", err) } + err = updateFKBTIndexes(tx, obj, delFKBTIndexItem) + if err != nil { + return fmt.Errorf("can't remove fake bucket tree indexes: %w", err) + } + if isParent { // remove record from the garbage bucket, because regular object deletion does nothing for virtual object garbageBKT := tx.Bucket(garbageBucketName) @@ -415,6 +420,46 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { return nil } +func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { + bkt := tx.Bucket(item.name) + if bkt == nil { + return nil + } + + fkbtRoot := bkt.Bucket(item.key) + if fkbtRoot == nil { + return nil + } + + if err := fkbtRoot.Delete(item.val); err != nil { + return err + } + + if hasAnyItem(fkbtRoot) { + return nil + } + + if err := bkt.DeleteBucket(item.key); err != nil { + return err + } + + if hasAnyItem(bkt) { + return nil + } + + return tx.DeleteBucket(item.name) +} + +func hasAnyItem(b *bbolt.Bucket) bool { + var hasAnyItem bool + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + hasAnyItem = true + break + } + return hasAnyItem +} + func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error { addr := object.AddressOf(obj) diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go new file mode 100644 index 000000000..cdfe2a203 --- /dev/null +++ b/pkg/local_object_storage/metabase/delete_meta_test.go @@ -0,0 +1,85 @@ +package meta + +import ( + "bytes" + "context" + "path/filepath" + "testing" + + objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" +) + +func TestPutDeleteIndexAttributes(t *testing.T) { + db := New([]Option{ + WithPath(filepath.Join(t.TempDir(), "metabase")), + WithPermissions(0o600), + WithEpochState(epochState{}), + }...) 
+ + require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) + require.NoError(t, db.Init()) + defer func() { require.NoError(t, db.Close()) }() + + cnr := cidtest.ID() + obj1 := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name") + testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object") + + var putPrm PutPrm + putPrm.SetObject(obj1) + + _, err := db.Put(context.Background(), putPrm) + require.NoError(t, err) + + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.Nil(t, b) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.Nil(t, b) + return nil + })) + + obj2 := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name") + testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object") + + putPrm.SetObject(obj2) + putPrm.SetIndexAttributes(true) + + _, err = db.Put(context.Background(), putPrm) + require.NoError(t, err) + + objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize)) + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.NotNil(t, b) + b = b.Bucket([]byte("CRDT-Name")) + require.NotNil(t, b) + require.True(t, bytes.Equal(zeroValue, b.Get(objKey))) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.NotNil(t, b) + b = b.Bucket([]byte("/path/to/object")) + require.NotNil(t, b) + require.True(t, bytes.Equal(zeroValue, b.Get(objKey))) + return nil + })) + + var dPrm DeletePrm + dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2)) + _, err = db.Delete(context.Background(), dPrm) + require.NoError(t, err) + + require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize))) + require.Nil(t, b) + b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize))) + require.Nil(t, b) + return nil + })) +} diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 087529895..0c14196b7 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -35,6 +35,8 @@ type PutPrm struct { obj *objectSDK.Object id []byte + + indexAttributes bool } // PutRes groups the resulting values of Put operation. 
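For orientation before the next hunk: the attribute index introduced by this patch is a nested-bucket layout in bbolt — one bucket per (container, attribute key) pair named by attributeBucketName, one sub-bucket per attribute value, and object ID keys with zeroValue payloads inside, which is exactly the shape the test above asserts. A minimal read-side sketch under that assumption; it is illustrative only, presumes it sits in the metabase package so attributeBucketName and bucketKeySize resolve, and elides the bytes and bbolt imports:

func listObjectsByAttribute(tx *bbolt.Tx, cnr cid.ID, attrKey, attrValue string) ([][]byte, error) {
	// resolve the per-(container, attribute key) bucket written by the new index
	bkt := tx.Bucket(attributeBucketName(cnr, attrKey, make([]byte, bucketKeySize)))
	if bkt == nil {
		return nil, nil // attribute was never indexed for this container
	}
	// one nested bucket per attribute value
	valRoot := bkt.Bucket([]byte(attrValue))
	if valRoot == nil {
		return nil, nil
	}
	// keys inside are object ID keys; values are zeroValue placeholders
	var objKeys [][]byte
	err := valRoot.ForEach(func(k, _ []byte) error {
		objKeys = append(objKeys, bytes.Clone(k))
		return nil
	})
	return objKeys, err
}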
@@ -52,6 +54,10 @@ func (p *PutPrm) SetStorageID(id []byte) { p.id = id } +func (p *PutPrm) SetIndexAttributes(v bool) { + p.indexAttributes = v +} + var ( ErrUnknownObjectType = errors.New("unknown object type") ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it") @@ -90,7 +96,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) { err = db.boltDB.Batch(func(tx *bbolt.Tx) error { var e error - res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch) + res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes) return e }) if err == nil { @@ -108,6 +114,7 @@ func (db *DB) put(tx *bbolt.Tx, id []byte, si *objectSDK.SplitInfo, currEpoch uint64, + indexAttributes bool, ) (PutRes, error) { cnr, ok := obj.ContainerID() if !ok { @@ -129,7 +136,7 @@ func (db *DB) put(tx *bbolt.Tx, return PutRes{}, db.updateObj(tx, obj, id, si, isParent) } - return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch) + return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes) } func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error { @@ -152,14 +159,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje return nil } -func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error { +func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error { if par := obj.Parent(); par != nil && !isParent { // limit depth by two parentSI, err := splitInfoFromObject(obj) if err != nil { return err } - _, err = db.put(tx, par, id, parentSI, currEpoch) + _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes) if err != nil { return err } @@ -175,6 +182,13 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o return fmt.Errorf("can't put list indexes: %w", err) } + if indexAttributes { + err = updateFKBTIndexes(tx, obj, putFKBTIndexItem) + if err != nil { + return fmt.Errorf("can't put fake bucket tree indexes: %w", err) + } + } + // update container volume size estimation if obj.Type() == objectSDK.TypeRegular && !isParent { err = changeContainerSize(tx, cnr, obj.PayloadSize(), true) @@ -381,6 +395,50 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun return nil } +var indexedAttributes = map[string]struct{}{ + "S3-Access-Box-CRDT-Name": {}, + objectSDK.AttributeFilePath: {}, +} + +// IsAtrributeIndexed returns True if attribute is indexed by metabase. 
+func IsAtrributeIndexed(attr string) bool { + _, found := indexedAttributes[attr] + return found +} + +func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error { + id, _ := obj.ID() + cnr, _ := obj.ContainerID() + objKey := objectKey(id, make([]byte, objectKeySize)) + + key := make([]byte, bucketKeySize) + var attrs []objectSDK.Attribute + if obj.ECHeader() != nil { + attrs = obj.ECHeader().ParentAttributes() + objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize)) + } else { + attrs = obj.Attributes() + } + + // user specified attributes + for i := range attrs { + if !IsAtrributeIndexed(attrs[i].Key()) { + continue + } + key = attributeBucketName(cnr, attrs[i].Key(), key) + err := f(tx, namedBucketItem{ + name: key, + key: []byte(attrs[i].Value()), + val: objKey, + }) + if err != nil { + return err + } + } + + return nil +} + func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) { attributes := obj.Attributes() if ech := obj.ECHeader(); ech != nil { @@ -425,6 +483,20 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil }) } +func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { + bkt, err := createBucketLikelyExists(tx, item.name) + if err != nil { + return fmt.Errorf("can't create index %v: %w", item.name, err) + } + + fkbtRoot, err := createBucketLikelyExists(bkt, item.key) + if err != nil { + return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err) + } + + return fkbtRoot.Put(item.val, zeroValue) +} + func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index eef7210dc..4679de332 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -99,7 +99,6 @@ const ( // userAttributePrefix was used for prefixing FKBT index buckets containing objects. // Key: attribute value // Value: bucket containing object IDs as keys - // removed in version 3 userAttributePrefix // ==================== @@ -170,6 +169,13 @@ func smallBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, smallPrefix, key) } +// attributeBucketName returns _. +func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte { + key[0] = userAttributePrefix + cnr.Encode(key[1:]) + return append(key[:bucketKeySize], attributeKey...) +} + // rootBucketName returns _root. func rootBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, rootPrefix, key) diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index d7a9e7012..24cc75154 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -17,7 +17,8 @@ import ( // PutPrm groups the parameters of Put operation. type PutPrm struct { - obj *objectSDK.Object + obj *objectSDK.Object + indexAttributes bool } // PutRes groups the resulting values of Put operation. @@ -28,6 +29,10 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) { p.obj = obj } +func (p *PutPrm) SetIndexAttributes(v bool) { + p.indexAttributes = v +} + // Put saves the object in shard. 
// // Returns any error encountered that @@ -84,6 +89,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var pPrm meta.PutPrm pPrm.SetObject(prm.obj) pPrm.SetStorageID(res.StorageID) + pPrm.SetIndexAttributes(prm.indexAttributes) res, err := s.metaBase.Put(ctx, pPrm) if err != nil { // may we need to handle this case in a special way From 1efa64ee72851e53ca5eb2bf643e74141b41ca46 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 2 Oct 2024 13:04:29 +0300 Subject: [PATCH 067/591] [#1412] metabase: Add search by indexed attributes Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/db_test.go | 12 +- pkg/local_object_storage/metabase/select.go | 115 ++++++++++++++++-- .../metabase/select_test.go | 81 +++++++----- 3 files changed, 169 insertions(+), 39 deletions(-) diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go index 01e1ed2bc..0abb5ea89 100644 --- a/pkg/local_object_storage/metabase/db_test.go +++ b/pkg/local_object_storage/metabase/db_test.go @@ -32,7 +32,17 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error { } func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) { - res, err := metaSelect(db, cnr, fs) + res, err := metaSelect(db, cnr, fs, false) + require.NoError(t, err) + require.Len(t, res, len(exp)) + + for i := range exp { + require.Contains(t, res, exp[i]) + } +} + +func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) { + res, err := metaSelect(db, cnr, fs, useAttrIndex) require.NoError(t, err) require.Len(t, res, len(exp)) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 85d1b08ba..88ef7d5a4 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -37,8 +37,9 @@ type ( // SelectPrm groups the parameters of Select operation. type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + useAttributeIndex bool } // SelectRes groups the resulting values of Select operation. @@ -56,6 +57,10 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) { p.filters = fs } +func (p *SelectPrm) SetUseAttributeIndex(v bool) { + p.useAttributeIndex = v +} + // AddressList returns list of addresses of the selected objects. 
func (r SelectRes) AddressList() []oid.Address { return r.addrList @@ -92,14 +97,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err currEpoch := db.epochState.CurrentEpoch() return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch) + res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex) success = err == nil return err })) } -func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) { - group, err := groupFilters(fs) +func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) { + group, err := groupFilters(fs, useAttributeIndex) if err != nil { return nil, err } @@ -218,7 +223,13 @@ func (db *DB) selectFastFilter( selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum) selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum) selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum) - default: + default: // user attribute + bucketName := attributeBucketName(cnr, f.Header(), bucketName) + if f.Operation() == objectSDK.MatchNotPresent { + selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum) + } else { + db.selectFromFKBT(tx, bucketName, f, to, fNum) + } } } @@ -228,6 +239,15 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{ v2object.TypeLock.String(): {bucketNameLockers}, } +func allBucketNames(cnr cid.ID) (names [][]byte) { + for _, fns := range mBucketNaming { + for _, fn := range fns { + names = append(names, fn(cnr, make([]byte, bucketKeySize))) + } + } + return +} + func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) { appendNames := func(key string) { fns, ok := mBucketNaming[key] @@ -259,6 +279,81 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str return } +func (db *DB) selectFromFKBT( + tx *bbolt.Tx, + name []byte, // fkbt root bucket name + f objectSDK.SearchFilter, // filter for operation and value + to map[string]int, // resulting cache + fNum int, // index of filter +) { // + matchFunc, ok := db.matchers[f.Operation()] + if !ok { + db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation())) + + return + } + + fkbtRoot := tx.Bucket(name) + if fkbtRoot == nil { + return + } + + err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { + fkbtLeaf := fkbtRoot.Bucket(k) + if fkbtLeaf == nil { + return nil + } + + return fkbtLeaf.ForEach(func(k, _ []byte) error { + markAddressInCache(to, fNum, string(k)) + + return nil + }) + }) + if err != nil { + db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error())) + } +} + +// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in +// resulting cache. 
+func selectOutsideFKBT( + tx *bbolt.Tx, + incl [][]byte, // buckets + name []byte, // fkbt root bucket name + to map[string]int, // resulting cache + fNum int, // index of filter +) { + mExcl := make(map[string]struct{}) + + bktExcl := tx.Bucket(name) + if bktExcl != nil { + _ = bktExcl.ForEachBucket(func(k []byte) error { + exclBktLeaf := bktExcl.Bucket(k) + return exclBktLeaf.ForEach(func(k, _ []byte) error { + mExcl[string(k)] = struct{}{} + + return nil + }) + }) + } + + for i := range incl { + bktIncl := tx.Bucket(incl[i]) + if bktIncl == nil { + continue + } + + _ = bktIncl.ForEach(func(k, _ []byte) error { + if _, ok := mExcl[string(k)]; !ok { + markAddressInCache(to, fNum, string(k)) + } + + return nil + }) + } +} + // selectFromList looks into index to find list of addresses to add in // resulting cache. func (db *DB) selectFromList( @@ -486,7 +581,7 @@ func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) { // groupFilters divides filters in two groups: fast and slow. Fast filters // processed by indexes and slow filters processed after by unmarshaling // object headers. -func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) { +func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) { res := filterGroup{ fastFilters: make(objectSDK.SearchFilters, 0, len(filters)), slowFilters: make(objectSDK.SearchFilters, 0, len(filters)), @@ -511,7 +606,11 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) { v2object.FilterPropertyPhy: res.fastFilters = append(res.fastFilters, filters[i]) default: - res.slowFilters = append(res.slowFilters, filters[i]) + if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) { + res.fastFilters = append(res.fastFilters, filters[i]) + } else { + res.slowFilters = append(res.slowFilters, filters[i]) + } } } diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index bee778e2b..5cc25a9f6 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -26,6 +26,16 @@ import ( func TestDB_SelectUserAttributes(t *testing.T) { t.Parallel() + t.Run("with_index", func(t *testing.T) { + testSelectUserAttributes(t, true) + }) + t.Run("without_index", func(t *testing.T) { + testSelectUserAttributes(t, false) + }) +} + +func testSelectUserAttributes(t *testing.T, index bool) { + t.Parallel() db := newDB(t) defer func() { require.NoError(t, db.Close()) }() @@ -36,44 +46,52 @@ func TestDB_SelectUserAttributes(t *testing.T) { testutil.AddAttribute(raw1, "foo", "bar") testutil.AddAttribute(raw1, "x", "y") - err := putBig(db, raw1) + var putPrm meta.PutPrm + putPrm.SetIndexAttributes(index) + putPrm.SetObject(raw1) + _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) raw2 := testutil.GenerateObjectWithCID(cnr) testutil.AddAttribute(raw2, "foo", "bar") testutil.AddAttribute(raw2, "x", "z") - err = putBig(db, raw2) + putPrm.SetObject(raw2) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw3 := testutil.GenerateObjectWithCID(cnr) testutil.AddAttribute(raw3, "a", "b") - err = putBig(db, raw3) + putPrm.SetObject(raw3) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw4 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw4, "path", "test/1/2") + testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2") - err = putBig(db, raw4) + putPrm.SetObject(raw4) + _, err = 
db.Put(context.Background(), putPrm) require.NoError(t, err) raw5 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw5, "path", "test/1/3") + testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3") - err = putBig(db, raw5) + putPrm.SetObject(raw5) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw6 := testutil.GenerateObjectWithCID(cnr) - testutil.AddAttribute(raw6, "path", "test/2/3") + testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3") - err = putBig(db, raw6) + putPrm.SetObject(raw6) + _, err = db.Put(context.Background(), putPrm) require.NoError(t, err) raw7 := testutil.GenerateObjectWithCID(cnr) var attr objectSDK.Attribute - attr.SetKey("path") - attr.SetValue("test/3/4") + attr.SetKey(objectSDK.AttributeFilePath) + attr.SetValue("/test/3/4") attrs := raw7.Attributes() attrs = append(attrs, attr) ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{ @@ -81,37 +99,39 @@ func TestDB_SelectUserAttributes(t *testing.T) { Attributes: attrs, }, 0, 3, []byte{}, 0) raw7.SetECHeader(ech) - require.NoError(t, putBig(db, raw7)) + putPrm.SetObject(raw7) + _, err = db.Put(context.Background(), putPrm) + require.NoError(t, err) var raw7Parent oid.Address raw7Parent.SetContainer(cnr) raw7Parent.SetObject(ech.Parent()) fs := objectSDK.SearchFilters{} fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), ) fs = objectSDK.SearchFilters{} fs.AddFilter("x", "y", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw1)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1)) fs = objectSDK.SearchFilters{} fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw2)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2)) fs = objectSDK.SearchFilters{} fs.AddFilter("a", "b", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, object.AddressOf(raw3)) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3)) fs = objectSDK.SearchFilters{} fs.AddFilter("c", "d", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs) + testSelect2(t, db, cnr, fs, index) fs = objectSDK.SearchFilters{} fs.AddFilter("foo", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3), object.AddressOf(raw4), object.AddressOf(raw5), @@ -121,7 +141,7 @@ func TestDB_SelectUserAttributes(t *testing.T) { fs = objectSDK.SearchFilters{} fs.AddFilter("a", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw4), @@ -131,7 +151,7 @@ func TestDB_SelectUserAttributes(t *testing.T) { ) fs = objectSDK.SearchFilters{} - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw3), @@ -143,7 +163,7 @@ func TestDB_SelectUserAttributes(t *testing.T) { fs = objectSDK.SearchFilters{} fs.AddFilter("key", "", objectSDK.MatchNotPresent) - testSelect(t, db, cnr, fs, + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1), object.AddressOf(raw2), object.AddressOf(raw3), @@ -154,8 +174,8 @@ func TestDB_SelectUserAttributes(t *testing.T) { ) fs = objectSDK.SearchFilters{} - fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix) - testSelect(t, db, cnr, fs, + fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix) + 
testSelect2(t, db, cnr, fs, index, object.AddressOf(raw4), object.AddressOf(raw5), object.AddressOf(raw6), @@ -163,15 +183,15 @@ func TestDB_SelectUserAttributes(t *testing.T) { ) fs = objectSDK.SearchFilters{} - fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix) - testSelect(t, db, cnr, fs, + fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix) + testSelect2(t, db, cnr, fs, index, object.AddressOf(raw4), object.AddressOf(raw5), ) fs = objectSDK.SearchFilters{} - fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual) - testSelect(t, db, cnr, fs, + fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual) + testSelect2(t, db, cnr, fs, index, raw7Parent, ) } @@ -1185,11 +1205,11 @@ func TestExpiredObjects(t *testing.T) { cidExp, _ := exp.ContainerID() cidNonExp, _ := nonExp.ContainerID() - objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}) + objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false) require.NoError(t, err) require.Empty(t, objs) // expired object should not be returned - objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}) + objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false) require.NoError(t, err) require.NotEmpty(t, objs) }) @@ -1211,10 +1231,11 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear } } -func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) { +func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) { var prm meta.SelectPrm prm.SetFilters(fs) prm.SetContainerID(cnr) + prm.SetUseAttributeIndex(useAttributeIndex) res, err := db.Select(context.Background(), prm) return res.AddressList(), err From 4572fa487493f21cc5ebffcdc526270452a36e6a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 2 Oct 2024 14:52:54 +0300 Subject: [PATCH 068/591] [#1412] searchSvc: Check container is indexed For non S3 containers it is expected to use attributes index for some attributes. 
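To make the intended flow concrete, here is a condensed, illustrative sketch of the decision the node now makes before a local search. It mirrors the wiring added in the diffs below rather than introducing new behaviour; `exec` and `prm` stand for the search execution context and the engine select parameters used in util.go.

    cnr, err := exec.getContainer() // resolved through the new containerSource dependency
    if err != nil {
        return nil, err
    }

    var prm engine.SelectPrm
    prm.WithFilters(exec.searchFilters())
    // Only containers reported as indexed (non S3) let the metabase serve
    // attribute filters from the index; others keep the slow header path.
    prm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))

The prepared parameters are then passed to the engine's Select as before; the flag travels engine -> shard -> metabase.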
Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/object.go | 5 +++-- pkg/local_object_storage/engine/delete.go | 2 +- pkg/local_object_storage/engine/inhume_test.go | 4 ++-- pkg/local_object_storage/engine/select.go | 14 ++++++++------ pkg/local_object_storage/engine/tree_test.go | 2 +- pkg/local_object_storage/shard/select.go | 9 ++++++--- pkg/services/object/search/container.go | 10 ++++++++++ pkg/services/object/search/service.go | 4 ++++ pkg/services/object/search/util.go | 7 ++++++- 9 files changed, 41 insertions(+), 16 deletions(-) diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index f2c4bff1d..68acb05d3 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -174,7 +174,7 @@ func initObjectService(c *cfg) { sPutV2 := createPutSvcV2(sPut, keyStorage) - sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache) + sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource) sSearchV2 := createSearchSvcV2(sSearch, keyStorage) @@ -366,7 +366,7 @@ func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Servic return patchsvc.NewService(sPut.Config, sGet) } -func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service { +func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage return searchsvc.New( @@ -377,6 +377,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav ), c.netMapSource, keyStorage, + containerSource, searchsvc.WithLogger(c.log), ) } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 61cb6832d..3ec3f8f9b 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -143,7 +143,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo var selectPrm shard.SelectPrm selectPrm.SetFilters(fs) - selectPrm.SetContainerID(addr.Container()) + selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID var inhumePrm shard.InhumePrm if force { diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index f87679253..b4fbbd810 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -49,7 +49,7 @@ func TestStorageEngine_Inhume(t *testing.T) { _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(context.Background(), e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, false, fs) require.NoError(t, err) require.Empty(t, addrs) }) @@ -78,7 +78,7 @@ func TestStorageEngine_Inhume(t *testing.T) { _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) - addrs, err := Select(context.Background(), e, cnr, fs) + addrs, err := Select(context.Background(), e, cnr, false, fs) require.NoError(t, err) require.Empty(t, addrs) }) diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 972a4f52a..a85891f0c 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -14,8 +14,9 @@ import ( // SelectPrm groups the parameters of Select operation. 
type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + indexedContainer bool } // SelectRes groups the resulting values of Select operation. @@ -24,8 +25,9 @@ type SelectRes struct { } // WithContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) WithContainerID(cnr cid.ID) { +func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) { p.cnr = cnr + p.indexedContainer = indexedContainer } // WithFilters is a Select option to set the object filters. @@ -67,7 +69,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, var outError error var shPrm shard.SelectPrm - shPrm.SetContainerID(prm.cnr) + shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { @@ -140,9 +142,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro } // Select selects objects from local storage using provided filters. -func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) { +func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) { var selectPrm SelectPrm - selectPrm.WithContainerID(cnr) + selectPrm.WithContainerID(cnr, isIndexedContainer) selectPrm.WithFilters(fs) res, err := storage.Select(ctx, selectPrm) diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 21fcce415..ea0a9e74e 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -50,7 +50,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { b.Run("search", func(b *testing.B) { var prm SelectPrm - prm.WithContainerID(cid) + prm.WithContainerID(cid, true) var fs objectSDK.SearchFilters fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 1615f5fbe..184ca9b71 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -15,8 +15,9 @@ import ( // SelectPrm groups the parameters of Select operation. type SelectPrm struct { - cnr cid.ID - filters objectSDK.SearchFilters + cnr cid.ID + filters objectSDK.SearchFilters + isIndexedContainer bool } // SelectRes groups the resulting values of Select operation. @@ -25,8 +26,9 @@ type SelectRes struct { } // SetContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) SetContainerID(cnr cid.ID) { +func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) { p.cnr = cnr + p.isIndexedContainer = isIndexedContainer } // SetFilters is a Select option to set the object filters. 
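As a usage note on the widened search API: the engine-level Select helper now takes the indexed-container flag next to the container ID, and the shard forwards it to the metabase below. A minimal hedged sketch of a caller follows; the engine, container ID and flag are assumed to be obtained elsewhere (the flag typically via container.IsIndexedContainer), and the file-path prefix is just an example value.

    package example

    import (
        "context"

        "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
        cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
        objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
        oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    )

    // selectByFilePathPrefix lists objects of cnr whose FilePath attribute starts
    // with the given prefix. When indexed is false, the metabase falls back to
    // the old slow path of filtering by unmarshaled headers.
    func selectByFilePathPrefix(ctx context.Context, e *engine.StorageEngine, cnr cid.ID, indexed bool, prefix string) ([]oid.Address, error) {
        var fs objectSDK.SearchFilters
        fs.AddFilter(objectSDK.AttributeFilePath, prefix, objectSDK.MatchCommonPrefix)
        return engine.Select(ctx, e, cnr, indexed, fs)
    }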
@@ -61,6 +63,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) + selectPrm.SetUseAttributeIndex(prm.isIndexedContainer) mRes, err := s.metaBase.Select(ctx, selectPrm) if err != nil { diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index d70574156..39259b0ca 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "go.uber.org/zap" ) @@ -112,3 +113,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { return nil } + +func (exec *execCtx) getContainer() (containerSDK.Container, error) { + cnrID := exec.containerID() + cnr, err := exec.svc.containerSource.Get(cnrID) + if err != nil { + return containerSDK.Container{}, err + } + return cnr.Value, nil +} diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index cc388c1b2..7700f78d8 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -54,6 +54,8 @@ type cfg struct { } keyStore *util.KeyStorage + + containerSource container.Source } // New creates, initializes and returns utility serving @@ -63,6 +65,7 @@ func New(e *engine.StorageEngine, tg *util.TraverserGenerator, ns netmap.Source, ks *util.KeyStorage, + cs container.Source, opts ...Option, ) *Service { c := &cfg{ @@ -76,6 +79,7 @@ func New(e *engine.StorageEngine, traverserGenerator: tg, currentEpochReceiver: ns, keyStore: ks, + containerSource: cs, } for i := range opts { diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 67b6c0d01..910384a0b 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -5,6 +5,7 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" @@ -112,9 +113,13 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c } func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { + cnr, err := exec.getContainer() + if err != nil { + return nil, err + } var selectPrm engine.SelectPrm selectPrm.WithFilters(exec.searchFilters()) - selectPrm.WithContainerID(exec.containerID()) + selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr)) r, err := e.storage.Select(ctx, selectPrm) if err != nil { From 3da168f8cf96cb342c4cfe753e38c8a0bd7ad0a4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 2 Oct 2024 16:46:39 +0300 Subject: [PATCH 069/591] [#1412] shard: Resolve container is indexed on metabase resync Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 23 +++++++++++++++++++++-- internal/logs/logs.go | 1 + pkg/local_object_storage/shard/control.go | 17 +++++++++++++++++ pkg/local_object_storage/shard/shard.go | 10 ++++++++++ 4 files changed, 49 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go 
index 4ad9ec6c6..3c7e310b4 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1058,7 +1058,9 @@ func initLocalStorage(ctx context.Context, c *cfg) { var shardsAttached int for _, optsWithMeta := range c.shardOpts(ctx) { - id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...) + id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, + shard.WithTombstoneSource(c.createTombstoneSource()), + shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...) if err != nil { c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) } else { @@ -1313,7 +1315,10 @@ func (c *cfg) reloadConfig(ctx context.Context) { var rcfg engine.ReConfiguration for _, optsWithID := range c.shardOpts(ctx) { - rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))) + rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, + shard.WithTombstoneSource(c.createTombstoneSource()), + shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)), + )) } err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) @@ -1414,6 +1419,20 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker { return tombstoneSource } +func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { + return container.NewInfoProvider(func() (container.Source, error) { + // threadsafe: called on init or on sighup when morph initialized + if c.cfgMorph.client == nil { + initMorphComponents(ctx, c) + } + cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary()) + if err != nil { + return nil, err + } + return containerClient.AsContainerSource(cc), nil + }) +} + func (c *cfg) shutdown() { old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN) if old == control.HealthStatus_SHUTTING_DOWN { diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 7aef6873e..1ae4f0d3f 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -249,6 +249,7 @@ const ( ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode" ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode" ShardCouldNotUnmarshalObject = "could not unmarshal object" + ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted" ShardCouldNotCloseShardComponent = "could not close shard component" ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode" ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode" diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index de881654a..4f9f25608 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -275,6 +276,21 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, return nil } + hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0 + + var isIndexedContainer bool + if hasIndexedAttribute { + info, err := s.containerInfo.Info(addr.Container()) + if err != nil { + return err + } + if info.Removed { + 
s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr)) + return nil + } + isIndexedContainer = info.Indexed + } + var err error switch obj.Type() { case objectSDK.TypeTombstone: @@ -290,6 +306,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, var mPrm meta.PutPrm mPrm.SetObject(obj) mPrm.SetStorageID(descriptor) + mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer) _, err = s.metaBase.Put(ctx, mPrm) if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) { diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index d7e723733..413bfd2f7 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,6 +7,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -95,6 +96,8 @@ type cfg struct { metricsWriter MetricsWriter reportErrorFunc func(selfID string, message string, err error) + + containerInfo container.InfoProvider } func defaultCfg() *cfg { @@ -358,6 +361,13 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option { } } +// WithContainerInfoProvider returns option to set container info provider. +func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { + return func(c *cfg) { + c.containerInfo = containerInfo + } +} + func (s *Shard) fillInfo() { s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() From 8093e145b316762977aff5b2c8babda64ae7283b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 3 Oct 2024 11:06:31 +0300 Subject: [PATCH 070/591] [#1412] adm: Resolve container type by metabase upgrade Signed-off-by: Dmitrii Stepanov --- .../internal/modules/metabase/upgrade.go | 83 +++++++++++++---- pkg/local_object_storage/metabase/upgrade.go | 93 +++++++++++++++++-- .../metabase/upgrade_test.go | 21 +++-- pkg/local_object_storage/metabase/util.go | 15 +++ 4 files changed, 179 insertions(+), 33 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go index 96cb62f10..00b30c9b2 100644 --- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -1,6 +1,7 @@ package metabase import ( + "context" "errors" "fmt" "sync" @@ -10,19 +11,24 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) const ( - pathFlag = 
"path" noCompactFlag = "no-compact" ) -var errNoPathsFound = errors.New("no metabase paths found") - -var path string +var ( + errNoPathsFound = errors.New("no metabase paths found") + errNoMorphEndpointsFound = errors.New("no morph endpoints found") +) var UpgradeCmd = &cobra.Command{ Use: "upgrade", @@ -39,17 +45,10 @@ func upgrade(cmd *cobra.Command, _ []string) error { if err != nil { return err } - noCompact, _ := cmd.Flags().GetBool(noCompactFlag) - var paths []string - if path != "" { - paths = append(paths, path) - } appCfg := config.New(configFile, configDir, config.EnvPrefix) - if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error { - paths = append(paths, sc.Metabase().Path()) - return nil - }); err != nil { - return fmt.Errorf("failed to get metabase paths: %w", err) + paths, err := getMetabasePaths(appCfg) + if err != nil { + return err } if len(paths) == 0 { return errNoPathsFound @@ -58,6 +57,16 @@ func upgrade(cmd *cobra.Command, _ []string) error { for i, path := range paths { cmd.Println(i+1, ":", path) } + mc, err := createMorphClient(cmd.Context(), appCfg) + if err != nil { + return err + } + defer mc.Close() + civ, err := createContainerInfoProvider(mc) + if err != nil { + return err + } + noCompact, _ := cmd.Flags().GetBool(noCompactFlag) result := make(map[string]bool) var resultGuard sync.Mutex eg, ctx := errgroup.WithContext(cmd.Context()) @@ -65,7 +74,7 @@ func upgrade(cmd *cobra.Command, _ []string) error { eg.Go(func() error { var success bool cmd.Println("upgrading metabase", path, "...") - if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) { + if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) { cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...) 
}); err != nil { cmd.Println("error: failed to upgrade metabase", path, ":", err) @@ -92,8 +101,50 @@ func upgrade(cmd *cobra.Command, _ []string) error { return nil } +func getMetabasePaths(appCfg *config.Config) ([]string, error) { + var paths []string + if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error { + paths = append(paths, sc.Metabase().Path()) + return nil + }); err != nil { + return nil, fmt.Errorf("get metabase paths: %w", err) + } + return paths, nil +} + +func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + return nil, errNoMorphEndpointsFound + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(ctx, + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + if err != nil { + return nil, fmt.Errorf("create morph client:%w", err) + } + return cli, nil +} + +func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) { + sh, err := cli.NNSContractAddress(client.NNSContainerContractName) + if err != nil { + return nil, fmt.Errorf("resolve container contract hash: %w", err) + } + cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary()) + if err != nil { + return nil, fmt.Errorf("create morph container client: %w", err) + } + return container.NewInfoProvider(func() (container.Source, error) { + return morphcontainer.AsContainerSource(cc), nil + }), nil +} + func initUpgradeCommand() { flags := UpgradeCmd.Flags() - flags.StringVar(&path, pathFlag, "", "Path to metabase file") flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file") } diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index b5de430dc..f2a0107a1 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/binary" + "encoding/hex" "errors" "fmt" "os" @@ -12,6 +13,7 @@ import ( "time" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" @@ -25,15 +27,15 @@ const ( upgradeTimeout = 1 * time.Second ) -var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{ +var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{ 2: upgradeFromV2ToV3, - 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error { + 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error { log("metabase already upgraded") return nil }, } -func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error { +func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error { if _, err := os.Stat(path); err != nil { return fmt.Errorf("check metabase existence: %w", err) } @@ -61,7 +63,7 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) }); err != nil { return fmt.Errorf("set upgrade key %w", err) } - if err := updater(ctx, db, log); err != nil { + if err := updater(ctx, db, cs, log); err != nil { return 
fmt.Errorf("update metabase schema: %w", err) } if err := db.Update(func(tx *bbolt.Tx) error { @@ -113,11 +115,11 @@ func compactDB(db *bbolt.DB) error { return nil } -func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { +func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { if err := createExpirationEpochBuckets(ctx, db, log); err != nil { return err } - if err := dropUserAttributes(ctx, db, log); err != nil { + if err := dropUserAttributes(ctx, db, cs, log); err != nil { return err } if err := dropOwnerIDIndex(ctx, db, log); err != nil { @@ -323,10 +325,81 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i return nil } -func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { - return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) { - log(append([]any{"user attributes:"}, a...)...) - }) +func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error { + log("deleting user attribute buckets...") + const batch = 1000 + prefix := []byte{userAttributePrefix} + last := prefix + var count uint64 + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + var keys [][]byte + if err := db.View(func(tx *bbolt.Tx) error { + c := tx.Cursor() + for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() { + if bytes.Equal(last, k) { + continue + } + keys = append(keys, bytes.Clone(k)) + } + return nil + }); err != nil { + log("deleting user attribute buckets completed with an error:", err) + return err + } + if len(keys) == 0 { + log("deleting user attribute buckets completed successfully, deleted", count, "buckets") + return nil + } + last = keys[len(keys)-1] + keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) + if err != nil { + return err + } + if err := db.Update(func(tx *bbolt.Tx) error { + for _, k := range keysToDrop { + if err := tx.DeleteBucket(k); err != nil { + return err + } + } + return nil + }); err != nil { + log("deleting buckets completed with an error:", err) + return err + } + count += uint64(len(keysToDrop)) + log("deleted", count, "buckets") + } +} + +func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) { + var keysToDrop [][]byte + for _, key := range keys { + attr, ok := attributeFromAttributeBucket(key) + if !ok { + return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) + } + if !IsAtrributeIndexed(attr) { + keysToDrop = append(keysToDrop, key) + continue + } + contID, ok := cidFromAttributeBucket(key) + if !ok { + return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) + } + info, err := cs.Info(contID) + if err != nil { + return nil, err + } + if info.Removed || !info.Indexed { + keysToDrop = append(keysToDrop, key) + } + } + return keysToDrop, nil } func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index 3797de0a4..9c525291a 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -12,6 +12,7 @@ import ( "time" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -35,13 +36,19 @@ func TestUpgradeV2ToV3(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.ErrorIs(t, db.Init(), ErrOutdatedVersion) require.NoError(t, db.Close()) - require.NoError(t, Upgrade(context.Background(), path, true, t.Log)) + require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log)) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.Init()) require.NoError(t, db.Close()) fmt.Println() } +type testContainerInfoProvider struct{} + +func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) { + return container.Info{}, nil +} + func createTempCopy(t *testing.T, path string) string { src, err := os.Open(path) require.NoError(t, err) @@ -95,7 +102,7 @@ func TestGenerateMetabaseFile(t *testing.T) { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) _, err := db.Put(ctx, PutPrm{ obj: obj, @@ -118,8 +125,8 @@ func TestGenerateMetabaseFile(t *testing.T) { child.SetParent(parent) idParent, _ := parent.ID() child.SetParentID(idParent) - testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) - testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10)) _, err := db.Put(ctx, PutPrm{ @@ -138,7 +145,7 @@ func TestGenerateMetabaseFile(t *testing.T) { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) _, err := db.Put(ctx, PutPrm{ obj: obj, id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), @@ -160,7 +167,7 @@ func TestGenerateMetabaseFile(t *testing.T) { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) _, err := db.Put(ctx, PutPrm{ obj: obj, id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), @@ -190,7 +197,7 @@ func TestGenerateMetabaseFile(t *testing.T) { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) - 
testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10)) + testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10)) _, err := db.Put(ctx, PutPrm{ obj: obj, id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)), diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 4679de332..0a2f91a47 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -176,6 +176,21 @@ func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte { return append(key[:bucketKeySize], attributeKey...) } +func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) { + if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { + return cid.ID{}, false + } + var result cid.ID + return result, result.Decode(bucketName[1:bucketKeySize]) == nil +} + +func attributeFromAttributeBucket(bucketName []byte) (string, bool) { + if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix { + return "", false + } + return string(bucketName[bucketKeySize:]), true +} + // rootBucketName returns _root. func rootBucketName(cnr cid.ID, key []byte) []byte { return bucketName(cnr, rootPrefix, key) From 87f4b934d1cca9a671dc93fcc4cdb5861be35915 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 3 Oct 2024 17:57:21 +0300 Subject: [PATCH 071/591] [#1412] metabase: Run bucket drop steps on upgrade concurrently Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/upgrade.go | 22 ++++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index f2a0107a1..a997b90a0 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -119,13 +119,17 @@ func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvi if err := createExpirationEpochBuckets(ctx, db, log); err != nil { return err } - if err := dropUserAttributes(ctx, db, cs, log); err != nil { - return err - } - if err := dropOwnerIDIndex(ctx, db, log); err != nil { - return err - } - if err := dropPayloadChecksumIndex(ctx, db, log); err != nil { + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { + return dropUserAttributes(ctx, db, cs, log) + }) + eg.Go(func() error { + return dropOwnerIDIndex(ctx, db, log) + }) + eg.Go(func() error { + return dropPayloadChecksumIndex(ctx, db, log) + }) + if err := eg.Wait(); err != nil { return err } return db.Update(func(tx *bbolt.Tx) error { @@ -360,7 +364,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv if err != nil { return err } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := db.Batch(func(tx *bbolt.Tx) error { for _, k := range keysToDrop { if err := tx.DeleteBucket(k); err != nil { return err @@ -439,7 +443,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f log("deleting buckets completed successfully, deleted", count, "buckets") return nil } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := db.Batch(func(tx *bbolt.Tx) error { for _, k := range keys { if err := tx.DeleteBucket(k); err != nil { return err From fe9f664b577f1b51797e375ef736977ed61d9757 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 4 Oct 2024 10:49:39 +0300 Subject: [PATCH 072/591] [#1412] 
metabase: Drop empty user attribute buckets on upgrade Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/upgrade.go | 166 +++++++++++++++++-- 1 file changed, 154 insertions(+), 12 deletions(-) diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index a997b90a0..1f2c7956b 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -360,26 +360,40 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv return nil } last = keys[len(keys)-1] - keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) + cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys) if err != nil { + log("deleting user attribute buckets completed with an error:", err) return err } - if err := db.Batch(func(tx *bbolt.Tx) error { - for _, k := range keysToDrop { - if err := tx.DeleteBucket(k); err != nil { - return err - } - } - return nil - }); err != nil { - log("deleting buckets completed with an error:", err) + count += cnt + cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys) + if err != nil { + log("deleting user attribute buckets completed with an error:", err) return err } - count += uint64(len(keysToDrop)) - log("deleted", count, "buckets") + count += cnt + log("deleted", count, "user attribute buckets") } } +func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { + keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) + if err != nil { + return 0, fmt.Errorf("select non indexed user attributes: %w", err) + } + if err := db.Batch(func(tx *bbolt.Tx) error { + for _, k := range keysToDrop { + if err := tx.DeleteBucket(k); err != nil { + return err + } + } + return nil + }); err != nil { + return 0, fmt.Errorf("drop non indexed user attributes: %w", err) + } + return uint64(len(keysToDrop)), nil +} + func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) { var keysToDrop [][]byte for _, key := range keys { @@ -406,6 +420,134 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([] return keysToDrop, nil } +func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) { + var dropBuckets [][]byte + for _, key := range keys { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } + + if err := dropEmptyNestedBuckets(ctx, db, key); err != nil { + return 0, err + } + + empty, exists, err := bucketIsEmpty(db, key) + if err != nil { + return 0, err + } + if empty && exists { + dropBuckets = append(dropBuckets, key) + } + } + if len(dropBuckets) == 0 { + return 0, nil + } + if err := db.Batch(func(tx *bbolt.Tx) error { + for _, key := range dropBuckets { + if err := tx.DeleteBucket(key); err != nil { + return err + } + } + return nil + }); err != nil { + return 0, fmt.Errorf("drop empty user attributes buckets: %w", err) + } + return uint64(len(dropBuckets)), nil +} + +func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) { + var empty bool + var exists bool + if err := db.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(bucketKey) + if b == nil { + return nil + } + exists = true + empty = !hasAnyItem(b) + return nil + }); err != nil { + return false, false, fmt.Errorf("bucket empty check: %w", err) + } + return empty, exists, nil +} + +func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error { + var last []byte + for { + select { + 
case <-ctx.Done(): + return ctx.Err() + default: + } + + var dropBuckets [][]byte + var err error + dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last) + if err != nil { + return fmt.Errorf("select empty nested buckets: %w", err) + } + if len(dropBuckets) == 0 { + return nil + } + + if err := db.Batch(func(tx *bbolt.Tx) error { + rootBucket := tx.Bucket(rootBucketKey) + if rootBucket == nil { + return nil + } + for _, sb := range dropBuckets { + if err := rootBucket.DeleteBucket(sb); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("drop empty nested buckets: %w", err) + } + } +} + +func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) { + const batchSize = 1000 + var result [][]byte + if err := db.View(func(tx *bbolt.Tx) error { + rootBucket := tx.Bucket(rootBucketKey) + if rootBucket == nil { + return nil + } + c := rootBucket.Cursor() + for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if bytes.Equal(last, k) { + continue + } + last = bytes.Clone(k) + if v != nil { // record + continue + } + nestedBucket := rootBucket.Bucket(k) + if nestedBucket == nil { + continue + } + if !hasAnyItem(nestedBucket) { + result = append(result, bytes.Clone(k)) + } + } + return nil + }); err != nil { + return nil, nil, err + } + return result, last, nil +} + func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error { return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) { log(append([]any{"owner ID index:"}, a...)...) From c065d55ca31c18fa48c8d8a173237095179be732 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 7 Oct 2024 17:19:04 +0300 Subject: [PATCH 073/591] [#1412] metabase: Drop logging inside transaction This could lead to hang the db. 
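One way to keep such diagnostics without calling the logger from inside a bbolt callback is to record counters inside the transaction and log once it has returned. A minimal illustrative sketch of that pattern as seen from inside the metabase package (not part of this patch; the bucket and message names are placeholders):

    var missingBuckets int
    err := db.boltDB.View(func(tx *bbolt.Tx) error {
        if tx.Bucket([]byte("example-bucket")) == nil { // placeholder bucket name
            missingBuckets++ // only count inside the transaction
        }
        return nil
    })
    if err == nil && missingBuckets > 0 {
        // Logging happens strictly after the transaction is closed.
        db.log.Debug("buckets missing during selection", zap.Int("count", missingBuckets))
    }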
Signed-off-by: Dmitrii Stepanov --- internal/logs/logs.go | 6 ---- pkg/local_object_storage/metabase/select.go | 31 ++------------------- 2 files changed, 2 insertions(+), 35 deletions(-) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 1ae4f0d3f..84bd023f1 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -226,12 +226,6 @@ const ( EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully" EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error" EngineObjectIsMovedToAnotherShard = "object is moved to another shard" - MetabaseMissingMatcher = "missing matcher" - MetabaseErrorInFKBTSelection = "error in FKBT selection" - MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf" - MetabaseUnknownOperation = "unknown operation" - MetabaseCantIterateOverTheBucket = "can't iterate over the bucket" - MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets" MetabaseCreatedDirectoryForMetabase = "created directory for Metabase" MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase" MetabaseCheckingMetabaseVersion = "checking metabase version" diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 88ef7d5a4..41f05b756 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -9,7 +9,6 @@ import ( "time" v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -18,7 +17,6 @@ import ( "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" ) type ( @@ -288,8 +286,6 @@ func (db *DB) selectFromFKBT( ) { // matchFunc, ok := db.matchers[f.Operation()] if !ok { - db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation())) - return } @@ -298,7 +294,7 @@ func (db *DB) selectFromFKBT( return } - err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { + _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error { fkbtLeaf := fkbtRoot.Bucket(k) if fkbtLeaf == nil { return nil @@ -310,9 +306,6 @@ func (db *DB) selectFromFKBT( return nil }) }) - if err != nil { - db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error())) - } } // selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in @@ -377,24 +370,17 @@ func (db *DB) selectFromList( case objectSDK.MatchStringEqual: lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) if err != nil { - db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error())) return } default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op))) - return } if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error { l, err := decodeList(val) if err != nil { - db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, - zap.String("error", err.Error()), - ) - return err } @@ -402,10 +388,6 @@ func (db *DB) selectFromList( return nil }); err != nil { - db.log.Debug(logs.MetabaseCantIterateOverTheBucket, - zap.String("error", err.Error()), - ) - return } } @@ 
-447,10 +429,6 @@ func (db *DB) selectObjectID( default: fMatch, ok := db.matchers[op] if !ok { - db.log.Debug(logs.MetabaseUnknownOperation, - zap.Uint32("operation", uint32(f.Operation())), - ) - return } @@ -461,18 +439,13 @@ func (db *DB) selectObjectID( return } - err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error { + _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error { var id oid.ID if err := id.Decode(k); err == nil { appendOID(id) } return nil }) - if err != nil { - db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets, - zap.String("error", err.Error()), - ) - } } } } From 936ebbb8e5c1b1967e6fd0ec5ec50bd282c1f8bc Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 8 Oct 2024 18:39:52 +0300 Subject: [PATCH 074/591] [#1423] metabase: Hide `BucketName` form upper levels Signed-off-by: Anton Nikiforov --- pkg/local_object_storage/engine/evacuate.go | 12 +- pkg/local_object_storage/metabase/list.go | 104 ++++++++++-------- .../metabase/list_test.go | 57 ++++++++++ pkg/local_object_storage/shard/list.go | 32 +++--- 4 files changed, 143 insertions(+), 62 deletions(-) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 1baf237f9..c1b9276f3 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -435,7 +435,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context ) error { sh := shardsToEvacuate[shardID] var cntPrm shard.IterateOverContainersPrm - cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error { + cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error { select { case <-ctx.Done(): return context.Cause(ctx) @@ -455,8 +455,11 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context skip = e.isNotRepOne(c) } if skip { - countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name} - count, err := sh.CountAliveObjectsInBucket(ctx, countPrm) + countPrm := shard.CountAliveObjectsInContainerPrm{ + ObjectType: objType, + ContainerID: cnt, + } + count, err := sh.CountAliveObjectsInContainer(ctx, countPrm) if err != nil { return err } @@ -464,7 +467,8 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context return nil } var objPrm shard.IterateOverObjectsInContainerPrm - objPrm.BucketName = name + objPrm.ObjectType = objType + objPrm.ContainerID = cnt objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error { select { case <-ctx.Done(): diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 74a529809..b007ef0da 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -65,21 +65,25 @@ func (l ListRes) Cursor() *Cursor { // IterateOverContainersPrm contains parameters for IterateOverContainers operation. type IterateOverContainersPrm struct { // Handler function executed upon containers in db. - Handler func(context.Context, []byte, cid.ID) error + Handler func(context.Context, objectSDK.Type, cid.ID) error } // IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. type IterateOverObjectsInContainerPrm struct { - // BucketName container's bucket name. - BucketName []byte + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. 
+ ContainerID cid.ID // Handler function executed upon objects in db. Handler func(context.Context, *objectcore.Info) error } -// CountAliveObjectsInBucketPrm contains parameters for IterateOverObjectsInContainer operation. -type CountAliveObjectsInBucketPrm struct { - // BucketName container's bucket name. - BucketName []byte +// CountAliveObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. +type CountAliveObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID } // ListWithCursor lists physical objects available in metabase starting from @@ -319,12 +323,20 @@ func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm Itera if cidRaw == nil { continue } - - bktName := make([]byte, len(name)) - copy(bktName, name) var cnt cid.ID copy(cnt[:], containerID[:]) - err := prm.Handler(ctx, bktName, cnt) + var objType objectSDK.Type + switch prefix[0] { + case primaryPrefix: + objType = objectSDK.TypeRegular + case lockersPrefix: + objType = objectSDK.TypeLock + case tombstonePrefix: + objType = objectSDK.TypeTombstone + default: + continue + } + err := prm.Handler(ctx, objType, cnt) if err != nil { return err } @@ -356,22 +368,29 @@ func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOver return ErrDegradedMode } - var containerID cid.ID - cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName) - if cidRaw == nil { - return nil - } err := db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm) + return db.iterateOverObjectsInContainer(ctx, tx, prm) }) success = err == nil return metaerr.Wrap(err) } -func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte, - containerID cid.ID, prm IterateOverObjectsInContainerPrm, -) error { - bkt := tx.Bucket(prm.BucketName) +func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error { + var prefix byte + switch prm.ObjectType { + case objectSDK.TypeRegular: + prefix = primaryPrefix + case objectSDK.TypeLock: + prefix = lockersPrefix + case objectSDK.TypeTombstone: + prefix = tombstonePrefix + default: + return nil + } + bucketName := []byte{prefix} + bucketName = append(bucketName, prm.ContainerID[:]...) 
+ + bkt := tx.Bucket(bucketName) if bkt == nil { return nil } @@ -380,32 +399,19 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c c := bkt.Cursor() k, v := c.First() - var objType objectSDK.Type - - switch prefix { - case primaryPrefix: - objType = objectSDK.TypeRegular - case lockersPrefix: - objType = objectSDK.TypeLock - case tombstonePrefix: - objType = objectSDK.TypeTombstone - default: - return nil - } - for ; k != nil; k, v = c.Next() { var obj oid.ID if err := obj.Decode(k); err != nil { break } - if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { + if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { continue } var isLinkingObj bool var ecInfo *objectcore.ECInfo - if objType == objectSDK.TypeRegular { + if prm.ObjectType == objectSDK.TypeRegular { var o objectSDK.Object if err := o.Unmarshal(v); err != nil { return err @@ -422,9 +428,9 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c } var a oid.Address - a.SetContainer(containerID) + a.SetContainer(prm.ContainerID) a.SetObject(obj) - objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo} + objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo} err := prm.Handler(ctx, &objInfo) if err != nil { return err @@ -433,8 +439,8 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c return nil } -// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage. -func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) { +// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage. +func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { var ( startedAt = time.Now() success = false @@ -452,14 +458,22 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec return 0, ErrDegradedMode } - if len(prm.BucketName) != bucketKeySize { + var prefix byte + switch prm.ObjectType { + case objectSDK.TypeRegular: + prefix = primaryPrefix + case objectSDK.TypeLock: + prefix = lockersPrefix + case objectSDK.TypeTombstone: + prefix = tombstonePrefix + default: return 0, nil } - - cidRaw := prm.BucketName[1:bucketKeySize] + bucketName := []byte{prefix} + bucketName = append(bucketName, prm.ContainerID[:]...) 
var count uint64 err := db.boltDB.View(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(prm.BucketName) + bkt := tx.Bucket(bucketName) if bkt == nil { return nil } @@ -468,7 +482,7 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec c := bkt.Cursor() k, _ := c.First() for ; k != nil; k, _ = c.Next() { - if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { + if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 { continue } count++ diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index bc1726bd6..203802ec0 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -219,3 +220,59 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec r, err := db.ListWithCursor(context.Background(), listPrm) return r.AddressList(), r.Cursor(), err } + +func TestIterateOver(t *testing.T) { + t.Parallel() + + db := newDB(t) + defer func() { require.NoError(t, db.Close()) }() + + const total uint64 = 5 + for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} { + var expected []*objectSDK.Object + // fill metabase with objects + cid := cidtest.ID() + for range total { + obj := testutil.GenerateObjectWithCID(cid) + obj.SetType(typ) + err := metaPut(db, obj, nil) + require.NoError(t, err) + expected = append(expected, obj) + } + + var metaIter meta.IterateOverObjectsInContainerPrm + var count uint64 + metaIter.Handler = func(context.Context, *object.Info) error { + count++ + return nil + } + metaIter.ContainerID = cid + metaIter.ObjectType = typ + err := db.IterateOverObjectsInContainer(context.Background(), metaIter) + require.NoError(t, err) + require.Equal(t, total, count) + + var metaCount meta.CountAliveObjectsInContainerPrm + metaCount.ContainerID = cid + metaCount.ObjectType = typ + res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount) + require.NoError(t, err) + require.Equal(t, res, total) + + err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1])) + require.NoError(t, err) + + res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount) + require.NoError(t, err) + require.Equal(t, uint64(3), res) + } + var count int + var metaPrm meta.IterateOverContainersPrm + metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error { + count++ + return nil + } + err := db.IterateOverContainers(context.Background(), metaPrm) + require.NoError(t, err) + require.Equal(t, 3, count) +} diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index f5d633b77..8d09974b8 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -37,21 +37,25 @@ func (r ListContainersRes) Containers() []cid.ID { // IterateOverContainersPrm contains parameters for IterateOverContainers 
operation. type IterateOverContainersPrm struct { // Handler function executed upon containers in db. - Handler func(context.Context, []byte, cid.ID) error + Handler func(context.Context, objectSDK.Type, cid.ID) error } // IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation. type IterateOverObjectsInContainerPrm struct { - // BucketName container's bucket name. - BucketName []byte + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID // Handler function executed upon objects in db. Handler func(context.Context, *objectcore.Info) error } -// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation. -type CountAliveObjectsInBucketPrm struct { - // BucketName container's bucket name. - BucketName []byte +// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation. +type CountAliveObjectsInContainerPrm struct { + // ObjectType type of objects to iterate over. + ObjectType objectSDK.Type + // ContainerID container for objects to iterate over. + ContainerID cid.ID } // ListWithCursorPrm contains parameters for ListWithCursor operation. @@ -226,7 +230,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv } var metaPrm meta.IterateOverObjectsInContainerPrm - metaPrm.BucketName = prm.BucketName + metaPrm.ContainerID = prm.ContainerID + metaPrm.ObjectType = prm.ObjectType metaPrm.Handler = prm.Handler err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { @@ -236,8 +241,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return nil } -// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage. -func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) { +// CountAliveObjectsInContainer count objects in bucket which aren't in graveyard or garbage. 
+func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) { _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket") defer span.End() @@ -248,9 +253,10 @@ func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObj return 0, ErrDegradedMode } - var metaPrm meta.CountAliveObjectsInBucketPrm - metaPrm.BucketName = prm.BucketName - count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm) + var metaPrm meta.CountAliveObjectsInContainerPrm + metaPrm.ObjectType = prm.ObjectType + metaPrm.ContainerID = prm.ContainerID + count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm) if err != nil { return 0, fmt.Errorf("could not count alive objects in bucket: %w", err) } From 4190fba86d575ba6c05aeb919ae5bc30572ecbd2 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 8 Oct 2024 16:44:34 +0300 Subject: [PATCH 075/591] [#1425] Remove SetEACL-related code Signed-off-by: Evgenii Stratonikov --- .../modules/morph/container/container.go | 8 -- internal/logs/logs.go | 3 - .../processors/container/handlers.go | 4 - .../processors/container/processor.go | 10 --- pkg/morph/client/container/client.go | 1 - pkg/morph/event/container/eacl.go | 51 ------------- pkg/morph/event/container/eacl_notary.go | 75 ------------------- pkg/services/container/morph/executor_test.go | 4 - 8 files changed, 156 deletions(-) delete mode 100644 pkg/morph/event/container/eacl.go delete mode 100644 pkg/morph/event/container/eacl_notary.go diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index a66438975..e280bc634 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -159,9 +159,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo if err != nil { return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) } - if len(ea.Value) != 0 { - cnt.EACL = ea - } return cnt, nil } @@ -258,10 +255,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) { emit.AppCall(bw.BinWriter, ch, "put", callflag.All, cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token) - if ea := cnt.EACL; ea != nil { - emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All, - ea.Value, ea.Signature, ea.PublicKey, ea.Token) - } } func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) { @@ -322,7 +315,6 @@ type Container struct { Signature []byte `json:"signature"` PublicKey []byte `json:"public_key"` Token []byte `json:"token"` - EACL *EACL `json:"eacl"` } // EACL represents extended ACL struct in contract storage. 
diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 84bd023f1..ca783a39d 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -308,9 +308,6 @@ const ( ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete" ContainerDeleteContainerCheckFailed = "delete container check failed" ContainerCouldNotApproveDeleteContainer = "could not approve delete container" - ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL" - ContainerSetEACLCheckFailed = "set EACL check failed" - ContainerCouldNotApproveSetEACL = "could not approve set EACL" FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config" FrostFSCantRelaySetConfigEvent = "can't relay set config event" FrostFSFrostfsWorkerPool = "frostfs worker pool" diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index 8bb89abe2..a54f3c772 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -48,7 +48,3 @@ func (cp *Processor) handleDelete(ev event.Event) { zap.Int("capacity", cp.pool.Cap())) } } - -func (cp *Processor) handleSetEACL(_ event.Event) { - cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL")) -} diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 8fd9edfb8..a6fbdc707 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -157,11 +157,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo { p.SetParser(containerEvent.ParseDeleteNotary) pp = append(pp, p) - // set EACL - p.SetRequestType(containerEvent.SetEACLNotaryEvent) - p.SetParser(containerEvent.ParseSetEACLNotary) - pp = append(pp, p) - return pp } @@ -190,10 +185,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo { h.SetHandler(cp.handleDelete) hh = append(hh, h) - // set eACL - h.SetRequestType(containerEvent.SetEACLNotaryEvent) - h.SetHandler(cp.handleSetEACL) - hh = append(hh, h) - return hh } diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index 9dd3a337b..fc892aafb 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -28,7 +28,6 @@ const ( listMethod = "list" containersOfMethod = "containersOf" eaclMethod = "eACL" - setEACLMethod = "setEACL" deletionInfoMethod = "deletionInfo" startEstimationMethod = "startContainerEstimation" diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go deleted file mode 100644 index 4168d8842..000000000 --- a/pkg/morph/event/container/eacl.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "github.com/nspcc-dev/neo-go/pkg/network/payload" -) - -// SetEACL represents structure of notification about -// modified eACL table coming from FrostFS Container contract. -type SetEACL struct { - TableValue []byte - SignatureValue []byte - PublicKeyValue []byte - TokenValue []byte - - // For notary notifications only. - // Contains raw transactions of notary request. - NotaryRequestValue *payload.P2PNotaryRequest -} - -// MorphEvent implements Neo:Morph Event interface. -func (SetEACL) MorphEvent() {} - -// Table returns returns eACL table in a binary FrostFS API format. -func (x SetEACL) Table() []byte { - return x.TableValue -} - -// Signature returns signature of the binary table. 
-func (x SetEACL) Signature() []byte { - return x.SignatureValue -} - -// PublicKey returns public keys of container -// owner in a binary format. -func (x SetEACL) PublicKey() []byte { - return x.PublicKeyValue -} - -// SessionToken returns binary token of the session -// within which the eACL was set. -func (x SetEACL) SessionToken() []byte { - return x.TokenValue -} - -// NotaryRequest returns raw notary request if notification -// was received via notary service. Otherwise, returns nil. -func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest { - return x.NotaryRequestValue -} - -const expectedItemNumEACL = 4 diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go deleted file mode 100644 index a4fe7c966..000000000 --- a/pkg/morph/event/container/eacl_notary.go +++ /dev/null @@ -1,75 +0,0 @@ -package container - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/opcode" -) - -func (x *SetEACL) setTable(v []byte) { - if v != nil { - x.TableValue = v - } -} - -func (x *SetEACL) setSignature(v []byte) { - if v != nil { - x.SignatureValue = v - } -} - -func (x *SetEACL) setPublicKey(v []byte) { - if v != nil { - x.PublicKeyValue = v - } -} - -func (x *SetEACL) setToken(v []byte) { - if v != nil { - x.TokenValue = v - } -} - -var setEACLFieldSetters = []func(*SetEACL, []byte){ - // order on stack is reversed - (*SetEACL).setToken, - (*SetEACL).setPublicKey, - (*SetEACL).setSignature, - (*SetEACL).setTable, -} - -const ( - // SetEACLNotaryEvent is method name for container EACL operations - // in `Container` contract. Is used as identificator for notary - // EACL changing requests. - SetEACLNotaryEvent = "setEACL" -) - -// ParseSetEACLNotary from NotaryEvent into container event structure. 
-func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) { - var ( - ev SetEACL - currentOp opcode.Opcode - ) - - fieldNum := 0 - - for _, op := range ne.Params() { - currentOp = op.Code() - - switch { - case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4: - if fieldNum == expectedItemNumEACL { - return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent) - } - - setEACLFieldSetters[fieldNum](&ev, op.Param()) - fieldNum++ - default: - return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code()) - } - } - - ev.NotaryRequestValue = ne.Raw() - - return ev, nil -} diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go index 560c69232..c64310eb3 100644 --- a/pkg/services/container/morph/executor_test.go +++ b/pkg/services/container/morph/executor_test.go @@ -32,10 +32,6 @@ func (m mock) Delete(_ containerCore.RemovalWitness) error { return nil } -func (m mock) PutEACL(_ containerCore.EACL) error { - return nil -} - func TestInvalidToken(t *testing.T) { m := mock{} e := containerSvcMorph.NewExecutor(m, m) From cc5360a57851e27dd51d72cf0ff5fa6ac44aba2f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 8 Oct 2024 16:46:41 +0300 Subject: [PATCH 076/591] [#1425] morph/event: Rename eacl_test.go Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/container/{eacl_test.go => util_test.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pkg/morph/event/container/{eacl_test.go => util_test.go} (100%) diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/util_test.go similarity index 100% rename from pkg/morph/event/container/eacl_test.go rename to pkg/morph/event/container/util_test.go From 94302235d03ad147c9d42791de39f75aaa7fe7f9 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 9 Oct 2024 10:03:58 +0300 Subject: [PATCH 077/591] [#1425] adm: Remove eACL fetching from dump-containers Signed-off-by: Evgenii Stratonikov --- .../modules/morph/container/container.go | 62 +------------------ 1 file changed, 2 insertions(+), 60 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index e280bc634..6f08d1655 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -139,13 +139,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error { func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) { bw.Reset() emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id) - emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id) res, err := inv.Run(bw.Bytes()) if err != nil { return nil, fmt.Errorf("can't get container info: %w", err) } - if len(res.Stack) != 2 { - return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse) + if len(res.Stack) != 1 { + return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse) } cnt := new(Container) @@ -154,11 +153,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) } - ea := new(EACL) - err = ea.FromStackItem(res.Stack[1]) - if err != nil { - return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err) - } return cnt, nil } @@ -317,14 +311,6 @@ type Container struct { Token []byte `json:"token"` } -// EACL represents extended ACL struct in 
contract storage. -type EACL struct { - Value []byte `json:"value"` - Signature []byte `json:"signature"` - PublicKey []byte `json:"public_key"` - Token []byte `json:"token"` -} - // ToStackItem implements stackitem.Convertible. func (c *Container) ToStackItem() (stackitem.Item, error) { return stackitem.NewStruct([]stackitem.Item{ @@ -369,50 +355,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error { return nil } -// ToStackItem implements stackitem.Convertible. -func (c *EACL) ToStackItem() (stackitem.Item, error) { - return stackitem.NewStruct([]stackitem.Item{ - stackitem.NewByteArray(c.Value), - stackitem.NewByteArray(c.Signature), - stackitem.NewByteArray(c.PublicKey), - stackitem.NewByteArray(c.Token), - }), nil -} - -// FromStackItem implements stackitem.Convertible. -func (c *EACL) FromStackItem(item stackitem.Item) error { - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) != 4 { - return errors.New("invalid stack item type") - } - - value, err := arr[0].TryBytes() - if err != nil { - return errors.New("invalid eACL value") - } - - sig, err := arr[1].TryBytes() - if err != nil { - return errors.New("invalid eACL signature") - } - - pub, err := arr[2].TryBytes() - if err != nil { - return errors.New("invalid eACL public key") - } - - tok, err := arr[3].TryBytes() - if err != nil { - return errors.New("invalid eACL token") - } - - c.Value = value - c.Signature = sig - c.PublicKey = pub - c.Token = tok - return nil -} - // getCIDFilterFunc returns filtering function for container IDs. // Raw byte slices are used because it works with structures returned // from contract. From 02bb7159a54a9522ad0bc97d1a5456f5cfc425e4 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 9 Oct 2024 10:50:30 +0300 Subject: [PATCH 078/591] [#1425] services/tree: Remove eACL processing Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/tree.go | 1 - pkg/services/tree/options.go | 9 -- pkg/services/tree/signature.go | 137 +------------------------ pkg/services/tree/signature_test.go | 151 ++++++++++++++++++---------- 4 files changed, 100 insertions(+), 198 deletions(-) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index d22e510de..192f08471 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -54,7 +54,6 @@ func initTreeService(c *cfg) { cli: c.shared.cnrClient, }), tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), - tree.WithEACLSource(c.cfgObject.eaclSource), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), tree.WithLogger(c.log), diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 6a20fe5cc..1db5607f6 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -33,7 +33,6 @@ type cfg struct { nmSource netmap.Source cnrSource ContainerSource frostfsidSubjectProvider frostfsidcore.SubjectProvider - eaclSource container.EACLSource forest pilorama.Forest // replication-related parameters replicatorChannelCapacity int @@ -65,14 +64,6 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option } } -// WithEACLSource sets a eACL table source for a tree service. -// This option is required. -func WithEACLSource(src container.EACLSource) Option { - return func(c *cfg) { - c.eaclSource = src - } -} - // WithNetmapSource sets a netmap source for a tree service. // This option is required. 
func WithNetmapSource(src netmap.Source) Option { diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 58cab659f..305adf2d7 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -9,10 +9,8 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -20,7 +18,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "go.uber.org/zap" ) type message interface { @@ -30,16 +27,11 @@ type message interface { SetSignature(*Signature) } -func basicACLErr(op acl.Op) error { - return fmt.Errorf("access to operation %s is denied by basic ACL check", op) -} - func eACLErr(op eacl.Operation, err error) error { return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err) } var ( - errBearerWrongOwner = errors.New("bearer token must be signed by the container owner") errBearerWrongContainer = errors.New("bearer token is created for another container") errBearerSignature = errors.New("invalid bearer token signature") ) @@ -77,56 +69,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - basicACL := cnr.Value.BasicACL() - // Basic ACL mask can be unset, if a container operations are performed - // with strict APE checks only. - // - // FIXME(@aarifullin): tree service temporiraly performs APE checks on - // object verbs, because tree verbs have not been introduced yet. 
- if basicACL == 0x0 { - return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey) - } - - if !basicACL.IsOpAllowed(op, role) { - return basicACLErr(op) - } - - if !basicACL.Extendable() { - return nil - } - - var useBearer bool - if len(rawBearer) != 0 { - if !basicACL.AllowedBearerRules(op) { - s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL, - zap.String("cid", cid.EncodeToString()), - zap.Stringer("op", op), - ) - } else { - useBearer = true - } - } - - var tb eacl.Table - signer := req.GetSignature().GetKey() - if useBearer && !bt.Impersonate() { - if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) { - return eACLErr(eaclOp, errBearerWrongOwner) - } - tb = bt.EACLTable() - } else { - tbCore, err := s.eaclSource.GetEACL(cid) - if err != nil { - return handleGetEACLError(err) - } - tb = *tbCore.Value - - if useBearer && bt.Impersonate() { - signer = bt.SigningKeyBytes() - } - } - - return checkEACL(tb, signer, eACLRole(role), eaclOp) + return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey) } // Returns true iff the operation is read-only and request was signed @@ -168,14 +111,6 @@ func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*beare return bt, nil } -func handleGetEACLError(err error) error { - if client.IsErrEACLNotFound(err) { - return nil - } - - return fmt.Errorf("get eACL table: %w", err) -} - func verifyMessage(m message) error { binBody, err := m.ReadSignedData(nil) if err != nil { @@ -260,73 +195,3 @@ func eACLOp(op acl.Op) eacl.Operation { panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op)) } } - -func eACLRole(role acl.Role) eacl.Role { - switch role { - case acl.RoleOwner: - return eacl.RoleUser - case acl.RoleOthers: - return eacl.RoleOthers - default: - panic(fmt.Sprintf("unexpected tree service ACL role: %s", role)) - } -} - -var ( - errDENY = errors.New("DENY eACL rule") - errNoAllowRules = errors.New("not found allowing rules for the request") -) - -// checkEACL searches for the eACL rules that could be applied to the request -// (a tuple of a signer key, his FrostFS role and a request operation). -// It does not filter the request by the filters of the eACL table since tree -// requests do not contain any "object" information that could be filtered and, -// therefore, filtering leads to unexpected results. -// The code was copied with the minor updates from the SDK repo: -// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28. 
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error { - for _, record := range tb.Records() { - // check type of operation - if record.Operation() != op { - continue - } - - // check target - if !targetMatches(record, role, signer) { - continue - } - - switch a := record.Action(); a { - case eacl.ActionAllow: - return nil - case eacl.ActionDeny: - return eACLErr(op, errDENY) - default: - return eACLErr(op, fmt.Errorf("unexpected action: %s", a)) - } - } - - return eACLErr(op, errNoAllowRules) -} - -func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool { - for _, target := range rec.Targets() { - // check public key match - if pubs := target.BinaryKeys(); len(pubs) != 0 { - for _, key := range pubs { - if bytes.Equal(key, signer) { - return true - } - } - - continue - } - - // check target group match - if role == target.Role() { - return true - } - } - - return false -} diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 3c3ebfe89..939ff170d 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -4,22 +4,30 @@ import ( "context" "crypto/ecdsa" "crypto/sha256" + "encoding/hex" "errors" "testing" aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" + "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "github.com/stretchr/testify/require" ) @@ -27,6 +35,34 @@ type dummyNetmapSource struct { netmap.Source } +type dummySubjectProvider struct { + subjects map[util.Uint160]client.SubjectExtended +} + +func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) { + res := s.subjects[addr] + return &client.Subject{ + PrimaryKey: res.PrimaryKey, + AdditionalKeys: res.AdditionalKeys, + Namespace: res.Namespace, + Name: res.Name, + KV: res.KV, + }, nil +} + +func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { + res := s.subjects[addr] + return &res, nil +} + +type dummyEpochSource struct { + epoch uint64 +} + +func (s dummyEpochSource) CurrentEpoch() uint64 { + return s.epoch +} + type dummyContainerSource map[string]*containercore.Container func (s dummyContainerSource) List() ([]cid.ID, error) { @@ -57,16 +93,6 @@ func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, e return &containercore.DelInfo{}, nil } 
-type dummyEACLSource map[string]*containercore.EACL - -func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) { - cntEACL, ok := s[id.String()] - if !ok { - return nil, errors.New("container not found") - } - return cntEACL, nil -} - func testContainer(owner user.ID) container.Container { var r netmapSDK.ReplicaDescriptor r.SetNumberOfObjects(1) @@ -81,6 +107,8 @@ func testContainer(owner user.ID) container.Container { return cnt } +const currentEpoch = 123 + func TestMessageSign(t *testing.T) { privs := make([]*keys.PrivateKey, 4) for i := range privs { @@ -99,6 +127,15 @@ func TestMessageSign(t *testing.T) { Value: testContainer(ownerID), } + e := inmemory.NewInMemoryLocalOverrides() + e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{ + Type: engine.Container, + Name: cid1.EncodeToString(), + }, testChain(privs[0].PublicKey(), privs[1].PublicKey())) + frostfsidProvider := dummySubjectProvider{ + subjects: make(map[util.Uint160]client.SubjectExtended), + } + s := &Service{ cfg: cfg{ log: test.NewLogger(t), @@ -107,12 +144,10 @@ func TestMessageSign(t *testing.T) { cnrSource: dummyContainerSource{ cid1.String(): cnr, }, - eaclSource: dummyEACLSource{ - cid1.String(): &containercore.EACL{ - Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()), - }, - }, + frostfsidSubjectProvider: frostfsidProvider, + state: dummyEpochSource{epoch: currentEpoch}, }, + apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } rawCID1 := make([]byte, sha256.Size) @@ -235,46 +270,58 @@ func TestMessageSign(t *testing.T) { func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token { var b bearer.Token - b.SetEACLTable(*testTable(cid, forPutGet, forGet)) + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid.EncodeToString(), + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) return b } -func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table { - tgtGet := eaclSDK.NewTarget() - tgtGet.SetRole(eaclSDK.RoleUnknown) - tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()}) - - rGet := eaclSDK.NewRecord() - rGet.SetAction(eaclSDK.ActionAllow) - rGet.SetOperation(eaclSDK.OperationGet) - rGet.SetTargets(*tgtGet) - - tgtPut := eaclSDK.NewTarget() - tgtPut.SetRole(eaclSDK.RoleUnknown) - tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()}) - - rPut := eaclSDK.NewRecord() - rPut.SetAction(eaclSDK.ActionAllow) - rPut.SetOperation(eaclSDK.OperationPut) - rPut.SetTargets(*tgtPut) - - tb := eaclSDK.NewTable() - tb.AddRecord(rGet) - tb.AddRecord(rPut) - - tgt := eaclSDK.NewTarget() - tgt.SetRole(eaclSDK.RoleOthers) - - for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} { - r := eaclSDK.NewRecord() - r.SetAction(eaclSDK.ActionDeny) - r.SetTargets(*tgt) - r.SetOperation(op) - tb.AddRecord(r) +func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { + ruleGet := chain.Rule{ + Status: chain.Allow, + Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, + Actions: chain.Actions{Names: []string{native.MethodGetObject}}, + Any: true, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forPutGet.Bytes()), + }, + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: 
native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forGet.Bytes()), + }, + }, + } + rulePut := chain.Rule{ + Status: chain.Allow, + Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}}, + Actions: chain.Actions{Names: []string{native.MethodPutObject}}, + Any: true, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindRequest, + Key: native.PropertyKeyActorPublicKey, + Value: hex.EncodeToString(forPutGet.Bytes()), + }, + }, } - tb.SetCID(cid) - - return tb + return &chain.Chain{ + Rules: []chain.Rule{ + ruleGet, + rulePut, + }, + } } From 11347602719a764179a74382076de4430936d7ad Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 9 Oct 2024 10:55:48 +0300 Subject: [PATCH 079/591] [#1425] services/tree: Remove eACL mentions from bearer token parsing errors Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/signature.go | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 305adf2d7..20a629fcc 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -15,7 +15,6 @@ import ( cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) @@ -27,10 +26,6 @@ type message interface { SetSignature(*Signature) } -func eACLErr(op eacl.Operation, err error) error { - return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err) -} - var ( errBearerWrongContainer = errors.New("bearer token is created for another container") errBearerSignature = errors.New("invalid bearer token signature") @@ -57,11 +52,9 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get container %s: %w", cid, err) } - eaclOp := eACLOp(op) - - bt, err := parseBearer(rawBearer, cid, eaclOp) + bt, err := parseBearer(rawBearer, cid) if err != nil { - return err + return fmt.Errorf("access to operation %s is denied: %w", op, err) } role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt) @@ -93,20 +86,20 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { return false, nil } -func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) { +func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) { if len(rawBearer) == 0 { return nil, nil } bt := new(bearer.Token) if err := bt.Unmarshal(rawBearer); err != nil { - return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err)) + return nil, fmt.Errorf("invalid bearer token: %w", err) } if !bt.AssertContainer(cid) { - return nil, eACLErr(eaclOp, errBearerWrongContainer) + return nil, errBearerWrongContainer } if !bt.VerifySignature() { - return nil, eACLErr(eaclOp, errBearerSignature) + return nil, errBearerSignature } return bt, nil } @@ -184,14 +177,3 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a return role, pub, nil } - -func eACLOp(op acl.Op) eacl.Operation { - switch op { - case acl.OpObjectGet: - return eacl.OperationGet - case acl.OpObjectPut: - return eacl.OperationPut - default: - panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op)) - } -} From 
dfb00083d07499a0e3d89076cc3f08729c00cb71 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 10 Oct 2024 14:57:39 +0300 Subject: [PATCH 080/591] [#1426] go.mod: Update sdk-go Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 91cc55a36..9a64f0e81 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 diff --git a/go.sum b/go.sum index 728592ea5..777d9b3ab 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 h1:5gtEq4bjVgAbTOrbEquspyM3s+qsMtkpGC5m9FtfImk= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= From 5992ee901ae574d536a82bd7f1504852e17fb086 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Fri, 11 Oct 2024 11:33:36 +0300 Subject: [PATCH 081/591] [#1411] go.mod: Bump frostfs-contract to v0.20.0 Signed-off-by: Alexander Chuprov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9a64f0e81..1468c12b2 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c + git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 diff --git a/go.sum b/go.sum index 777d9b3ab..5ce81807a 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= 
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4= git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 h1:8Z5iPhieCrbcdhxBuY/Bajh6V5fki7Whh0b4S2zYJYU= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0/go.mod h1:Y2Xorxc8SBO4phoek7n3XxaPZz5rIrFgDsU4TOjmlGA= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= From 42bf03e5cc04f2b6d67465608b842693171344ba Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Fri, 11 Oct 2024 11:33:56 +0300 Subject: [PATCH 082/591] [#1411] adm/nns: Add 'delRecord' Signed-off-by: Alexander Chuprov --- .../internal/modules/morph/nns/record.go | 29 +++++++++++++++++++ .../internal/modules/morph/nns/root.go | 10 +++++++ 2 files changed, 39 insertions(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 0e217eb61..66bb1b94f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -47,6 +47,19 @@ func initDelRecordsCmd() { _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) } +func initDelRecordCmd() { + Cmd.AddCommand(delRecordCmd) + delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + + _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) + _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) + _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag) +} + func addRecord(cmd *cobra.Command, _ []string) { c, actor, _ := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) @@ -115,6 +128,22 @@ func delRecords(cmd *cobra.Command, _ []string) { cmd.Println("Records removed successfully") } +func delRecord(cmd *cobra.Command, _ []string) { + c, actor, _ := getRPCClient(cmd) + name, _ := cmd.Flags().GetString(nnsNameFlag) + data, _ := cmd.Flags().GetString(nnsRecordDataFlag) + recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) + typ, err := getRecordType(recordType) + commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err) + h, vub, err := c.DeleteRecord(name, typ, data) + commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err) + + cmd.Println("Waiting for transaction to persist...") + _, err = actor.Wait(h, vub, err) + commonCmd.ExitOnErr(cmd, "delete records error: %w", err) + 
cmd.Println("Record removed successfully") +} + func getRecordType(recordType string) (*big.Int, error) { switch strings.ToUpper(recordType) { case "A": diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index 56774c292..9bdeaccd9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -95,6 +95,15 @@ var ( }, Run: delRecords, } + delRecordCmd = &cobra.Command{ + Use: "delete-record", + Short: "Removes domain record with the specified type and data", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + }, + Run: delRecord, + } ) func init() { @@ -106,4 +115,5 @@ func init() { initAddRecordCmd() initGetRecordsCmd() initDelRecordsCmd() + initDelRecordCmd() } From acd6eb18151d7e2fec413d97e273b13076a9b4bb Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Fri, 11 Oct 2024 15:40:01 +0300 Subject: [PATCH 083/591] [#1427] object: Fix `Put` for `EC` object when node unavailable There might be situation when context canceled earlier than traverser move to another part of the nodes. To avoid this, need to wait for the result from concurrent put at each traverser iteration. Signed-off-by: Anton Nikiforov --- pkg/services/object/common/writer/ec.go | 20 +- pkg/services/object/common/writer/ec_test.go | 191 +++++++++++++++++++ 2 files changed, 205 insertions(+), 6 deletions(-) create mode 100644 pkg/services/object/common/writer/ec_test.go diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 6b6a14cc0..dffe52a6d 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -197,14 +197,15 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er if err != nil { return err } + partsProcessed := make([]atomic.Bool, len(parts)) objID, _ := obj.ID() t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) 
if err != nil { return err } - eg, egCtx := errgroup.WithContext(ctx) for { + eg, egCtx := errgroup.WithContext(ctx) nodes := t.Next() if len(nodes) == 0 { break @@ -216,13 +217,20 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er } for idx := range parts { - eg.Go(func() error { - return e.writePart(egCtx, parts[idx], idx, nodes, visited) - }) - t.SubmitSuccess() + if !partsProcessed[idx].Load() { + eg.Go(func() error { + err := e.writePart(egCtx, parts[idx], idx, nodes, visited) + if err == nil { + partsProcessed[idx].Store(true) + t.SubmitSuccess() + } + return err + }) + } } + err = eg.Wait() } - if err := eg.Wait(); err != nil { + if err != nil { return errIncompletePut{ singleErr: err, } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go new file mode 100644 index 000000000..32863d678 --- /dev/null +++ b/pkg/services/object/common/writer/ec_test.go @@ -0,0 +1,191 @@ +package writer + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "errors" + "fmt" + "strconv" + "testing" + + rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" + "git.frostfs.info/TrueCloudLab/tzhash/tz" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/panjf2000/ants/v2" + "github.com/stretchr/testify/require" +) + +type testPlacementBuilder struct { + vectors [][]netmap.NodeInfo +} + +func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( + [][]netmap.NodeInfo, error, +) { + arr := make([]netmap.NodeInfo, len(p.vectors[0])) + copy(arr, p.vectors[0]) + return [][]netmap.NodeInfo{arr}, nil +} + +type nmKeys struct{} + +func (nmKeys) IsLocalKey(_ []byte) bool { + return false +} + +type clientConstructor struct { + vectors [][]netmap.NodeInfo +} + +func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) { + if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) || + bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) { + return multiAddressClient{err: errors.New("node unavailable")}, nil + } + return multiAddressClient{}, nil +} + +type multiAddressClient struct { + client.MultiAddressClient + err error +} + +func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) { + if c.err != nil { + return nil, c.err + } + return &apiclient.ResObjectPutSingle{}, nil +} + +func (c multiAddressClient) 
ReportError(error) {
+}
+
+func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
+	return nil
+}
+
+func TestECWriter(t *testing.T) {
+	// Create container with policy EC 1.1
+	cnr := container.Container{}
+	p1 := netmap.PlacementPolicy{}
+	p1.SetContainerBackupFactor(1)
+	x1 := netmap.ReplicaDescriptor{}
+	x1.SetECDataCount(1)
+	x1.SetECParityCount(1)
+	p1.AddReplicas(x1)
+	cnr.SetPlacementPolicy(p1)
+	cnr.SetAttribute("cnr", "cnr1")
+
+	cid := cidtest.ID()
+
+	// Create 4 nodes, 2 nodes for chunks,
+	// 2 nodes for the case when the first two will fail.
+	ns, _ := testNodeMatrix(t, []int{4})
+
+	data := make([]byte, 100)
+	_, _ = rand.Read(data)
+	ver := version.Current()
+
+	var csum checksum.Checksum
+	csum.SetSHA256(sha256.Sum256(data))
+
+	var csumTZ checksum.Checksum
+	csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+	obj := objectSDK.New()
+	obj.SetID(oidtest.ID())
+	obj.SetOwnerID(usertest.ID())
+	obj.SetContainerID(cid)
+	obj.SetVersion(&ver)
+	obj.SetPayload(data)
+	obj.SetPayloadSize(uint64(len(data)))
+	obj.SetPayloadChecksum(csum)
+	obj.SetPayloadHomomorphicHash(csumTZ)
+
+	// Builder return nodes without sort by hrw
+	builder := &testPlacementBuilder{
+		vectors: ns,
+	}
+
+	ownerKey, err := keys.NewPrivateKey()
+	require.NoError(t, err)
+
+	pool, err := ants.NewPool(4, ants.WithNonblocking(true))
+	require.NoError(t, err)
+
+	log, err := logger.NewLogger(nil)
+	require.NoError(t, err)
+
+	var n nmKeys
+	ecw := ECWriter{
+		Config: &Config{
+			NetmapKeys:        n,
+			RemotePool:        pool,
+			Logger:            log,
+			ClientConstructor: clientConstructor{vectors: ns},
+		},
+		PlacementOpts: append(
+			[]placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
+			placement.WithCopyNumbers(nil)), // copies number ignored for EC
+		Container:       cnr,
+		Key:             &ownerKey.PrivateKey,
+		Relay:           nil,
+		ObjectMetaValid: true,
+	}
+
+	err = ecw.WriteObject(context.Background(), obj)
+	require.NoError(t, err)
+}
+
+func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
+	mNodes := make([][]netmap.NodeInfo, len(dim))
+	mAddr := make([][]string, len(dim))
+
+	for i := range dim {
+		ns := make([]netmap.NodeInfo, dim[i])
+		as := make([]string, dim[i])
+
+		for j := range dim[i] {
+			a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
+				strconv.Itoa(i),
+				strconv.Itoa(60000+j),
+			)
+
+			var ni netmap.NodeInfo
+			ni.SetNetworkEndpoints(a)
+			ni.SetPublicKey([]byte(a))
+
+			var na network.AddressGroup
+
+			err := na.FromIterator(netmapcore.Node(ni))
+			require.NoError(t, err)
+
+			as[j] = network.StringifyGroup(na)
+
+			ns[j] = ni
+		}
+
+		mNodes[i] = ns
+		mAddr[i] = as
+	}
+
+	return mNodes, mAddr
+}

From d2a59b2de8572952df34e2b66c3bf51d03ce13d9 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 14 Oct 2024 15:51:37 +0300
Subject: [PATCH 084/591] [#1429] lens/explorer: Fix locked object records display text

Display texts for a locked object and a list of its lockers were
mistakenly swapped.
Signed-off-by: Aleksey Savchuk --- cmd/frostfs-lens/internal/schema/metabase/records/string.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go index a6c70d537..ec0ab8e1a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go @@ -38,7 +38,7 @@ func (r *ContainerVolumeRecord) String() string { func (r *LockedRecord) String() string { return fmt.Sprintf( - "Locker OID %s %c Locked [%d]OID {...}", + "Object OID %s %c Lockers [%d]OID {...}", common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua), tview.Borders.Vertical, len(r.ids), From 714ff784fa460767e82527b71fd520932b0256ed Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Mon, 14 Oct 2024 17:31:26 +0300 Subject: [PATCH 085/591] [#1431] objsvc: Use specific values in message about address mismatch This makes troubleshooting failed operations much easier Signed-off-by: Vitaliy Potyarkin --- pkg/services/object/common/target/target.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index a2d6b4d39..9e0f49297 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -86,7 +86,7 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter user.IDFromKey(&ownerSession, key.PublicKey) if !ownerObj.Equals(ownerSession) { - return nil, errors.New("session token is missing but object owner id is different from the default key") + return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) } } else { if !ownerObj.Equals(sessionInfo.Owner) { From 3012286452e8b2bb04a6ad9b70e364b00b29919f Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 17:10:03 +0300 Subject: [PATCH 086/591] [#1431] metabase: Fix unreachable code Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/metabase/delete.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 3add1f268..b5ac22017 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -453,7 +453,7 @@ func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func hasAnyItem(b *bbolt.Bucket) bool { var hasAnyItem bool c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { + for k, _ := c.First(); k != nil; { hasAnyItem = true break } From d53732f663ce46ff29196782b5d836a37c1f6c7d Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 17:17:00 +0300 Subject: [PATCH 087/591] [#1431] engine: Delete always false condition Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/engine/evacuate.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index c1b9276f3..940e30323 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -56,9 +56,6 @@ func (s EvacuateScope) String() string { var sb strings.Builder first := true if s&EvacuateScopeObjects == EvacuateScopeObjects { - if !first { - sb.WriteString(";") - } sb.WriteString("objects") first = false } From 
63466d71b22d8b23eff4d0fc1eea5a16d36a138b Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 17:20:14 +0300 Subject: [PATCH 088/591] [#1431] engine: Delete unused constants Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/ape_manager/add_chain.go | 9 ++++----- internal/logs/logs.go | 14 -------------- pkg/morph/client/container/client.go | 1 - 3 files changed, 4 insertions(+), 20 deletions(-) diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go index c6622da25..a85f3c93e 100644 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go @@ -31,11 +31,10 @@ const ( ) const ( - defaultNamespace = "" - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" + namespaceTarget = "namespace" + containerTarget = "container" + userTarget = "user" + groupTarget = "group" ) var errUnknownTargetType = errors.New("unknown target type") diff --git a/internal/logs/logs.go b/internal/logs/logs.go index ca783a39d..b4bc31b0c 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -14,8 +14,6 @@ const ( InterruptPlacementIterationByContext = "interrupt placement iteration by context" Notification = "notification" - - SkipDeprecatedNotification = "skip deprecated notification" ) const ( @@ -41,8 +39,6 @@ const ( InnerringCantUpdatePersistentState = "can't update persistent state" InnerringCloserError = "closer error" InnerringReadConfigFromBlockchain = "read config from blockchain" - NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications" - NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification" PolicerCouldNotGetContainer = "could not get container" PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal" PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container" @@ -61,7 +57,6 @@ const ( ReplicatorCouldNotReplicateObject = "could not replicate object" ReplicatorObjectSuccessfullyReplicated = "object successfully replicated" TreeRedirectingTreeServiceQuery = "redirecting tree service query" - TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL" TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree" TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree" TreeSynchronizeTree = "synchronize tree" @@ -107,7 +102,6 @@ const ( GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed" GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object" GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object" - GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object" GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object" GetAssemblingSplittedObjectCompleted = "assembling splitted object completed" GetAssemblingECObjectCompleted = "assembling erasure-coded object completed" @@ -271,9 +265,7 @@ const ( ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" - WritecacheTriedToFlushItemsFromWritecache = "tried to flush 
items from write-cache" WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" - WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database" WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza" @@ -413,11 +405,6 @@ const ( FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification" FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt" FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt" - FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers" - FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container" - FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object" - FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications" - FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification" FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value" FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage" FrostFSNodeFailedInitTracing = "failed init tracing" @@ -461,7 +448,6 @@ const ( FSTreeCantUnmarshalObject = "can't unmarshal an object" FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor" FSTreeCantUpdateID = "can't update object storage ID" - FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB" PutSingleRedirectFailure = "failed to redirect PutSingle request" StorageIDRetrievalFailure = "can't get storage ID from metabase" ObjectRemovalFailureBlobStor = "can't remove object from blobStor" diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index fc892aafb..f735a5ff7 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -33,7 +33,6 @@ const ( startEstimationMethod = "startContainerEstimation" stopEstimationMethod = "stopContainerEstimation" - putSizeMethod = "putContainerSize" listSizesMethod = "listContainerSizes" getSizeMethod = "getContainerSize" From 00b1cecfb7486aac93e8806caa6563fe75eabc1b Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 17:25:00 +0300 Subject: [PATCH 089/591] [#1431] obj_storage/shard: Fix visibility of 'newMetricStore' Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/shard/control_test.go | 4 ++-- pkg/local_object_storage/shard/metrics_test.go | 4 ++-- pkg/local_object_storage/shard/reload_test.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index 44fee1636..6b9eaa550 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ b/pkg/local_object_storage/shard/control_test.go @@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), @@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 
56622326a..cec5a12ad 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -35,7 +35,7 @@ type metricsStore struct { refillStatus string } -func NewMetricStore() *metricsStore { +func newMetricStore() *metricsStore { return &metricsStore{ objCounters: map[string]uint64{ "phy": 0, @@ -404,7 +404,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { }), } - mm := NewMetricStore() + mm := newMetricStore() sh := New( WithID(NewIDFromBytes([]byte{})), diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go index 7dacbfa6c..7dd7189bb 100644 --- a/pkg/local_object_storage/shard/reload_test.go +++ b/pkg/local_object_storage/shard/reload_test.go @@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) { WithMetaBaseOptions(metaOpts...), WithPiloramaOptions( pilorama.WithPath(filepath.Join(p, "pilorama"))), - WithMetricsWriter(NewMetricStore()), + WithMetricsWriter(newMetricStore()), } sh := New(opts...) From f6582081a4ee67e97773f655b8f18148946c5a0c Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 17:31:15 +0300 Subject: [PATCH 090/591] [#1431] obj_storage/metabase: Delete unused variable Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/metabase/put.go | 5 ++--- pkg/services/object/remote_reader.go | 3 --- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 0c14196b7..b329e8032 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -59,9 +59,8 @@ func (p *PutPrm) SetIndexAttributes(v bool) { } var ( - ErrUnknownObjectType = errors.New("unknown object type") - ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it") - ErrIncorrectRootObject = errors.New("invalid root object") + ErrUnknownObjectType = errors.New("unknown object type") + ErrIncorrectRootObject = errors.New("invalid root object") ) // Put saves object header in metabase. Object payload expected to be cut. diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go index 18b6107cf..bc6ffd160 100644 --- a/pkg/services/object/remote_reader.go +++ b/pkg/services/object/remote_reader.go @@ -2,7 +2,6 @@ package object import ( "context" - "errors" "fmt" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -35,8 +34,6 @@ type RemoteRequestPrm struct { const remoteOpTTL = 1 -var ErrNotFound = errors.New("object header not found") - // NewRemoteReader creates, initializes and returns new RemoteHeader instance. 
func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader { return &RemoteReader{ From d83879d4b859f016a9bfef808b19324ce593814e Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 18:05:55 +0300 Subject: [PATCH 091/591] [#1431] node: Fix comment format Signed-off-by: Alexander Chuprov --- cmd/frostfs-node/config/profiler/config.go | 2 +- cmd/frostfs-node/object.go | 2 +- pkg/core/netmap/keys.go | 2 +- .../netmap/nodevalidation/locode/deps.go | 16 ++++++++-------- pkg/innerring/processors/netmap/processor.go | 2 +- pkg/local_object_storage/shard/metrics.go | 2 +- pkg/morph/client/actor.go | 2 +- pkg/morph/event/container/put_notary.go | 2 +- pkg/network/group.go | 4 ++-- pkg/services/control/ir/server/deps.go | 2 +- pkg/services/control/rpc.go | 2 +- pkg/services/control/server/server.go | 4 ++-- pkg/services/netmap/executor.go | 4 ++-- pkg/services/object/common/writer/distributed.go | 2 +- pkg/services/object/delete/service.go | 4 ++-- pkg/services/object/patch/service.go | 2 +- pkg/services/policer/option.go | 2 +- pkg/util/rand/rand.go | 2 +- pkg/util/sdnotify/sdnotify.go | 2 +- 19 files changed, 30 insertions(+), 30 deletions(-) diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go index 191694970..6c3e8adab 100644 --- a/cmd/frostfs-node/config/profiler/config.go +++ b/cmd/frostfs-node/config/profiler/config.go @@ -52,7 +52,7 @@ func Address(c *config.Config) string { return AddressDefault } -// BlockRates returns the value of "block_rate" config parameter +// BlockRate returns the value of "block_rate" config parameter // from "pprof" section. func BlockRate(c *config.Config) int { s := c.Sub(subsection) diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 68acb05d3..c484c5d8c 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -117,7 +117,7 @@ func (i *delNetInfo) TombstoneLifetime() (uint64, error) { return i.cfg.cfgObject.tombstoneLifetime.Load(), nil } -// returns node owner ID calculated from configured private key. +// LocalNodeID returns node owner ID calculated from configured private key. // // Implements method needed for Object.Delete service. func (i *delNetInfo) LocalNodeID() user.ID { diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go index 29cb2dc94..0c64bb798 100644 --- a/pkg/core/netmap/keys.go +++ b/pkg/core/netmap/keys.go @@ -2,6 +2,6 @@ package netmap // AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes. type AnnouncedKeys interface { - // Checks if the key was announced by a local node. + // IsLocalKey checks if the key was announced by a local node. IsLocalKey(key []byte) bool } diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go index 8f6667933..ba5db9205 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go @@ -8,38 +8,38 @@ import ( // Record is an interface of read-only // FrostFS LOCODE database single entry. type Record interface { - // Must return ISO 3166-1 alpha-2 + // CountryCode must return ISO 3166-1 alpha-2 // country code. // // Must not return nil. CountryCode() *locodedb.CountryCode - // Must return English short country name + // CountryName must return English short country name // officially used by the ISO 3166 // Maintenance Agency (ISO 3166/MA). 
CountryName() string - // Must return UN/LOCODE 3-character code + // LocationCode must return UN/LOCODE 3-character code // for the location (numerals 2-9 may also // be used). // // Must not return nil. LocationCode() *locodedb.LocationCode - // Must return name of the location which + // LocationName must return name of the location which // have been allocated a UN/LOCODE without // diacritic sign. LocationName() string - // Must return ISO 1-3 character alphabetic + // SubDivCode Must return ISO 1-3 character alphabetic // and/or numeric code for the administrative // division of the country concerned. SubDivCode() string - // Must return subdivision name. + // SubDivName must return subdivision name. SubDivName() string - // Must return existing continent where is + // Continent must return existing continent where is // the location. // // Must not return nil. @@ -49,7 +49,7 @@ type Record interface { // DB is an interface of read-only // FrostFS LOCODE database. type DB interface { - // Must find the record that corresponds to + // Get must find the record that corresponds to // LOCODE and provides the Record interface. // // Must return an error if Record is nil. diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index e8fb8721b..4cecda59c 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -43,7 +43,7 @@ type ( // of information about the node and its finalization for adding // to the network map. NodeValidator interface { - // Must verify and optionally update NodeInfo structure. + // VerifyAndUpdate must verify and optionally update NodeInfo structure. // // Must return an error if NodeInfo input is invalid. // Must return an error if it is not possible to correctly diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go index 91bf8d0ae..087ba42ef 100644 --- a/pkg/local_object_storage/shard/metrics.go +++ b/pkg/local_object_storage/shard/metrics.go @@ -22,7 +22,7 @@ type MetricsWriter interface { // SetShardID must set (update) the shard identifier that will be used in // metrics. SetShardID(id string) - // SetReadonly must set shard mode. + // SetMode set mode of shard. SetMode(mode mode.Mode) // SetContainerObjectsCount sets container object count. SetContainerObjectsCount(cnrID string, objectType string, value uint64) diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go index b6718dea5..2849f3052 100644 --- a/pkg/morph/client/actor.go +++ b/pkg/morph/client/actor.go @@ -16,7 +16,7 @@ type actorProvider interface { GetRPCActor() actor.RPCActor } -// Client switches an established connection with neo-go if it is broken. +// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken. // This leads to an invalidation of an rpc actor within Client. That means the // components that are initilized with the rpc actor may unintentionally use // it when it is already invalidated. SwitchRPCGuardedActor is used to prevent diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go index f5779ced6..6b2ee7b0a 100644 --- a/pkg/morph/event/container/put_notary.go +++ b/pkg/morph/event/container/put_notary.go @@ -46,7 +46,7 @@ const ( // put container requests. PutNotaryEvent = "put" - // PutNotaryEvent is an ID of notary "put named container" notification. + // PutNamedNotaryEvent is an ID of notary "put named container" notification. 
PutNamedNotaryEvent = "putNamed" ) diff --git a/pkg/network/group.go b/pkg/network/group.go index a6de0653e..9843b14d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -67,11 +67,11 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // Must iterate over network addresses and pass each one + // IterateAddresses must iterate over network addresses and pass each one // to the handler until it returns true. IterateAddresses(func(string) bool) - // Must return number of addresses in group. + // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int } diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go index 0c2de5300..9d5cfefc8 100644 --- a/pkg/services/control/ir/server/deps.go +++ b/pkg/services/control/ir/server/deps.go @@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ // HealthChecker is component interface for calculating // the current health status of a node. type HealthChecker interface { - // Must calculate and return current health status of the IR application. + // HealthStatus must calculate and return current health status of the IR application. // // If status can not be calculated for any reason, // control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned. diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 80aece008..04524a68c 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -292,7 +292,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride return wResp.message, nil } -// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC. +// GetChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC. func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) { wResp := newResponseWrapper[GetChainLocalOverrideResponse]() wReq := &requestWrapper{m: req} diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go index f3fe56a46..b6fdcb246 100644 --- a/pkg/services/control/server/server.go +++ b/pkg/services/control/server/server.go @@ -26,13 +26,13 @@ type Server struct { // HealthChecker is component interface for calculating // the current health status of a node. type HealthChecker interface { - // Must calculate and return current status of the node in FrostFS network map. + // NetmapStatus must calculate and return current status of the node in FrostFS network map. // // If status can not be calculated for any reason, // control.netmapStatus_STATUS_UNDEFINED should be returned. NetmapStatus() control.NetmapStatus - // Must calculate and return current health status of the node application. + // HealthStatus must calculate and return current health status of the node application. // // If status can not be calculated for any reason, // control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned. diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 496b07a98..ae2044246 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -26,7 +26,7 @@ type executorSvc struct { // NodeState encapsulates information // about current node state. 
type NodeState interface { - // Must return current node state + // LocalNodeInfo must return current node state // in FrostFS API v2 NodeInfo structure. LocalNodeInfo() (*netmap.NodeInfo, error) @@ -39,7 +39,7 @@ type NodeState interface { // NetworkInfo encapsulates source of the // recent information about the FrostFS network. type NetworkInfo interface { - // Must return recent network information in FrostFS API v2 NetworkInfo structure. + // Dump must return recent network information in FrostFS API v2 NetworkInfo structure. // // If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset. Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error) diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go index f62934bed..f7486eae7 100644 --- a/pkg/services/object/common/writer/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -28,7 +28,7 @@ type distributedWriter struct { resetSuccessAfterOnBroadcast bool } -// parameters and state of container Traversal. +// Traversal parameters and state of container. type Traversal struct { Opts []placement.Option diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 0ba21eee3..e4f7a8c50 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -27,11 +27,11 @@ type Option func(*cfg) type NetworkInfo interface { netmap.State - // Must return the lifespan of the tombstones + // TombstoneLifetime must return the lifespan of the tombstones // in the FrostFS epochs. TombstoneLifetime() (uint64, error) - // Returns user ID of the local storage node. Result must not be nil. + // LocalNodeID returns user ID of the local storage node. Result must not be nil. // New tombstone objects will have the result as an owner ID if removal is executed w/o a session. LocalNodeID() user.ID } diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go index f1082dfff..953f82b48 100644 --- a/pkg/services/object/patch/service.go +++ b/pkg/services/object/patch/service.go @@ -26,7 +26,7 @@ func NewService(cfg *objectwriter.Config, } } -// Put calls internal service and returns v2 object streamer. +// Patch calls internal service and returns v2 object streamer. func (s *Service) Patch() (object.PatchObjectStream, error) { nodeKey, err := s.Config.KeyStorage.GetKey(nil) if err != nil { diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go index 9dbfd8b9f..336f7a0ab 100644 --- a/pkg/services/policer/option.go +++ b/pkg/services/policer/option.go @@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option { } } -// WithRemoteObjectHeader returns option to set remote object header receiver of Policer. +// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer. func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option { return func(c *cfg) { c.remoteHeader = v diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go index 97508f82a..a06296a07 100644 --- a/pkg/util/rand/rand.go +++ b/pkg/util/rand/rand.go @@ -13,7 +13,7 @@ func Uint64() uint64 { return source.Uint64() } -// Uint64 returns a random uint32 value. +// Uint32 returns a random uint32 value. 
func Uint32() uint32 { return source.Uint32() } diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go index e94ff77ad..22549bc96 100644 --- a/pkg/util/sdnotify/sdnotify.go +++ b/pkg/util/sdnotify/sdnotify.go @@ -24,7 +24,7 @@ var ( errSocketIsNotInitialized = errors.New("socket is not initialized") ) -// Initializes socket with provided name of +// InitSocket initializes socket with provided name of // environment variable. func InitSocket() error { notifySocket := os.Getenv("NOTIFY_SOCKET") From 41038b2ec0fab0d9488f15330e8777f053a28c03 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 18:16:59 +0300 Subject: [PATCH 092/591] [#1431] node: Fix 'empty slice declaration using a literal' Signed-off-by: Alexander Chuprov --- pkg/innerring/processors/alphabet/handlers_test.go | 6 +++--- pkg/local_object_storage/shard/control.go | 2 +- pkg/morph/event/notary_preparator_test.go | 2 +- scripts/populate-metabase/internal/generate.go | 8 ++++---- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index dfda37472..c7a004b54 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -95,7 +95,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { t.Parallel() var emission uint64 = 100_000 var index int = 5 - var parsedWallets []util.Uint160 = []util.Uint160{} + var parsedWallets []util.Uint160 alphabetContracts := innerring.NewAlphabetContracts() for i := range index + 1 { @@ -167,7 +167,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { t.Parallel() var emission uint64 = 100_000 var index int = 5 - var parsedWallets []util.Uint160 = []util.Uint160{} + var parsedWallets []util.Uint160 alphabetContracts := innerring.NewAlphabetContracts() for i := range index + 1 { @@ -176,7 +176,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { morphClient := &testMorphClient{} - nodes := []netmap.NodeInfo{} + var nodes []netmap.NodeInfo network := &netmap.NetMap{} network.SetNodes(nodes) diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 4f9f25608..62800dbd0 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -368,7 +368,7 @@ func (s *Shard) Close() error { if s.rb != nil { s.rb.Stop(s.log) } - components := []interface{ Close() error }{} + var components []interface{ Close() error } if s.pilorama != nil { components = append(components, s.pilorama) diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go index 4c269bcbd..60ddb4601 100644 --- a/pkg/morph/event/notary_preparator_test.go +++ b/pkg/morph/event/notary_preparator_test.go @@ -25,7 +25,7 @@ var ( alphaKeys keys.PublicKeys wrongAlphaKeys keys.PublicKeys - dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in + dummyAlphabetInvocationScript []byte dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...) 
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index d2004b673..8a96dcaaa 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -18,7 +18,7 @@ import ( ) func GeneratePayloadPool(count uint, size uint) [][]byte { - pool := [][]byte{} + var pool [][]byte for i := uint(0); i < count; i++ { payload := make([]byte, size) _, _ = rand.Read(payload) @@ -29,7 +29,7 @@ func GeneratePayloadPool(count uint, size uint) [][]byte { } func GenerateAttributePool(count uint) []objectSDK.Attribute { - pool := []objectSDK.Attribute{} + var pool []objectSDK.Attribute for i := uint(0); i < count; i++ { for j := uint(0); j < count; j++ { attr := *objectSDK.NewAttribute() @@ -42,7 +42,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute { } func GenerateOwnerPool(count uint) []user.ID { - pool := []user.ID{} + var pool []user.ID for i := uint(0); i < count; i++ { pool = append(pool, usertest.ID()) } @@ -117,7 +117,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption { func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption { return func(obj *objectSDK.Object) { - attrs := []objectSDK.Attribute{} + var attrs []objectSDK.Attribute for i := uint(0); i < count; i++ { attrs = append(attrs, pool[rand.Intn(len(pool))]) } From 07ce40e1196a44d305390dcea8e1e0040f6a16d2 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 15 Oct 2024 12:28:58 +0300 Subject: [PATCH 093/591] [#1430] adm/morph: Add NNS address display in 'deploy' Signed-off-by: Alexander Chuprov --- cmd/frostfs-adm/internal/modules/morph/helper/contract.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go index 2011301d1..eea3b040e 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go @@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error { return fmt.Errorf("can't send deploy transaction: %w", err) } + c.Command.Println("NNS hash:", invokeHash.StringLE()) return c.AwaitTx() } From 90f36693995e1b411094686e4419bb7d11831f35 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 14 Oct 2024 16:07:38 +0300 Subject: [PATCH 094/591] [#1342] network/cache: Add node address to error multiClient Signed-off-by: Alexander Chuprov --- pkg/network/cache/multi.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 9305c143b..b83cbb217 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -169,15 +169,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError + if err != nil { + err = fmt.Errorf("client connection error at %v: %w", addr, err) + x.ReportError(err) + } + success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr) if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) { firstErr = err } - if err != nil { - x.ReportError(err) - } - return success }) From b0c5def2d934ed5b79f54fb37160560f576785f4 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 17 Oct 2024 14:16:03 +0300 Subject: [PATCH 095/591] [#1433] shard/test: Use WithDisabledGC() option where possible Signed-off-by: Evgenii Stratonikov 
---
 pkg/local_object_storage/shard/gc_internal_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 3993593ad..11db5e54e 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -73,10 +73,10 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
 			return pool
 		}),
 		WithGCRemoverSleepInterval(1 * time.Second),
+		WithDisabledGC(),
 	}

 	sh = New(opts...)
-	sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
 	defer func() { require.NoError(t, sh.Close()) }()

From b42bcdc6fa6cca2cf8e5a5fbaf2c8cf82f957b37 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 17 Oct 2024 14:37:26 +0300
Subject: [PATCH 096/591] [#1433] services/object: Put object before auxiliary info

Consider the following operations ordering:
1. Inhume(with tombstone A) --> add tombstone mark for an object
2. --> new epoch arrives
3. --> GCMark is added for a tombstone A, because it is unavailable
4. Put(A) --> return error, because the object already has a GCMark

It is possible, and I have successfully reproduced it with a test on the
shard level. However, the error is related to the specific _ordering_ of
operations with engine. And triggering race-conditions like this is only
possible on a shard level currently, so no tests are written.

Signed-off-by: Evgenii Stratonikov
---
 pkg/services/object/common/writer/local.go | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index e219b44dd..cf3d03275 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -32,6 +32,10 @@ type LocalTarget struct {
 }

 func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+	if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
+		return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+	}
+
 	switch meta.Type() {
 	case objectSDK.TypeTombstone:
 		err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
@@ -47,8 +51,5 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
 		// objects that do not change meta storage
 	}

-	if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
-		return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
-	}
 	return nil
 }

From 3304afa9d1f9893ad72bcd9445751798b6558c16 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 15:24:01 +0300
Subject: [PATCH 097/591] [#1422] config: Add multinet config

Signed-off-by: Dmitrii Stepanov
---
 cmd/frostfs-node/config/multinet/config.go | 62 +++++++++++++++++++
 .../config/multinet/config_test.go         | 52 ++++++++++++++++
 config/example/ir.env                      |  9 +++
 config/example/ir.yaml                     | 15 +++++
 config/example/node.env                    | 10 +++
 config/example/node.json                   | 22 +++++++
 config/example/node.yaml                   | 15 +++++
 docs/storage-node-configuration.md         | 39 ++++++++++-
 8 files changed, 219 insertions(+), 5 deletions(-)
 create mode 100644 cmd/frostfs-node/config/multinet/config.go
 create mode 100644 cmd/frostfs-node/config/multinet/config_test.go

diff --git a/cmd/frostfs-node/config/multinet/config.go 
b/cmd/frostfs-node/config/multinet/config.go new file mode 100644 index 000000000..f598efc51 --- /dev/null +++ b/cmd/frostfs-node/config/multinet/config.go @@ -0,0 +1,62 @@ +package multinet + +import ( + "strconv" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "multinet" + + FallbackDelayDefault = 300 * time.Millisecond +) + +// Enabled returns the value of "enabled" config parameter from "multinet" section. +func Enabled(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "enabled") +} + +type Subnet struct { + Mask string + SourceIPs []string +} + +// Subnets returns the value of "subnets" config parameter from "multinet" section. +func Subnets(c *config.Config) []Subnet { + var result []Subnet + sub := c.Sub(subsection).Sub("subnets") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + mask := config.StringSafe(s, "mask") + if mask == "" { + break + } + sourceIPs := config.StringSliceSafe(s, "source_ips") + result = append(result, Subnet{ + Mask: mask, + SourceIPs: sourceIPs, + }) + } + return result +} + +// Balancer returns the value of "balancer" config parameter from "multinet" section. +func Balancer(c *config.Config) string { + return config.StringSafe(c.Sub(subsection), "balancer") +} + +// Restrict returns the value of "restrict" config parameter from "multinet" section. +func Restrict(c *config.Config) bool { + return config.BoolSafe(c.Sub(subsection), "restrict") +} + +// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section. +func FallbackDelay(c *config.Config) time.Duration { + fd := config.DurationSafe(c.Sub(subsection), "fallback_delay") + if fd != 0 { // negative value means no fallback + return fd + } + return FallbackDelayDefault +} diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go new file mode 100644 index 000000000..5f7dc6d53 --- /dev/null +++ b/cmd/frostfs-node/config/multinet/config_test.go @@ -0,0 +1,52 @@ +package multinet + +import ( + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestMultinetSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + require.Equal(t, false, Enabled(empty)) + require.Equal(t, ([]Subnet)(nil), Subnets(empty)) + require.Equal(t, "", Balancer(empty)) + require.Equal(t, false, Restrict(empty)) + require.Equal(t, FallbackDelayDefault, FallbackDelay(empty)) + }) + + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + require.Equal(t, true, Enabled(c)) + require.Equal(t, []Subnet{ + { + Mask: "192.168.219.174/24", + SourceIPs: []string{ + "192.168.218.185", + "192.168.219.185", + }, + }, + { + Mask: "10.78.70.74/24", + SourceIPs: []string{ + "10.78.70.185", + "10.78.71.185", + }, + }, + }, Subnets(c)) + require.Equal(t, "roundrobin", Balancer(c)) + require.Equal(t, false, Restrict(c)) + require.Equal(t, 350*time.Millisecond, FallbackDelay(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/config/example/ir.env b/config/example/ir.env index 7234a4b32..ebd91c243 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -80,3 +80,12 @@ 
FROSTFS_IR_PPROF_MUTEX_RATE=10000 FROSTFS_IR_PROMETHEUS_ENABLED=true FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090 FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s + +FROSTFS_MULTINET_ENABLED=true +FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" +FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" +FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" +FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" +FROSTFS_MULTINET_BALANCER=roundrobin +FROSTFS_MULTINET_RESTRICT=false +FROSTFS_MULTINET_FALLBACK_DELAY=350ms diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 4c64f088b..49f9fd324 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -123,3 +123,18 @@ prometheus: systemdnotify: enabled: true + +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms diff --git a/config/example/node.env b/config/example/node.env index 6618a981a..580d343fb 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -206,3 +206,13 @@ FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824 # AUDIT section FROSTFS_AUDIT_ENABLED=true + +# MULTINET section +FROSTFS_MULTINET_ENABLED=true +FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24" +FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185" +FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24" +FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" +FROSTFS_MULTINET_BALANCER=roundrobin +FROSTFS_MULTINET_RESTRICT=false +FROSTFS_MULTINET_FALLBACK_DELAY=350ms diff --git a/config/example/node.json b/config/example/node.json index 0d100ed80..3470d2d12 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -264,5 +264,27 @@ }, "audit": { "enabled": true + }, + "multinet": { + "enabled": true, + "subnets": [ + { + "mask": "192.168.219.174/24", + "source_ips": [ + "192.168.218.185", + "192.168.219.185" + ] + }, + { + "mask": "10.78.70.74/24", + "source_ips":[ + "10.78.70.185", + "10.78.71.185" + ] + } + ], + "balancer": "roundrobin", + "restrict": false, + "fallback_delay": "350ms" } } diff --git a/config/example/node.yaml b/config/example/node.yaml index 2a80fba18..2a963fc0f 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -240,3 +240,18 @@ runtime: audit: enabled: true + +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index c74695e2b..2b94400df 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -25,8 +25,8 @@ There are some custom types used for brevity: | `replicator` | [Replicator service configuration](#replicator-section) | | `storage` | [Storage engine configuration](#storage-section) | | `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | - +| `audit` | [Audit configuration](#audit-section) | +| `multinet` | [Multinet configuration](#multinet-section) | # `control` section ```yaml @@ -435,6 +435,35 @@ audit: enabled: true ``` -| Parameter | Type | Default value | Description | 
-|---------------------|--------|---------------|---------------------------------------------------| -| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. | +| Parameter | Type | Default value | Description | +|-----------|--------|---------------|---------------------------------------------------| +| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. | + + +# `multinet` section +Contains multinet parameters. + +```yaml +multinet: + enabled: true + subnets: + - mask: 192.168.219.174/24 + source_ips: + - 192.168.218.185 + - 192.168.219.185 + - mask: 10.78.70.74/24 + source_ips: + - 10.78.70.185 + - 10.78.71.185 + balancer: roundrobin + restrict: false + fallback_delay: 350ms +``` + +| Parameter | Type | Default value | Description | +| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | false | If `true` then source-based routing is enabled. | +| `subnets` | `subnet` | empty | Resulting subnets. | +| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | +| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | +| `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | From 74db7352653b67e67e9345e0659fd37047fec710 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 8 Oct 2024 17:25:37 +0300 Subject: [PATCH 098/591] [#1422] node: Add dialer source to config Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 30 +++++++++++++++ go.mod | 1 + go.sum | 2 + internal/logs/logs.go | 1 + internal/net/config.go | 66 +++++++++++++++++++++++++++++++++ internal/net/dial_target.go | 54 +++++++++++++++++++++++++++ internal/net/dialer.go | 35 ++++++++++++++++++ internal/net/dialer_source.go | 69 +++++++++++++++++++++++++++++++++++ 8 files changed, 258 insertions(+) create mode 100644 internal/net/config.go create mode 100644 internal/net/dial_target.go create mode 100644 internal/net/dialer.go create mode 100644 internal/net/dialer_source.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 3c7e310b4..dc1bad485 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -26,12 +26,14 @@ import ( fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" 
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" @@ -436,6 +438,8 @@ type shared struct { metricsCollector *metrics.NodeMetrics metricsSvc *objectService.MetricCollector + + dialerSource *internalNet.DialerSource } // dynamicConfiguration stores parameters of the @@ -760,6 +764,9 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) + ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg)) + fatalOnErr(err) + cacheOpts := cache.ClientCacheOpts{ DialTimeout: apiclientconfig.DialTimeout(appCfg), StreamTimeout: apiclientconfig.StreamTimeout(appCfg), @@ -778,9 +785,27 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt putClientCache: cache.NewSDKClientCache(cacheOpts), persistate: persistate, metricsCollector: metrics.NewNodeMetrics(), + dialerSource: ds, } } +func internalNetConfig(appCfg *config.Config) internalNet.Config { + result := internalNet.Config{ + Enabled: multinet.Enabled(appCfg), + Balancer: multinet.Balancer(appCfg), + Restrict: multinet.Restrict(appCfg), + FallbackDelay: multinet.FallbackDelay(appCfg), + } + sn := multinet.Subnets(appCfg) + for _, s := range sn { + result.Subnets = append(result.Subnets, internalNet.Subnet{ + Prefix: s.Mask, + SourceIPs: s.SourceIPs, + }) + } + return result +} + func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) @@ -1336,6 +1361,11 @@ func (c *cfg) reloadConfig(ctx context.Context) { } } + if err := c.dialerSource.Update(internalNetConfig(c.appCfg)); err != nil { + c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err)) + return + } + c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } diff --git a/go.mod b/go.mod index 1468c12b2..a84d3122a 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 git.frostfs.info/TrueCloudLab/hrw v1.2.1 + git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 diff --git a/go.sum b/go.sum index 5ce81807a..43d53aa40 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,8 @@ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= +git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= +git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= 
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= diff --git a/internal/logs/logs.go b/internal/logs/logs.go index b4bc31b0c..0e9d58f32 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -523,4 +523,5 @@ const ( WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty" BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" WritecacheCantGetObject = "can't get an object from fstree" + FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" ) diff --git a/internal/net/config.go b/internal/net/config.go new file mode 100644 index 000000000..10450db23 --- /dev/null +++ b/internal/net/config.go @@ -0,0 +1,66 @@ +package net + +import ( + "errors" + "fmt" + "net/netip" + "slices" + "time" + + "git.frostfs.info/TrueCloudLab/multinet" +) + +var errEmptySourceIPList = errors.New("empty source IP list") + +type Subnet struct { + Prefix string + SourceIPs []string +} + +type Config struct { + Enabled bool + Subnets []Subnet + Balancer string + Restrict bool + FallbackDelay time.Duration +} + +func (c Config) toMultinetConfig() (multinet.Config, error) { + var subnets []multinet.Subnet + for _, s := range c.Subnets { + var ms multinet.Subnet + p, err := netip.ParsePrefix(s.Prefix) + if err != nil { + return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err) + } + ms.Prefix = p + for _, ip := range s.SourceIPs { + addr, err := netip.ParseAddr(ip) + if err != nil { + return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err) + } + ms.SourceIPs = append(ms.SourceIPs, addr) + } + if len(ms.SourceIPs) == 0 { + return multinet.Config{}, errEmptySourceIPList + } + subnets = append(subnets, ms) + } + return multinet.Config{ + Subnets: subnets, + Balancer: multinet.BalancerType(c.Balancer), + Restrict: c.Restrict, + FallbackDelay: c.FallbackDelay, + Dialer: newDefaulDialer(), + }, nil +} + +func (c Config) equals(other Config) bool { + return c.Enabled == other.Enabled && + slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool { + return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs) + }) && + c.Balancer == other.Balancer && + c.Restrict == other.Restrict && + c.FallbackDelay == other.FallbackDelay +} diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go new file mode 100644 index 000000000..6265f1860 --- /dev/null +++ b/internal/net/dial_target.go @@ -0,0 +1,54 @@ +// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go + +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package net + +import ( + "net/url" + "strings" +) + +// parseDialTarget returns the network and address to pass to dialer. 
+func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/internal/net/dialer.go b/internal/net/dialer.go new file mode 100644 index 000000000..4537490f6 --- /dev/null +++ b/internal/net/dialer.go @@ -0,0 +1,35 @@ +package net + +import ( + "context" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +type Dialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +func newDefaulDialer() net.Dialer { + // From `grpc.WithContextDialer` comment: + // + // Note: All supported releases of Go (as of December 2023) override the OS + // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive + // with OS defaults for keepalive time and interval, use a net.Dialer that sets + // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket + // option to true from the Control field. For a concrete example of how to do + // this, see internal.NetDialerWithTCPKeepalive(). + // + // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432 + return net.Dialer{ + KeepAlive: time.Duration(-1), + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go new file mode 100644 index 000000000..e6a142a08 --- /dev/null +++ b/internal/net/dialer_source.go @@ -0,0 +1,69 @@ +package net + +import ( + "context" + "net" + "sync" + + "git.frostfs.info/TrueCloudLab/multinet" +) + +type DialerSource struct { + guard sync.RWMutex + + c Config + + md multinet.Dialer +} + +func NewDialerSource(c Config) (*DialerSource, error) { + result := &DialerSource{} + if err := result.build(c); err != nil { + return nil, err + } + return result, nil +} + +func (s *DialerSource) build(c Config) error { + if c.Enabled { + mc, err := c.toMultinetConfig() + if err != nil { + return err + } + md, err := multinet.NewDialer(mc) + if err != nil { + return err + } + s.md = md + s.c = c + return nil + } + s.md = nil + s.c = c + return nil +} + +// GrpcContextDialer returns grpc.WithContextDialer func. +// Returns nil if multinet disabled. 
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) { + s.guard.RLock() + defer s.guard.RUnlock() + + if s.c.Enabled { + return func(ctx context.Context, address string) (net.Conn, error) { + network, address := parseDialTarget(address) + return s.md.DialContext(ctx, network, address) + } + } + return nil +} + +func (s *DialerSource) Update(c Config) error { + s.guard.Lock() + defer s.guard.Unlock() + + if s.c.equals(c) { + return nil + } + return s.build(c) +} From 6c96cc2af6eb9cb64e747c5b758fbec4d90c7287 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:11:44 +0300 Subject: [PATCH 099/591] [#1422] node: Use dialer source for SDK cache Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 + internal/net/dialer.go | 6 +++++- pkg/network/cache/client.go | 2 ++ pkg/network/cache/multi.go | 25 ++++++++++++++----------- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index dc1bad485..d44597857 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -773,6 +773,7 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt Key: &key.PrivateKey, AllowExternal: apiclientconfig.AllowExternal(appCfg), ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + DialerSource: ds, } return shared{ diff --git a/internal/net/dialer.go b/internal/net/dialer.go index 4537490f6..daf0f815f 100644 --- a/internal/net/dialer.go +++ b/internal/net/dialer.go @@ -13,6 +13,10 @@ type Dialer interface { DialContext(ctx context.Context, network, address string) (net.Conn, error) } +func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) { + return d.DialContext(ctx, "tcp", address) +} + func newDefaulDialer() net.Dialer { // From `grpc.WithContextDialer` comment: // @@ -28,7 +32,7 @@ func newDefaulDialer() net.Dialer { KeepAlive: time.Duration(-1), Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { - unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) }) }, } diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go index 371d3c76f..63ae0bfdb 100644 --- a/pkg/network/cache/client.go +++ b/pkg/network/cache/client.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" ) @@ -25,6 +26,7 @@ type ( Key *ecdsa.PrivateKey ResponseCallback func(client.ResponseMetaInfo) error AllowExternal bool + DialerSource *net.DialerSource } ) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index b83cbb217..e936ead65 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -60,18 +60,21 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address prmInit.Key = *x.opts.Key } + grpcOpts := []grpc.DialOption{ + grpc.WithChainUnaryInterceptor( + metrics.NewUnaryClientInterceptor(), + tracing.NewUnaryClientInteceptor(), + ), + grpc.WithChainStreamInterceptor( + metrics.NewStreamClientInterceptor(), + tracing.NewStreamClientInterceptor(), + ), + grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), + } + prmDial := client.PrmDial{ - Endpoint: addr.URIAddr(), - GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - 
metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), - ), - grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - ), - }, + Endpoint: addr.URIAddr(), + GRPCDialOptions: grpcOpts, } if x.opts.DialTimeout > 0 { prmDial.DialTimeout = x.opts.DialTimeout From e314f328c4806bf1b34b6e3c31abdc4afdfaaac4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:18:24 +0300 Subject: [PATCH 100/591] [#1422] tree: Use dialer source for tree service connections Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/tree.go | 1 + pkg/services/tree/cache.go | 6 +++++- pkg/services/tree/options.go | 8 ++++++++ pkg/services/tree/service.go | 2 +- 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 192f08471..f188e2fbc 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -67,6 +67,7 @@ func initTreeService(c *cfg) { tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()), tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()), tree.WithNetmapState(c.cfgNetmap.state), + tree.WithDialerSource(c.dialerSource), ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 38501b852..e490cb855 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -8,6 +8,7 @@ import ( "sync" "time" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" @@ -21,6 +22,7 @@ type clientCache struct { sync.Mutex simplelru.LRU[string, cacheItem] key *ecdsa.PrivateKey + ds *internalNet.DialerSource } type cacheItem struct { @@ -36,7 +38,7 @@ const ( var errRecentlyFailed = errors.New("client has recently failed") -func (c *clientCache) init(pk *ecdsa.PrivateKey) { +func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) { if conn := value.cc; conn != nil { _ = conn.Close() @@ -44,6 +46,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey) { }) c.LRU = *l c.key = pk + c.ds = ds } func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { @@ -99,6 +102,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), ), + grpc.WithContextDialer(c.ds.GrpcContextDialer()), } if !netAddr.IsTLSEnabled() { diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 1db5607f6..1633ae557 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -45,6 +46,7 @@ type cfg struct { morphChainStorage policyengine.MorphRuleChainStorageReader metrics MetricsRegister + ds *net.DialerSource } // Option represents configuration option for a tree service. 
@@ -161,3 +163,9 @@ func WithNetmapState(state netmap.State) Option { c.state = state } } + +func WithDialerSource(ds *net.DialerSource) Option { + return func(c *cfg) { + c.ds = ds + } +} diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 60bb1a6ad..2cb2af294 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -65,7 +65,7 @@ func New(opts ...Option) *Service { s.log = &logger.Logger{Logger: zap.NewNop()} } - s.cache.init(s.key) + s.cache.init(s.key, s.ds) s.closeCh = make(chan struct{}) s.replicateCh = make(chan movePair, s.replicatorChannelCapacity) s.replicateLocalCh = make(chan applyOp) From 5b653aa65fd65759ee733fd552d17870c6660b13 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:34:36 +0300 Subject: [PATCH 101/591] [#1422] morph: Drop single client as not used Signed-off-by: Dmitrii Stepanov --- pkg/morph/client/constructor.go | 59 +++++++++------------------------ 1 file changed, 15 insertions(+), 44 deletions(-) diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index 78cb3e82f..60b5b4b97 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -41,8 +41,6 @@ type cfg struct { endpoints []Endpoint - singleCli *rpcclient.WSClient // neo-go client for single client mode - inactiveModeCb Callback switchInterval time.Duration @@ -124,40 +122,24 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er var err error var act *actor.Actor - if cfg.singleCli != nil { - // return client in single RPC node mode that uses - // predefined WS client - // - // in case of the closing web socket connection: - // if extra endpoints were provided via options, - // they will be used in switch process, otherwise - // inactive mode will be enabled - cli.client = cfg.singleCli - - act, err = newActor(cfg.singleCli, acc, *cfg) + var endpoint Endpoint + for cli.endpoints.curr, endpoint = range cli.endpoints.list { + cli.client, act, err = cli.newCli(ctx, endpoint) if err != nil { - return nil, fmt.Errorf("could not create RPC actor: %w", err) - } - } else { - var endpoint Endpoint - for cli.endpoints.curr, endpoint = range cli.endpoints.list { - cli.client, act, err = cli.newCli(ctx, endpoint) - if err != nil { - cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint, - zap.Error(err), zap.String("endpoint", endpoint.Address)) - } else { - cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint, - zap.String("endpoint", endpoint.Address)) - if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 { - cli.switchIsActive.Store(true) - go cli.switchToMostPrioritized(ctx) - } - break + cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint, + zap.Error(err), zap.String("endpoint", endpoint.Address)) + } else { + cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint, + zap.String("endpoint", endpoint.Address)) + if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 { + cli.switchIsActive.Store(true) + go cli.switchToMostPrioritized(ctx) } + break } - if cli.client == nil { - return nil, ErrNoHealthyEndpoint - } + } + if cli.client == nil { + return nil, ErrNoHealthyEndpoint } cli.setActor(act) @@ -285,17 +267,6 @@ func WithEndpoints(endpoints ...Endpoint) Option { } } -// WithSingleClient returns a client constructor option -// that specifies single neo-go client and forces Client -// to use it for requests. -// -// Passed client must already be initialized. 
-func WithSingleClient(cli *rpcclient.WSClient) Option { - return func(c *cfg) { - c.singleCli = cli - } -} - // WithConnLostCallback return a client constructor option // that specifies a callback that is called when Client // unsuccessfully tried to connect to all the specified From 67798bb50e57ecf77a6797a3a02751f54a2e02ee Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:48:16 +0300 Subject: [PATCH 102/591] [#1422] mod: Bump neoneo-go version Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a84d3122a..aefe2889a 100644 --- a/go.mod +++ b/go.mod @@ -133,4 +133,4 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 +replace github.com/nspcc-dev/neo-go => git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 diff --git a/go.sum b/go.sum index 43d53aa40..4d44079d4 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8l git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM= -git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= +git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= +git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= From fbdfd503e4c2475b01728dbfc21c269a443c93f2 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:48:59 +0300 Subject: [PATCH 103/591] [#1422] morph: Add dialer source support Signed-off-by: Dmitrii Stepanov --- internal/net/dialer_source.go | 14 ++++++++++++++ pkg/morph/client/constructor.go | 15 +++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go index e6a142a08..3d94dedc7 100644 --- a/internal/net/dialer_source.go +++ b/internal/net/dialer_source.go @@ -58,6 +58,20 @@ func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Co return nil } +// NetContextDialer returns net.DialContext dial function. +// Returns nil if multinet disabled. 
+func (s *DialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { + s.guard.RLock() + defer s.guard.RUnlock() + + if s.c.Enabled { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return s.md.DialContext(ctx, network, address) + } + } + return nil +} + func (s *DialerSource) Update(c Config) error { s.guard.Lock() defer s.guard.Unlock() diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index 60b5b4b97..2313222f0 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -4,10 +4,12 @@ import ( "context" "errors" "fmt" + "net" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" @@ -46,6 +48,8 @@ type cfg struct { switchInterval time.Duration morphCacheMetrics metrics.MorphCacheMetrics + + dialerSource *internalNet.DialerSource } const ( @@ -153,10 +157,15 @@ func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSCl if err != nil { return nil, nil, fmt.Errorf("read mtls certificates: %w", err) } + var netDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + if c.cfg.dialerSource != nil { // TODO fix after IR + netDialContext = c.cfg.dialerSource.NetContextDialer() + } cli, err := rpcclient.NewWS(ctx, endpoint.Address, rpcclient.WSOptions{ Options: rpcclient.Options{ DialTimeout: c.cfg.dialTimeout, TLSClientConfig: cfg, + NetDialContext: netDialContext, }, }) if err != nil { @@ -291,3 +300,9 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option { c.morphCacheMetrics = morphCacheMetrics } } + +func WithDialerSource(ds *internalNet.DialerSource) Option { + return func(c *cfg) { + c.dialerSource = ds + } +} From f7caef355bee787ac34f2cd44a4f9c4eb32177a3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Oct 2024 11:49:23 +0300 Subject: [PATCH 104/591] [#1422] node: Use dialer source for morph Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/morph.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 1bfcb8ac9..f93f233eb 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -48,6 +48,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { }), client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)), client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()), + client.WithDialerSource(c.dialerSource), ) if err != nil { c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient, From ef38420623c64e12aa00b7d4072bdf6b08dec247 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Oct 2024 13:51:21 +0300 Subject: [PATCH 105/591] [#1422] ir: Add dialer source Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-ir/defaults.go | 10 ++++++++++ pkg/innerring/innerring.go | 29 +++++++++++++++++++++++++++++ pkg/morph/client/constructor.go | 7 +------ 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go index 899918d22..9b775252f 100644 --- a/cmd/frostfs-ir/defaults.go +++ b/cmd/frostfs-ir/defaults.go @@ -48,6 +48,8 @@ func defaultConfiguration(cfg *viper.Viper) { cfg.SetDefault("node.kludge_compatibility_mode", 
false) cfg.SetDefault("audit.enabled", false) + + setMultinetDefaults(cfg) } func setControlDefaults(cfg *viper.Viper) { @@ -131,3 +133,11 @@ func setMorphDefaults(cfg *viper.Viper) { cfg.SetDefault("morph.validators", []string{}) cfg.SetDefault("morph.switch_interval", 2*time.Minute) } + +func setMultinetDefaults(cfg *viper.Viper) { + cfg.SetDefault("multinet.enabled", false) + cfg.SetDefault("multinet.balancer", "") + cfg.SetDefault("multinet.restrict", false) + cfg.SetDefault("multinet.fallback_delay", "0s") + cfg.SetDefault("multinet.subnets", "") +} diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 53a07e36c..a4a52edec 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/config" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" @@ -486,6 +487,12 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c return nil, fmt.Errorf("%s chain client endpoints not provided", p.name) } + nc := parseMultinetConfig(p.cfg) + ds, err := internalNet.NewDialerSource(nc) + if err != nil { + return nil, fmt.Errorf("dialer source: %w", err) + } + return client.New( ctx, p.key, @@ -498,6 +505,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c }), client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")), client.WithMorphCacheMetrics(p.morphCacheMetric), + client.WithDialerSource(ds), ) } @@ -542,6 +550,27 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) { return extraWallets, nil } +func parseMultinetConfig(cfg *viper.Viper) internalNet.Config { + nc := internalNet.Config{ + Enabled: cfg.GetBool("multinet.enabled"), + Balancer: cfg.GetString("multinet.balancer"), + Restrict: cfg.GetBool("multinet.restrict"), + FallbackDelay: cfg.GetDuration("multinet.fallback_delay"), + } + for i := 0; ; i++ { + mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i)) + if mask == "" { + break + } + sourceIPs := cfg.GetStringSlice(fmt.Sprintf("multinet.subnets.%d.source_ips", i)) + nc.Subnets = append(nc.Subnets, internalNet.Subnet{ + Prefix: mask, + SourceIPs: sourceIPs, + }) + } + return nc +} + func (s *Server) initConfigFromBlockchain() error { // get current epoch epoch, err := s.netmapClient.Epoch() diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index 2313222f0..a8efa76e7 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "net" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -157,15 +156,11 @@ func (c *Client) newCli(ctx context.Context, endpoint Endpoint) (*rpcclient.WSCl if err != nil { return nil, nil, fmt.Errorf("read mtls certificates: %w", err) } - var netDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - if c.cfg.dialerSource != nil { // TODO fix after IR - netDialContext = c.cfg.dialerSource.NetContextDialer() - } cli, err := rpcclient.NewWS(ctx, endpoint.Address, rpcclient.WSOptions{ Options: rpcclient.Options{ DialTimeout: c.cfg.dialTimeout, TLSClientConfig: cfg, - NetDialContext: netDialContext, + 
NetDialContext: c.cfg.dialerSource.NetContextDialer(), }, }) if err != nil { From 2d064d0bd87c9627aaeacb9c160cfe2bb029c56a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Oct 2024 14:22:27 +0300 Subject: [PATCH 106/591] [#1422] morph: Resolve funlen linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/morph.go | 84 ++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 40 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index f93f233eb..197e50371 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -29,6 +29,50 @@ const ( ) func initMorphComponents(ctx context.Context, c *cfg) { + initMorphClient(ctx, c) + + lookupScriptHashesInNNS(c) // smart contract auto negotiation + + if c.cfgMorph.notaryEnabled { + err := c.cfgMorph.client.EnableNotarySupport( + client.WithProxyContract( + c.cfgMorph.proxyScriptHash, + ), + ) + fatalOnErr(err) + } + + c.log.Info(logs.FrostFSNodeNotarySupport, + zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), + ) + + wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) + fatalOnErr(err) + + var netmapSource netmap.Source + + c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg) + c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) + + if c.cfgMorph.cacheTTL == 0 { + msPerBlock, err := c.cfgMorph.client.MsPerBlock() + fatalOnErr(err) + c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond + c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) + } + + if c.cfgMorph.cacheTTL < 0 { + netmapSource = wrap + } else { + // use RPC node as source of netmap (with caching) + netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) + } + + c.netMapSource = netmapSource + c.cfgNetmap.wrapper = wrap +} + +func initMorphClient(ctx context.Context, c *cfg) { addresses := morphconfig.RPCEndpoint(c.appCfg) // Morph client stable-sorts endpoints by priority. 
Shuffle here to randomize @@ -70,46 +114,6 @@ func initMorphComponents(ctx context.Context, c *cfg) { c.cfgMorph.client = cli c.cfgMorph.notaryEnabled = cli.ProbeNotary() - - lookupScriptHashesInNNS(c) // smart contract auto negotiation - - if c.cfgMorph.notaryEnabled { - err = c.cfgMorph.client.EnableNotarySupport( - client.WithProxyContract( - c.cfgMorph.proxyScriptHash, - ), - ) - fatalOnErr(err) - } - - c.log.Info(logs.FrostFSNodeNotarySupport, - zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), - ) - - wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) - fatalOnErr(err) - - var netmapSource netmap.Source - - c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg) - c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg) - - if c.cfgMorph.cacheTTL == 0 { - msPerBlock, err := c.cfgMorph.client.MsPerBlock() - fatalOnErr(err) - c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) - } - - if c.cfgMorph.cacheTTL < 0 { - netmapSource = wrap - } else { - // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) - } - - c.netMapSource = netmapSource - c.cfgNetmap.wrapper = wrap } func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { From c0a2f20eee5e6279e71cfc3d83f8bb89d0a2ef75 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 15 Oct 2024 14:46:00 +0300 Subject: [PATCH 107/591] [#1422] multinet: Add metrics Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 11 +++++++---- internal/metrics/consts.go | 2 ++ internal/metrics/innerring.go | 6 ++++++ internal/metrics/multinet.go | 35 +++++++++++++++++++++++++++++++++ internal/metrics/node.go | 6 ++++++ internal/net/config.go | 3 +++ internal/net/event_handler.go | 29 +++++++++++++++++++++++++++ pkg/innerring/initialization.go | 1 + pkg/innerring/innerring.go | 6 ++++-- 9 files changed, 93 insertions(+), 6 deletions(-) create mode 100644 internal/metrics/multinet.go create mode 100644 internal/net/event_handler.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index d44597857..9d2b77210 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -764,7 +764,9 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) - ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg)) + nodeMetrics := metrics.NewNodeMetrics() + + ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg, nodeMetrics.MultinetMetrics())) fatalOnErr(err) cacheOpts := cache.ClientCacheOpts{ @@ -785,17 +787,18 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt bgClientCache: cache.NewSDKClientCache(cacheOpts), putClientCache: cache.NewSDKClientCache(cacheOpts), persistate: persistate, - metricsCollector: metrics.NewNodeMetrics(), + metricsCollector: nodeMetrics, dialerSource: ds, } } -func internalNetConfig(appCfg *config.Config) internalNet.Config { +func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) internalNet.Config { result := internalNet.Config{ Enabled: multinet.Enabled(appCfg), Balancer: multinet.Balancer(appCfg), Restrict: multinet.Restrict(appCfg), FallbackDelay: multinet.FallbackDelay(appCfg), + Metrics: m, } sn := multinet.Subnets(appCfg) for _, s := range sn 
{ @@ -1362,7 +1365,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { } } - if err := c.dialerSource.Update(internalNetConfig(c.appCfg)); err != nil { + if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil { c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err)) return } diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index 3aa51c0f0..cb165de69 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -22,6 +22,7 @@ const ( grpcServerSubsystem = "grpc_server" policerSubsystem = "policer" commonCacheSubsystem = "common_cache" + multinetSubsystem = "multinet" successLabel = "success" shardIDLabel = "shard_id" @@ -41,6 +42,7 @@ const ( endpointLabel = "endpoint" hitLabel = "hit" cacheLabel = "cache" + sourceIPLabel = "source_ip" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go index f3f529d05..d0cb8131f 100644 --- a/internal/metrics/innerring.go +++ b/internal/metrics/innerring.go @@ -17,6 +17,7 @@ type InnerRingServiceMetrics struct { eventDuration *prometheus.HistogramVec morphCacheMetrics *morphCacheMetrics logMetrics logger.LogMetrics + multinet *multinetMetrics // nolint: unused appInfo *ApplicationInfo } @@ -51,6 +52,7 @@ func NewInnerRingMetrics() *InnerRingServiceMetrics { morphCacheMetrics: newMorphCacheMetrics(innerRingNamespace), appInfo: NewApplicationInfo(misc.Version), logMetrics: logger.NewLogMetrics(innerRingNamespace), + multinet: newMultinetMetrics(innerRingNamespace), } } @@ -78,3 +80,7 @@ func (m *InnerRingServiceMetrics) MorphCacheMetrics() MorphCacheMetrics { func (m *InnerRingServiceMetrics) LogMetrics() logger.LogMetrics { return m.logMetrics } + +func (m *InnerRingServiceMetrics) Multinet() MultinetMetrics { + return m.multinet +} diff --git a/internal/metrics/multinet.go b/internal/metrics/multinet.go new file mode 100644 index 000000000..6b1f99d46 --- /dev/null +++ b/internal/metrics/multinet.go @@ -0,0 +1,35 @@ +package metrics + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type multinetMetrics struct { + dials *prometheus.GaugeVec +} + +type MultinetMetrics interface { + Dial(sourceIP string, success bool) +} + +func newMultinetMetrics(ns string) *multinetMetrics { + return &multinetMetrics{ + dials: metrics.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: ns, + Subsystem: multinetSubsystem, + Name: "dial_count", + Help: "Dials count performed by multinet", + }, []string{sourceIPLabel, successLabel}), + } +} + +func (m *multinetMetrics) Dial(sourceIP string, success bool) { + m.dials.With(prometheus.Labels{ + sourceIPLabel: sourceIP, + successLabel: strconv.FormatBool(success), + }).Inc() +} diff --git a/internal/metrics/node.go b/internal/metrics/node.go index 711387875..4ea3c7c24 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -25,6 +25,7 @@ type NodeMetrics struct { morphClient *morphClientMetrics morphCache *morphCacheMetrics log logger.LogMetrics + multinet *multinetMetrics // nolint: unused appInfo *ApplicationInfo } @@ -53,6 +54,7 @@ func NewNodeMetrics() *NodeMetrics { morphCache: newMorphCacheMetrics(namespace), log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), + multinet: newMultinetMetrics(namespace), } } @@ -120,3 +122,7 @@ func (m *NodeMetrics) MorphCacheMetrics() MorphCacheMetrics { func (m 
*NodeMetrics) LogMetrics() logger.LogMetrics { return m.log } + +func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { + return m.multinet +} diff --git a/internal/net/config.go b/internal/net/config.go index 10450db23..b84ac3b35 100644 --- a/internal/net/config.go +++ b/internal/net/config.go @@ -7,6 +7,7 @@ import ( "slices" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/multinet" ) @@ -23,6 +24,7 @@ type Config struct { Balancer string Restrict bool FallbackDelay time.Duration + Metrics metrics.MultinetMetrics } func (c Config) toMultinetConfig() (multinet.Config, error) { @@ -52,6 +54,7 @@ func (c Config) toMultinetConfig() (multinet.Config, error) { Restrict: c.Restrict, FallbackDelay: c.FallbackDelay, Dialer: newDefaulDialer(), + EventHandler: newEventHandler(c.Metrics), }, nil } diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go new file mode 100644 index 000000000..024e5cf7c --- /dev/null +++ b/internal/net/event_handler.go @@ -0,0 +1,29 @@ +package net + +import ( + "net" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" + "git.frostfs.info/TrueCloudLab/multinet" +) + +var _ multinet.EventHandler = (*metricsEventHandler)(nil) + +type metricsEventHandler struct { + m metrics.MultinetMetrics +} + +func (m *metricsEventHandler) DialPerformed(sourceIP net.Addr, _ string, _ string, err error) { + sourceIPString := "undefined" + if sourceIP != nil { + sourceIPString = sourceIP.Network() + "://" + sourceIP.String() + } + m.m.Dial(sourceIPString, err == nil) +} + +func newEventHandler(m metrics.MultinetMetrics) multinet.EventHandler { + if m == nil { + return nil + } + return &metricsEventHandler{m: m} +} diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index c4aaeda56..cb0654b6e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -463,6 +463,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- name: morphPrefix, from: fromSideChainBlock, morphCacheMetric: s.irMetrics.MorphCacheMetrics(), + multinetMetrics: s.irMetrics.Multinet(), } // create morph client diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index a4a52edec..b94312645 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -117,6 +117,7 @@ type ( sgn *transaction.Signer from uint32 // block height morphCacheMetric metrics.MorphCacheMetrics + multinetMetrics metrics.MultinetMetrics } ) @@ -487,7 +488,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c return nil, fmt.Errorf("%s chain client endpoints not provided", p.name) } - nc := parseMultinetConfig(p.cfg) + nc := parseMultinetConfig(p.cfg, p.multinetMetrics) ds, err := internalNet.NewDialerSource(nc) if err != nil { return nil, fmt.Errorf("dialer source: %w", err) @@ -550,12 +551,13 @@ func parseWalletAddressesFromStrings(wallets []string) ([]util.Uint160, error) { return extraWallets, nil } -func parseMultinetConfig(cfg *viper.Viper) internalNet.Config { +func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNet.Config { nc := internalNet.Config{ Enabled: cfg.GetBool("multinet.enabled"), Balancer: cfg.GetString("multinet.balancer"), Restrict: cfg.GetBool("multinet.restrict"), FallbackDelay: cfg.GetDuration("multinet.fallback_delay"), + Metrics: m, } for i := 0; ; i++ { mask := cfg.GetString(fmt.Sprintf("multinet.subnets.%d.mask", i)) From 5afea62ec0c29b8b3422412e4cd8a5452785e6fd Mon 
Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 18 Oct 2024 15:15:19 +0300 Subject: [PATCH 108/591] [#1438] debian: Remove package scripts Signed-off-by: Evgenii Stratonikov --- Makefile | 21 +------------ debian/changelog | 5 --- debian/clean | 2 -- debian/control | 39 ----------------------- debian/copyright | 23 -------------- debian/frostfs-cli.docs | 4 --- debian/frostfs-cli.install | 3 -- debian/frostfs-cli.manpages | 1 - debian/frostfs-ir.dirs | 2 -- debian/frostfs-ir.docs | 3 -- debian/frostfs-ir.install | 1 - debian/frostfs-ir.postinst | 51 ------------------------------ debian/frostfs-ir.postrm | 40 ------------------------ debian/frostfs-ir.preinst | 34 -------------------- debian/frostfs-ir.prerm | 37 ---------------------- debian/frostfs-ir.service | 17 ---------- debian/frostfs-storage.dirs | 3 -- debian/frostfs-storage.docs | 4 --- debian/frostfs-storage.install | 1 - debian/frostfs-storage.postinst | 55 --------------------------------- debian/frostfs-storage.postrm | 40 ------------------------ debian/frostfs-storage.preinst | 34 -------------------- debian/frostfs-storage.prerm | 37 ---------------------- debian/frostfs-storage.service | 17 ---------- debian/rules | 40 ------------------------ debian/source/format | 1 - docs/building-deb-package.md | 46 --------------------------- docs/release-instruction.md | 5 --- 28 files changed, 1 insertion(+), 565 deletions(-) delete mode 100644 debian/changelog delete mode 100644 debian/clean delete mode 100644 debian/control delete mode 100644 debian/copyright delete mode 100644 debian/frostfs-cli.docs delete mode 100644 debian/frostfs-cli.install delete mode 100644 debian/frostfs-cli.manpages delete mode 100644 debian/frostfs-ir.dirs delete mode 100644 debian/frostfs-ir.docs delete mode 100644 debian/frostfs-ir.install delete mode 100755 debian/frostfs-ir.postinst delete mode 100755 debian/frostfs-ir.postrm delete mode 100755 debian/frostfs-ir.preinst delete mode 100755 debian/frostfs-ir.prerm delete mode 100644 debian/frostfs-ir.service delete mode 100644 debian/frostfs-storage.dirs delete mode 100644 debian/frostfs-storage.docs delete mode 100644 debian/frostfs-storage.install delete mode 100755 debian/frostfs-storage.postinst delete mode 100755 debian/frostfs-storage.postrm delete mode 100755 debian/frostfs-storage.preinst delete mode 100755 debian/frostfs-storage.prerm delete mode 100644 debian/frostfs-storage.service delete mode 100755 debian/rules delete mode 100644 debian/source/format delete mode 100644 docs/building-deb-package.md diff --git a/Makefile b/Makefile index 2f29ac19c..d92844bb5 100755 --- a/Makefile +++ b/Makefile @@ -27,12 +27,6 @@ DIRS = $(BIN) $(RELEASE) CMDS = $(notdir $(basename $(wildcard cmd/frostfs-*))) BINS = $(addprefix $(BIN)/, $(CMDS)) -# .deb package versioning -OS_RELEASE = $(shell lsb_release -cs) -PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \ - sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \ - sed "s/-/~/")-${OS_RELEASE} - OUTPUT_LINT_DIR ?= $(abspath $(BIN))/linters LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION) TMP_DIR := .cache @@ -58,7 +52,7 @@ LOCODE_DB_PATH=$(abspath ./.cache/locode_db) LOCODE_DB_VERSION=v0.4.0 .PHONY: help all images dep clean fmts fumpt imports test lint docker/lint - prepare-release debpackage pre-commit unpre-commit + prepare-release pre-commit unpre-commit # To build a specific binary, use it's name prefix with bin/ as a target # For example `make bin/frostfs-node` will build only storage node 
binary @@ -263,19 +257,6 @@ clean: rm -rf $(BIN) rm -rf $(RELEASE) -# Package for Debian -debpackage: - dch -b --package frostfs-node \ - --controlmaint \ - --newversion $(PKG_VERSION) \ - --distribution $(OS_RELEASE) \ - "Please see CHANGELOG.md for code changes for $(VERSION)" - dpkg-buildpackage --no-sign -b - -# Cleanup deb package build directories -debclean: - dh clean - # Download locode database locode-download: mkdir -p $(TMP_DIR) diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 47328c419..000000000 --- a/debian/changelog +++ /dev/null @@ -1,5 +0,0 @@ -frostfs-node (0.0.1) stable; urgency=medium - - * Initial package build - - -- TrueCloudLab Tue, 25 Oct 2022 21:10:49 +0300 diff --git a/debian/clean b/debian/clean deleted file mode 100644 index 44dc05e0a..000000000 --- a/debian/clean +++ /dev/null @@ -1,2 +0,0 @@ -man/ -debian/*.bash-completion diff --git a/debian/control b/debian/control deleted file mode 100644 index f3f214bca..000000000 --- a/debian/control +++ /dev/null @@ -1,39 +0,0 @@ -Source: frostfs-node -Section: misc -Priority: optional -Maintainer: TrueCloudLab -Build-Depends: debhelper-compat (= 13), dh-sequence-bash-completion, devscripts -Standards-Version: 4.5.1 -Homepage: https://fs.neo.org/ -Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-node.git -Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-node - -Package: frostfs-storage -Architecture: any -Depends: ${misc:Depends} -Description: FrostFS Storage node - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. - -Package: frostfs-ir -Architecture: any -Depends: ${misc:Depends}, frostfs-locode-db -Description: FrostFS InnerRing node - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. - -Package: frostfs-cli -Architecture: any -Depends: ${misc:Depends} -Description: CLI tools for FrostFS - FrostFS is a decentralized distributed object storage integrated with the NEO - Blockchain. FrostFS Nodes are organized in a peer-to-peer network that takes care - of storing and distributing user's data. Any Neo user may participate in the - network and get paid for providing storage resources to other users or store - their data in FrostFS and pay a competitive price for it. 
diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index 61dab665d..000000000 --- a/debian/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: frostfs-node -Upstream-Contact: tech@frostfs.info -Source: https://git.frostfs.info/TrueCloudLab/frostfs-node - -Files: * -Copyright: 2022-2023 TrueCloudLab (@TrueCloudLab), contributors of FrostFS project - 2018-2022 NeoSPCC (@nspcc-dev), contributors of NeoFS project - (https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/CREDITS.md) - -License: GPL-3 - This program is free software: you can redistribute it and/or modify it - under the terms of the GNU General Public License as published - by the Free Software Foundation; version 3. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program or at /usr/share/common-licenses/GPL-3 - If not, see . diff --git a/debian/frostfs-cli.docs b/debian/frostfs-cli.docs deleted file mode 100644 index 58d4559cc..000000000 --- a/debian/frostfs-cli.docs +++ /dev/null @@ -1,4 +0,0 @@ -CONTRIBUTING.md -CREDITS.md -README.md -cmd/frostfs-adm/docs diff --git a/debian/frostfs-cli.install b/debian/frostfs-cli.install deleted file mode 100644 index 93025187b..000000000 --- a/debian/frostfs-cli.install +++ /dev/null @@ -1,3 +0,0 @@ -bin/frostfs-adm usr/bin -bin/frostfs-cli usr/bin -bin/frostfs-lens usr/bin diff --git a/debian/frostfs-cli.manpages b/debian/frostfs-cli.manpages deleted file mode 100644 index 85c5e001d..000000000 --- a/debian/frostfs-cli.manpages +++ /dev/null @@ -1 +0,0 @@ -man/* diff --git a/debian/frostfs-ir.dirs b/debian/frostfs-ir.dirs deleted file mode 100644 index 90da8fd27..000000000 --- a/debian/frostfs-ir.dirs +++ /dev/null @@ -1,2 +0,0 @@ -/etc/frostfs/ir -/var/lib/frostfs/ir diff --git a/debian/frostfs-ir.docs b/debian/frostfs-ir.docs deleted file mode 100644 index 38b0cef26..000000000 --- a/debian/frostfs-ir.docs +++ /dev/null @@ -1,3 +0,0 @@ -CONTRIBUTING.md -CREDITS.md -README.md diff --git a/debian/frostfs-ir.install b/debian/frostfs-ir.install deleted file mode 100644 index e052f5434..000000000 --- a/debian/frostfs-ir.install +++ /dev/null @@ -1 +0,0 @@ -bin/frostfs-ir usr/bin diff --git a/debian/frostfs-ir.postinst b/debian/frostfs-ir.postinst deleted file mode 100755 index eb9d381c9..000000000 --- a/debian/frostfs-ir.postinst +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -case "$1" in - configure) - USERNAME=ir - id -u frostfs-ir >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/ir --system -M -U -c "FrostFS InnerRing node" frostfs-ir - if ! 
dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME - chmod -f 0750 /etc/frostfs/$USERNAME - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml - chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true - chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true - fi - USERDIR="$(getent passwd frostfs-$USERNAME | cut -d: -f6)" - if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then - chown -f frostfs-$USERNAME: "$USERDIR" - fi - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.postrm b/debian/frostfs-ir.postrm deleted file mode 100755 index cbb7db2f2..000000000 --- a/debian/frostfs-ir.postrm +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - purge) - rm -rf /var/lib/frostfs/ir/* - ;; - - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.preinst b/debian/frostfs-ir.preinst deleted file mode 100755 index 37f952537..000000000 --- a/debian/frostfs-ir.preinst +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.prerm b/debian/frostfs-ir.prerm deleted file mode 100755 index 0da369d75..000000000 --- a/debian/frostfs-ir.prerm +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-ir.service b/debian/frostfs-ir.service deleted file mode 100644 index 304017f68..000000000 --- a/debian/frostfs-ir.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=FrostFS InnerRing node -Requires=network.target - -[Service] -Type=notify -NotifyAccess=all -ExecStart=/usr/bin/frostfs-ir --config /etc/frostfs/ir/config.yml -User=frostfs-ir -Group=frostfs-ir -WorkingDirectory=/var/lib/frostfs/ir -Restart=always -RestartSec=5 -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/debian/frostfs-storage.dirs b/debian/frostfs-storage.dirs deleted file mode 100644 index 4142145ee..000000000 --- a/debian/frostfs-storage.dirs +++ /dev/null @@ -1,3 +0,0 @@ -/etc/frostfs/storage -/srv/frostfs -/var/lib/frostfs/storage diff --git a/debian/frostfs-storage.docs b/debian/frostfs-storage.docs deleted file mode 100644 index cd1f5f23f..000000000 --- a/debian/frostfs-storage.docs +++ /dev/null @@ -1,4 +0,0 @@ -docs/storage-node-configuration.md -CONTRIBUTING.md -CREDITS.md -README.md diff --git a/debian/frostfs-storage.install b/debian/frostfs-storage.install deleted file mode 100644 index 670935e7b..000000000 --- a/debian/frostfs-storage.install +++ /dev/null @@ -1 +0,0 @@ -bin/frostfs-node usr/bin diff --git a/debian/frostfs-storage.postinst b/debian/frostfs-storage.postinst deleted file mode 100755 index 88fa53be5..000000000 --- a/debian/frostfs-storage.postinst +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - -case "$1" in - configure) - USERNAME=storage - id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -M -U -c "FrostFS Storage node" frostfs-$USERNAME - if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME - chmod -f 0750 /etc/frostfs/$USERNAME - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yml - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/control.yml - chmod -f 0640 /etc/frostfs/$USERNAME/config.yml || true - chmod -f 0640 /etc/frostfs/$USERNAME/control.yml || true - fi - USERDIR=$(getent passwd frostfs-$USERNAME | cut -d: -f6) - if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then - chown -f frostfs-$USERNAME: "$USERDIR" - fi - USERDIR=/srv/frostfs - if ! dpkg-statoverride --list frostfs-$USERDIR >/dev/null; then - chown -f frostfs-$USERNAME: $USERDIR - fi - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.postrm b/debian/frostfs-storage.postrm deleted file mode 100755 index d9c8c9656..000000000 --- a/debian/frostfs-storage.postrm +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - purge) - rm -rf /var/lib/frostfs/storage/* - ;; - - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.preinst b/debian/frostfs-storage.preinst deleted file mode 100755 index 37f952537..000000000 --- a/debian/frostfs-storage.preinst +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.prerm b/debian/frostfs-storage.prerm deleted file mode 100755 index 0da369d75..000000000 --- a/debian/frostfs-storage.prerm +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-storage.service b/debian/frostfs-storage.service deleted file mode 100644 index 573961756..000000000 --- a/debian/frostfs-storage.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=FrostFS Storage node -Requires=network.target - -[Service] -Type=notify -NotifyAccess=all -ExecStart=/usr/bin/frostfs-node --config /etc/frostfs/storage/config.yml -User=frostfs-storage -Group=frostfs-storage -WorkingDirectory=/srv/frostfs -Restart=always -RestartSec=5 -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 0dd8ee399..000000000 --- a/debian/rules +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/make -f - -# Do not try to strip Go binaries -export DEB_BUILD_OPTIONS := nostrip - -%: - dh $@ --with bash-completion - -override_dh_auto_test: - -override_dh_auto_install: - echo $(DEB_BUILD_OPTIONS) - dh_auto_install - - bin/frostfs-adm gendoc --type man man/ - bin/frostfs-cli gendoc --type man man/ - - bin/frostfs-adm completion bash > debian/frostfs-adm.bash-completion - bin/frostfs-cli completion bash > debian/frostfs-cli.bash-completion - install -m 0755 -d debian/frostfs-cli/usr/share/fish/completions/ - install -m 0755 -d debian/frostfs-cli/usr/share/zsh/vendor-completions/ - bin/frostfs-adm completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-adm.fish - bin/frostfs-adm completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-adm - bin/frostfs-cli completion fish > debian/frostfs-cli/usr/share/fish/completions/frostfs-cli.fish - bin/frostfs-cli completion zsh > debian/frostfs-cli/usr/share/zsh/vendor-completions/_frostfs-cli - - install -T -m 0640 config/example/ir.yaml debian/frostfs-ir/etc/frostfs/ir/config.yml - install -T -m 0640 config/example/ir-control.yaml debian/frostfs-ir/etc/frostfs/ir/control.yml - install -T -m 0640 config/example/node.yaml debian/frostfs-storage/etc/frostfs/storage/config.yml - install -T -m 0640 config/example/node-control.yaml debian/frostfs-storage/etc/frostfs/storage/control.yml - -override_dh_installsystemd: - dh_installsystemd --no-enable --no-start --name=frostfs-ir - dh_installsystemd --no-enable --no-start --name=frostfs-storage - -override_dh_installchangelogs: - dh_installchangelogs -k CHANGELOG.md - -override_dh_installdocs: - dh_installdocs diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8d8..000000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md deleted file mode 100644 index 26a77a27f..000000000 --- a/docs/building-deb-package.md +++ /dev/null @@ -1,46 +0,0 @@ -# Building Debian package on host - -## Prerequisites - -For now, we're assuming building for Debian 11 (stable) x86_64. - -Go version 18.4 or later should already be installed, i.e. this runs -successfully: - -* `make all` - -## Installing packaging dependencies - -```shell -$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts -``` - -Warining: number of package installed is pretty large considering dependecies. - -## Package building - -```shell -$ make debpackage -``` - -## Leftovers cleaning - -```shell -$ make debclean -``` -or -```shell -$ dh clean -``` - -# Package versioning - -By default, package version is based on product version and may also contain git -tags and hashes. 
- -Package version could be overwritten by setting `PKG_VERSION` variable before -build, Debian package versioning rules should be respected. - -```shell -$ PKG_VERSION=0.32.0 make debpackge -``` diff --git a/docs/release-instruction.md b/docs/release-instruction.md index 3aebc8e66..d000f10d0 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -43,11 +43,6 @@ Write new revision number into the root `VERSION` file: $ echo ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} > VERSION ``` -Update version in Debian package changelog file -``` -$ cat debian/changelog -``` - Update the supported version of `TrueCloudLab/frostfs-contract` module in root `README.md` if needed. From ed13387c0e1406040898fc91f36c6d9370f58413 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 18 Oct 2024 15:19:29 +0300 Subject: [PATCH 109/591] [#1438] .docker: Use go1.23 for builders Signed-off-by: Evgenii Stratonikov --- .docker/Dockerfile.adm | 2 +- .docker/Dockerfile.ci | 2 +- .docker/Dockerfile.cli | 2 +- .docker/Dockerfile.ir | 2 +- .docker/Dockerfile.storage | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.docker/Dockerfile.adm b/.docker/Dockerfile.adm index 5d67a1d04..42aeebc48 100644 --- a/.docker/Dockerfile.adm +++ b/.docker/Dockerfile.adm @@ -1,4 +1,4 @@ -FROM golang:1.22 AS builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.ci b/.docker/Dockerfile.ci index e9077c831..9ddd8de59 100644 --- a/.docker/Dockerfile.ci +++ b/.docker/Dockerfile.ci @@ -1,4 +1,4 @@ -FROM golang:1.22 +FROM golang:1.23 WORKDIR /tmp diff --git a/.docker/Dockerfile.cli b/.docker/Dockerfile.cli index 16f643b61..16f130056 100644 --- a/.docker/Dockerfile.cli +++ b/.docker/Dockerfile.cli @@ -1,4 +1,4 @@ -FROM golang:1.22 AS builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.ir b/.docker/Dockerfile.ir index f2cb764e5..c119f8127 100644 --- a/.docker/Dockerfile.ir +++ b/.docker/Dockerfile.ir @@ -1,4 +1,4 @@ -FROM golang:1.22 AS builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository diff --git a/.docker/Dockerfile.storage b/.docker/Dockerfile.storage index cf7f97748..854f7adea 100644 --- a/.docker/Dockerfile.storage +++ b/.docker/Dockerfile.storage @@ -1,4 +1,4 @@ -FROM golang:1.22 AS builder +FROM golang:1.23 AS builder ARG BUILD=now ARG VERSION=dev ARG REPO=repository From 8b6ec57c6147e5b784d78bc891144dd55493503d Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 21 Oct 2024 14:12:02 +0300 Subject: [PATCH 110/591] [#1440] sdnotify: Fix status for `RELOADING` Before: ``` RELOADING=1 MONOTONIC_USEC=17951246687 STATUS=RELOADING=1 MONOTONIC_USEC=17951246687 ``` After: ``` RELOADING=1 MONOTONIC_USEC=17951246687 STATUS=RELOADING ``` Signed-off-by: Anton Nikiforov --- pkg/util/sdnotify/sdnotify.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go index 22549bc96..bd15d0e8f 100644 --- a/pkg/util/sdnotify/sdnotify.go +++ b/pkg/util/sdnotify/sdnotify.go @@ -59,6 +59,8 @@ func FlagAndStatus(status string) error { return fmt.Errorf("clock_gettime: %w", err) } status += "\nMONOTONIC_USEC=" + strconv.FormatInt(ts.Nano()/1000, 10) + status += "\nSTATUS=RELOADING" + return Send(status) } status += "\nSTATUS=" + strings.TrimSuffix(status, "=1") return Send(status) From e515dd458267b8cbaa05d09b0cf55ec07f8dcf5b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 23 Oct 2024 
10:39:19 +0300 Subject: [PATCH 111/591] [#1444] config: Fix data race on morph component init It could be called for every shard on metabase resync concurrently and it is possible to get state with initialized client but not initialized contract hashes. Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/accounting.go | 4 +--- cmd/frostfs-node/config.go | 8 ++++---- cmd/frostfs-node/morph.go | 8 +++++++- cmd/frostfs-node/netmap.go | 4 +--- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go index ec737f8a0..1d065c227 100644 --- a/cmd/frostfs-node/accounting.go +++ b/cmd/frostfs-node/accounting.go @@ -13,9 +13,7 @@ import ( ) func initAccountingService(ctx context.Context, c *cfg) { - if c.cfgMorph.client == nil { - initMorphComponents(ctx, c) - } + c.initMorphComponents(ctx) balanceMorphWrapper, err := balance.NewFromMorph(c.cfgMorph.client, c.cfgAccounting.scriptHash, 0) fatalOnErr(err) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 9d2b77210..cf7e0da7e 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -575,6 +575,9 @@ func (c *cfgGRPC) dropConnection(endpoint string) { } type cfgMorph struct { + initialized bool + guard sync.Mutex + client *client.Client notaryEnabled bool @@ -1455,10 +1458,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker { func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { return container.NewInfoProvider(func() (container.Source, error) { - // threadsafe: called on init or on sighup when morph initialized - if c.cfgMorph.client == nil { - initMorphComponents(ctx, c) - } + c.initMorphComponents(ctx) cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary()) if err != nil { return nil, err diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 197e50371..e85209059 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -28,7 +28,12 @@ const ( notaryDepositRetriesAmount = 300 ) -func initMorphComponents(ctx context.Context, c *cfg) { +func (c *cfg) initMorphComponents(ctx context.Context) { + c.cfgMorph.guard.Lock() + defer c.cfgMorph.guard.Unlock() + if c.cfgMorph.initialized { + return + } initMorphClient(ctx, c) lookupScriptHashesInNNS(c) // smart contract auto negotiation @@ -70,6 +75,7 @@ func initMorphComponents(ctx context.Context, c *cfg) { c.netMapSource = netmapSource c.cfgNetmap.wrapper = wrap + c.cfgMorph.initialized = true } func initMorphClient(ctx context.Context, c *cfg) { diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 5e4585f85..0e0571760 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -143,9 +143,7 @@ func initNetmapService(ctx context.Context, c *cfg) { parseAttributes(c) c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline) - if c.cfgMorph.client == nil { - initMorphComponents(ctx, c) - } + c.initMorphComponents(ctx) initNetmapState(c) From 6f798b9c4b4e244a0161e50dd5ee2e647835a949 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 22 Oct 2024 10:18:01 +0300 Subject: [PATCH 112/591] [#1441] cli: Use `grpc.WaitForReady` while initializing SDK client Before, when the target RPC server was unavailable, requests made by CLI didn't wait for a timeout specified by the `--timeout` option if the timeout was more than 20 seconds. It's because of the gRPC default backoff strategy. Adding this option fixes that behavior. 
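For illustration only (not part of this patch): a minimal standalone sketch of the behaviour difference, assuming a plain gRPC client with a placeholder endpoint, timeout and health-check RPC. Without `WaitForReady`, a call on a connection that has not yet been established returns a transport error almost immediately; with it, the call is queued until the connection becomes ready or the context deadline (standing in here for the CLI `--timeout`) expires.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Placeholder endpoint: nothing is expected to listen here.
	conn, err := grpc.Dial("127.0.0.1:18080",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Set at dial time, the option applies to every RPC made on this
		// connection, which is why the patches below add it once per dial site.
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Stand-in for the CLI --timeout value.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// With WaitForReady(true) this call blocks for ~30s and then returns
	// DeadlineExceeded; without the option it fails fast with Unavailable.
	_, err = healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	log.Println("call finished:", err)
}
```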
Signed-off-by: Aleksey Savchuk --- cmd/frostfs-cli/internal/client/sdk.go | 1 + cmd/frostfs-cli/modules/tree/client.go | 1 + 2 files changed, 2 insertions(+) diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index f7c48b871..2d9c45cbd 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -58,6 +58,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey GRPCDialOptions: []grpc.DialOption{ grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, } if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 { diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 4e0099f02..6891e711c 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -34,6 +34,7 @@ func _client() (tree.TreeServiceClient, error) { metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), ), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), } if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { From 9a260c2e64abd5309cce7e3e7fde93b131b3ffc0 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 23 Oct 2024 14:02:31 +0300 Subject: [PATCH 113/591] [#1441] network/cache: Use `grpc.WaitForReady` option when creating client Signed-off-by: Aleksey Savchuk --- pkg/network/cache/multi.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index e936ead65..2ecce3a01 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -70,6 +70,7 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address tracing.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), } prmDial := client.PrmDial{ From 65a4320c7539f4915e799f652475c739b7ad58b7 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 23 Oct 2024 14:04:37 +0300 Subject: [PATCH 114/591] [#1441] services/tree: Use `grpc.WaitForReady` option when creating client Signed-off-by: Aleksey Savchuk --- pkg/services/tree/cache.go | 1 + pkg/services/tree/sync.go | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index e490cb855..ac80d0e4c 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -103,6 +103,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* tracing.NewStreamClientInterceptor(), ), grpc.WithContextDialer(c.ds.GrpcContextDialer()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), } if !netAddr.IsTLSEnabled() { diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 5bbc93978..ce1e72104 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -342,7 +342,9 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(insecure.NewCredentials())) + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + ) } // ErrAlreadySyncing is returned when a service synchronization has already From b9284604d9a19ba4151e664439c6300894c14905 Mon Sep 17 00:00:00 2001 From: 
Aleksey Savchuk Date: Wed, 23 Oct 2024 15:36:49 +0300 Subject: [PATCH 115/591] [#1442] cli/tree: Allow to specify `rpc-endpoint` with config file We have several ways to specify the `rpc-endpoint`: with a flag, with a single config file or multiple files. Before, the `rpc-endpoint` flag was marked as required. Because `cobra` checked the required flag presence first, it prevented specifying `rpc-endpoint` with a config file. Signed-off-by: Aleksey Savchuk --- cmd/frostfs-cli/modules/tree/add.go | 2 -- cmd/frostfs-cli/modules/tree/add_by_path.go | 1 - cmd/frostfs-cli/modules/tree/get_by_path.go | 2 -- cmd/frostfs-cli/modules/tree/get_op_log.go | 2 -- cmd/frostfs-cli/modules/tree/healthcheck.go | 2 -- cmd/frostfs-cli/modules/tree/list.go | 2 -- cmd/frostfs-cli/modules/tree/move.go | 2 -- cmd/frostfs-cli/modules/tree/remove.go | 2 -- cmd/frostfs-cli/modules/tree/subtree.go | 2 -- 9 files changed, 17 deletions(-) diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go index 019feb0ec..e2c05d486 100644 --- a/cmd/frostfs-cli/modules/tree/add.go +++ b/cmd/frostfs-cli/modules/tree/add.go @@ -30,8 +30,6 @@ func initAddCmd() { ff := addCmd.Flags() ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") ff.Uint64(parentIDFlagKey, 0, "Parent node ID") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func add(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go index 5d5b00b7d..7263bcd0d 100644 --- a/cmd/frostfs-cli/modules/tree/add_by_path.go +++ b/cmd/frostfs-cli/modules/tree/add_by_path.go @@ -36,7 +36,6 @@ func initAddByPathCmd() { ff.String(pathFlagKey, "", "Path to a node") ff.StringSlice(metaFlagKey, nil, "Meta pairs in the form of Key1=[0x]Value1,Key2=[0x]Value2") - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) _ = cobra.MarkFlagRequired(ff, pathFlagKey) } diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go index 7061723fd..210630e60 100644 --- a/cmd/frostfs-cli/modules/tree/get_by_path.go +++ b/cmd/frostfs-cli/modules/tree/get_by_path.go @@ -36,8 +36,6 @@ func initGetByPathCmd() { ff.String(pathFlagKey, "", "Path to a node") ff.Bool(latestOnlyFlagKey, false, "Look only for the latest version of a node") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getByPath(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go index 376aa8e8d..9d767ab3e 100644 --- a/cmd/frostfs-cli/modules/tree/get_op_log.go +++ b/cmd/frostfs-cli/modules/tree/get_op_log.go @@ -30,8 +30,6 @@ func initGetOpLogCmd() { ff := getOpLogCmd.Flags() ff.Uint64(heightFlagKey, 0, "Height to start with") ff.Uint64(countFlagKey, 10, "Logged operations count") - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getOpLog(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go index b01bb2e77..c581b8e26 100644 --- a/cmd/frostfs-cli/modules/tree/healthcheck.go +++ b/cmd/frostfs-cli/modules/tree/healthcheck.go @@ -20,8 +20,6 @@ var healthcheckCmd = &cobra.Command{ func initHealthcheckCmd() { commonflags.Init(healthcheckCmd) - ff := healthcheckCmd.Flags() - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func healthcheck(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go index 
f8c0e490f..ee1db2a79 100644 --- a/cmd/frostfs-cli/modules/tree/list.go +++ b/cmd/frostfs-cli/modules/tree/list.go @@ -26,8 +26,6 @@ func initListCmd() { ff := listCmd.Flags() ff.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) _ = listCmd.MarkFlagRequired(commonflags.CIDFlag) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func list(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go index dc807d752..7a369bd02 100644 --- a/cmd/frostfs-cli/modules/tree/move.go +++ b/cmd/frostfs-cli/modules/tree/move.go @@ -33,8 +33,6 @@ func initMoveCmd() { _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey) _ = getSubtreeCmd.MarkFlagRequired(parentIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func move(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go index d0b6fab2f..3c532fe26 100644 --- a/cmd/frostfs-cli/modules/tree/remove.go +++ b/cmd/frostfs-cli/modules/tree/remove.go @@ -29,8 +29,6 @@ func initRemoveCmd() { ff.Uint64(nodeIDFlagKey, 0, "Node ID.") _ = getSubtreeCmd.MarkFlagRequired(nodeIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func remove(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go index 83a8909b6..c5f7ad401 100644 --- a/cmd/frostfs-cli/modules/tree/subtree.go +++ b/cmd/frostfs-cli/modules/tree/subtree.go @@ -34,8 +34,6 @@ func initGetSubtreeCmd() { _ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag) _ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey) - - _ = cobra.MarkFlagRequired(ff, commonflags.RPC) } func getSubTree(cmd *cobra.Command, _ []string) { From 29708b78d705958067a6f0d25b4e63572bf7e93d Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 23 Oct 2024 15:37:50 +0300 Subject: [PATCH 116/591] [#1442] cli/tree: Enchance error message if `rpc-endpoint` isn't defined Signed-off-by: Aleksey Savchuk --- cmd/frostfs-cli/modules/tree/client.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 6891e711c..a70624ac8 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,6 +2,7 @@ package tree import ( "context" + "fmt" "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" @@ -20,7 +21,13 @@ import ( // after making Tree API public. 
func _client() (tree.TreeServiceClient, error) { var netAddr network.Address - err := netAddr.FromString(viper.GetString(commonflags.RPC)) + + rpcEndpoint := viper.GetString(commonflags.RPC) + if rpcEndpoint == "" { + return nil, fmt.Errorf("%s is not defined", commonflags.RPC) + } + + err := netAddr.FromString(rpcEndpoint) if err != nil { return nil, err } From bc8d79ddf949cb88546bc214d688a03f5ab9740e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 24 Oct 2024 09:45:57 +0300 Subject: [PATCH 117/591] [#1447] services/tree: Move relaying code to a separate function Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/redirect.go | 14 ++++++++ pkg/services/tree/service.go | 66 ++++------------------------------- 2 files changed, 20 insertions(+), 60 deletions(-) diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index ec41a60d4..5bde3ae38 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -12,10 +12,24 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" + "google.golang.org/grpc" ) var errNoSuitableNode = errors.New("no node was found to execute the request") +func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { + var resp *Resp + var outErr error + err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { + resp, outErr = callback(c, ctx, req) + return true + }) + if err != nil { + return nil, err + } + return resp, outErr +} + // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. 
func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 2cb2af294..acc2775e6 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -122,16 +122,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } if pos < 0 { - var resp *AddResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Add(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Add) } d := pilorama.CIDDescriptor{CID: cid, Position: pos, Size: len(ns)} @@ -174,16 +165,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } if pos < 0 { - var resp *AddByPathResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.AddByPath(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).AddByPath) } meta := protoToMeta(b.GetMeta()) @@ -238,16 +220,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } if pos < 0 { - var resp *RemoveResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Remove(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Remove) } if b.GetNodeId() == pilorama.RootID { @@ -291,16 +264,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } if pos < 0 { - var resp *MoveResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.Move(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).Move) } if b.GetNodeId() == pilorama.RootID { @@ -343,16 +307,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } if pos < 0 { - var resp *GetNodeByPathResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.GetNodeByPath(ctx, req) - return true - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).GetNodeByPath) } attr := b.GetPathAttribute() @@ -763,16 +718,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return nil, err } if pos < 0 { - var resp *TreeListResponse - var outErr error - err = s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) - return outErr == nil - }) - if err != nil { - return nil, err - } - return resp, outErr + return relayUnary(ctx, s, ns, req, (TreeServiceClient).TreeList) } ids, err := s.forest.TreeList(ctx, cid) From eb5336d5ff53f764edee4fcbd10e738db506a0e6 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 22 Oct 2024 15:47:15 +0300 Subject: [PATCH 118/591] [#1406] tree: Use delete verb instead put for Remove Signed-off-by: Airat Arifullin --- pkg/services/tree/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 
acc2775e6..10c3b6ccc 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -210,7 +210,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete) if err != nil { return nil, err } From 012af5cc38597d6bbd28b723a5d4190ece8c7cbb Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 28 Oct 2024 13:07:28 +0300 Subject: [PATCH 119/591] [#1406] tree: Add unit-tests for ape check Signed-off-by: Airat Arifullin --- pkg/services/tree/ape_test.go | 207 ++++++++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 pkg/services/tree/ape_test.go diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go new file mode 100644 index 000000000..3f94925b5 --- /dev/null +++ b/pkg/services/tree/ape_test.go @@ -0,0 +1,207 @@ +package tree + +import ( + "context" + "encoding/hex" + "fmt" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" + core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" + containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/stretchr/testify/require" +) + +var ( + containerID = "73tQMTYyUkTgmvPR1HWib6pndbhSoBovbnMF7Pws8Rcy" + + senderPrivateKey, _ = keys.NewPrivateKey() + + senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes()) + + rootCnr = &core.Container{Value: containerSDK.Container{}} +) + +type frostfsIDProviderMock struct { + subjects map[util.Uint160]*client.Subject + subjectsExtended map[util.Uint160]*client.SubjectExtended +} + +func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { + v, ok := f.subjects[key] + if !ok { + return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) + } + return v, nil +} + +func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { + v, ok := f.subjectsExtended[key] + if !ok { + return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) + } + return v, nil +} + +var _ frostfsidcore.SubjectProvider = (*frostfsIDProviderMock)(nil) + +func newFrostfsIDProviderMock(t *testing.T) *frostfsIDProviderMock { + return &frostfsIDProviderMock{ + subjects: map[util.Uint160]*client.Subject{ + scriptHashFromSenderKey(t, senderKey): { + Namespace: "testnamespace", + Name: "test", + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + }, + }, + subjectsExtended: map[util.Uint160]*client.SubjectExtended{ + scriptHashFromSenderKey(t, senderKey): { + Namespace: "testnamespace", + Name: "test", + KV: map[string]string{ + "tag-attr1": "value1", + "tag-attr2": "value2", + }, + Groups: []*client.Group{ + { + ID: 1, + Name: "test", + Namespace: "testnamespace", 
+ KV: map[string]string{ + "attr1": "value1", + "attr2": "value2", + }, + }, + }, + }, + }, + } +} + +func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { + pk, err := keys.NewPublicKeyFromString(senderKey) + require.NoError(t, err) + return pk.GetScriptHash() +} + +type stMock struct{} + +func (m *stMock) CurrentEpoch() uint64 { + return 8 +} + +func TestCheckAPE(t *testing.T) { + cid := cid.ID{} + _ = cid.DecodeString(containerID) + + t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringNotEquals, + Kind: chain.KindResource, + Key: nativeschema.PropertyKeyObjectType, + Value: "TOMBSTONE", + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.Allow, + Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + require.NoError(t, err) + }) + + t.Run("delete rule won't affect tree add", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + los.AddOverride(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.AccessDenied, + Actions: chain.Actions{Names: []string{nativeschema.MethodDeleteObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.Allow, + Actions: chain.Actions{Names: []string{nativeschema.MethodPutObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringNotEquals, + Kind: chain.KindResource, + Key: nativeschema.PropertyKeyObjectType, + Value: "TOMBSTONE", + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + require.NoError(t, err) + }) +} From 3cd7d23f1095d2efe99e66a4dfe73ec4e5ff62d5 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 22 Oct 2024 10:04:34 +0300 Subject: [PATCH 120/591] 
[#1439] node: Reduce usage of `netmapAPI.NodeInfo` Remove outdated code from `netmap` service. Signed-off-by: Anton Nikiforov --- cmd/frostfs-node/config.go | 12 +++++------ pkg/services/netmap/executor.go | 36 ++++++--------------------------- 2 files changed, 11 insertions(+), 37 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index cf7e0da7e..b2dcafbd7 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1179,17 +1179,15 @@ func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { return pool } -func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) { - var res netmapV2.NodeInfo - +func (c *cfg) LocalNodeInfo() *netmap.NodeInfo { + var res netmap.NodeInfo ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.WriteToV2(&res) + res = ni } else { - c.cfgNodeInfo.localInfo.WriteToV2(&res) + res = c.cfgNodeInfo.localInfo } - - return &res, nil + return &res } // setContractNodeInfo rewrites local node info from the FrostFS network map. diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index ae2044246..f48357915 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -28,7 +28,7 @@ type executorSvc struct { type NodeState interface { // LocalNodeInfo must return current node state // in FrostFS API v2 NodeInfo structure. - LocalNodeInfo() (*netmap.NodeInfo, error) + LocalNodeInfo() *netmapSDK.NodeInfo // ReadCurrentNetMap reads current local network map of the storage node // into the given parameter. Returns any error encountered which prevented @@ -64,39 +64,15 @@ func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, func (s *executorSvc) LocalNodeInfo( _ context.Context, - req *netmap.LocalNodeInfoRequest, + _ *netmap.LocalNodeInfoRequest, ) (*netmap.LocalNodeInfoResponse, error) { - verV2 := req.GetMetaHeader().GetVersion() - if verV2 == nil { - return nil, errors.New("missing version") - } - - var ver versionsdk.Version - if err := ver.ReadFromV2(*verV2); err != nil { - return nil, fmt.Errorf("can't read version: %w", err) - } - - ni, err := s.state.LocalNodeInfo() - if err != nil { - return nil, err - } - - if addrNum := ni.NumberOfAddresses(); addrNum > 0 && ver.Minor() <= 7 { - ni2 := new(netmap.NodeInfo) - ni2.SetPublicKey(ni.GetPublicKey()) - ni2.SetState(ni.GetState()) - ni2.SetAttributes(ni.GetAttributes()) - ni.IterateAddresses(func(s string) bool { - ni2.SetAddresses(s) - return true - }) - - ni = ni2 - } + ni := s.state.LocalNodeInfo() + var nodeInfo netmap.NodeInfo + ni.WriteToV2(&nodeInfo) body := new(netmap.LocalNodeInfoResponseBody) body.SetVersion(&s.version) - body.SetNodeInfo(ni) + body.SetNodeInfo(&nodeInfo) resp := new(netmap.LocalNodeInfoResponse) resp.SetBody(body) From 81f4cdbb91589ddab47ecde57992ef8de10e9ec0 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 22 Oct 2024 10:06:16 +0300 Subject: [PATCH 121/591] [#1439] object: Sort nodes by priority metrics to compute GET request Signed-off-by: Anton Nikiforov --- cmd/frostfs-node/config.go | 11 + cmd/frostfs-node/config/object/config.go | 20 ++ cmd/frostfs-node/object.go | 6 +- config/example/node.env | 1 + config/example/node.json | 3 + config/example/node.yaml | 4 + docs/storage-node-configuration.md | 14 +- .../object_manager/placement/metrics.go | 43 +++ .../object_manager/placement/traverser.go | 70 ++++- .../placement/traverser_test.go | 288 +++++++++++++++++- 10 files changed, 449 insertions(+), 11 deletions(-) create mode 100644 
pkg/services/object_manager/placement/metrics.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index b2dcafbd7..800c49127 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -58,6 +58,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone" tsourse "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/tombstone/source" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" @@ -109,6 +110,7 @@ type applicationConfiguration struct { ObjectCfg struct { tombstoneLifetime uint64 + priorityMetrics []placement.Metric } EngineCfg struct { @@ -232,6 +234,15 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { // Object a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) + var pm []placement.Metric + for _, raw := range objectconfig.Get(c).Priority() { + m, err := placement.ParseMetric(raw) + if err != nil { + return err + } + pm = append(pm, m) + } + a.ObjectCfg.priorityMetrics = pm // Storage Engine diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go index 876dc3ef1..6ff1fe2ab 100644 --- a/cmd/frostfs-node/config/object/config.go +++ b/cmd/frostfs-node/config/object/config.go @@ -10,10 +10,17 @@ type PutConfig struct { cfg *config.Config } +// GetConfig is a wrapper over "get" config section which provides access +// to object get pipeline configuration of object service. +type GetConfig struct { + cfg *config.Config +} + const ( subsection = "object" putSubsection = "put" + getSubsection = "get" // PutPoolSizeDefault is a default value of routine pool size to // process object.Put requests in object service. @@ -56,3 +63,16 @@ func (g PutConfig) PoolSizeLocal() int { func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") } + +// Get returns structure that provides access to "get" subsection of +// "object" section. +func Get(c *config.Config) GetConfig { + return GetConfig{ + c.Sub(subsection).Sub(getSubsection), + } +} + +// Priority returns the value of "priority" config parameter. 
+func (g GetConfig) Priority() []string { + return config.StringSliceSafe(g.cfg, "priority") +} diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c484c5d8c..c6bde2cff 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -178,7 +178,8 @@ func initObjectService(c *cfg) { sSearchV2 := createSearchSvcV2(sSearch, keyStorage) - sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource) + sGet := createGetService(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource, + c.ObjectCfg.priorityMetrics) *c.cfgObject.getSvc = *sGet // need smth better @@ -389,6 +390,7 @@ func createSearchSvcV2(sSearch *searchsvc.Service, keyStorage *util.KeyStorage) func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source, + priorityMetrics []placement.Metric, ) *getsvc.Service { ls := c.cfgObject.cfgLocalStorage.localStorage @@ -398,6 +400,8 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ls, traverseGen.WithTraverseOptions( placement.SuccessAfter(1), + placement.WithPriorityMetrics(priorityMetrics), + placement.WithNodeState(c), ), coreConstructor, containerSource, diff --git a/config/example/node.env b/config/example/node.env index 580d343fb..3979eb18f 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -87,6 +87,7 @@ FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 +FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" # Storage engine section FROSTFS_STORAGE_SHARD_POOL_SIZE=15 diff --git a/config/example/node.json b/config/example/node.json index 3470d2d12..1ea28de6c 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -131,6 +131,9 @@ "remote_pool_size": 100, "local_pool_size": 200, "skip_session_token_issuer_verification": true + }, + "get": { + "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, "storage": { diff --git a/config/example/node.yaml b/config/example/node.yaml index 2a963fc0f..4a418dfcb 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -114,6 +114,10 @@ object: remote_pool_size: 100 # number of async workers for remote PUT operations local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true + get: + priority: # list of metrics of nodes for prioritization + - $attribute:ClusterName + - $attribute:UN-LOCODE storage: # note: shard configuration can be omitted for relay node (see `node.relay`) diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 2b94400df..363520481 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -407,13 +407,17 @@ Contains object-service related parameters. object: put: remote_pool_size: 100 + get: + priority: + - $attribute:ClusterName ``` -| Parameter | Type | Default value | Description | -|-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------| -| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. 
| -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | +| Parameter | Type | Default value | Description | +|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------------| +| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | +| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | +| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | +| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET and SEARCH requests. | # `runtime` section Contains runtime parameters. diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go new file mode 100644 index 000000000..45e6df339 --- /dev/null +++ b/pkg/services/object_manager/placement/metrics.go @@ -0,0 +1,43 @@ +package placement + +import ( + "errors" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +const ( + attrPrefix = "$attribute:" +) + +type Metric interface { + CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int +} + +func ParseMetric(raw string) (Metric, error) { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + return NewAttributeMetric(attr), nil + } + return nil, errors.New("unsupported priority metric") +} + +// attributeMetric describes priority metric based on attribute. +type attributeMetric struct { + attribute string +} + +// CalculateValue return [0] if from and to contains attribute attributeMetric.attribute and +// the value of attribute is the same. In other case return [1]. +func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { + fromAttr := from.Attribute(am.attribute) + toAttr := to.Attribute(am.attribute) + if len(fromAttr) > 0 && len(toAttr) > 0 && fromAttr == toAttr { + return 0 + } + return 1 +} + +func NewAttributeMetric(attr string) Metric { + return &attributeMetric{attribute: attr} +} diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 4e790628f..6440f187d 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -3,6 +3,7 @@ package placement import ( "errors" "fmt" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -23,6 +24,11 @@ type Builder interface { BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) } +type NodeState interface { + // LocalNodeInfo return current node state in FrostFS API v2 NodeInfo structure. + LocalNodeInfo() *netmap.NodeInfo +} + // Option represents placement traverser option. 
type Option func(*cfg) @@ -50,6 +56,10 @@ type cfg struct { policy netmap.PlacementPolicy builder Builder + + metrics []Metric + + nodeState NodeState } const invalidOptsMsg = "invalid traverser options" @@ -99,7 +109,22 @@ func NewTraverser(opts ...Option) (*Traverser, error) { } var rem []int - if cfg.flatSuccess != nil { + if len(cfg.metrics) > 0 && cfg.nodeState != nil { + rem = defaultCopiesVector(cfg.policy) + var unsortedVector []netmap.NodeInfo + var regularVector []netmap.NodeInfo + for i := range rem { + unsortedVector = append(unsortedVector, ns[i][:rem[i]]...) + regularVector = append(regularVector, ns[i][rem[i]:]...) + } + rem = []int{-1, -1} + + sortedVector, err := sortVector(cfg, unsortedVector) + if err != nil { + return nil, err + } + ns = [][]netmap.NodeInfo{sortedVector, regularVector} + } else if cfg.flatSuccess != nil { ns = flatNodes(ns) rem = []int{int(*cfg.flatSuccess)} } else { @@ -157,6 +182,35 @@ func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo { return [][]netmap.NodeInfo{flat} } +type nodeMetrics struct { + index int + metrics []int +} + +func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) { + nm := make([]nodeMetrics, len(unsortedVector)) + node := cfg.nodeState.LocalNodeInfo() + + for i := range unsortedVector { + m := make([]int, len(cfg.metrics)) + for j, pm := range cfg.metrics { + m[j] = pm.CalculateValue(node, &unsortedVector[i]) + } + nm[i] = nodeMetrics{ + index: i, + metrics: m, + } + } + slices.SortFunc(nm, func(a, b nodeMetrics) int { + return slices.Compare(a.metrics, b.metrics) + }) + sortedVector := make([]netmap.NodeInfo, len(unsortedVector)) + for i := range unsortedVector { + sortedVector[i] = unsortedVector[nm[i].index] + } + return sortedVector, nil +} + // Node is a descriptor of storage node with information required for intra-container communication. type Node struct { addresses network.AddressGroup @@ -322,3 +376,17 @@ func WithCopyNumbers(v []uint32) Option { c.copyNumbers = v } } + +// WithPriorityMetrics use provided priority metrics to sort nodes. +func WithPriorityMetrics(m []Metric) Option { + return func(c *cfg) { + c.metrics = m + } +} + +// WithNodeState provide state of the current node. 
+func WithNodeState(s NodeState) Option { + return func(c *cfg) { + c.nodeState = s + } +} diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index b3b57677d..38f62aa07 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -22,7 +22,9 @@ func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([] } func testNode(v uint32) (n netmap.NodeInfo) { - n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v))) + ip := "/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)) + n.SetNetworkEndpoints(ip) + n.SetPublicKey([]byte(ip)) return n } @@ -40,7 +42,15 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { return vc } -func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { +func testPlacement(ss []int, rs []int) ([][]netmap.NodeInfo, container.Container) { + return placement(ss, rs, nil) +} + +func testECPlacement(ss []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { + return placement(ss, nil, ec) +} + +func placement(ss []int, rs []int, ec [][]int) ([][]netmap.NodeInfo, container.Container) { nodes := make([][]netmap.NodeInfo, 0, len(rs)) replicas := make([]netmap.ReplicaDescriptor, 0, len(rs)) num := uint32(0) @@ -56,7 +66,12 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { nodes = append(nodes, ns) var rd netmap.ReplicaDescriptor - rd.SetNumberOfObjects(uint32(rs[i])) + if len(rs) > 0 { + rd.SetNumberOfObjects(uint32(rs[i])) + } else { + rd.SetECDataCount(uint32(ec[i][0])) + rd.SetECParityCount(uint32(ec[i][1])) + } replicas = append(replicas, rd) } @@ -134,7 +149,7 @@ func TestTraverserObjectScenarios(t *testing.T) { err = n.FromIterator(netmapcore.Node(nodes[1][0])) require.NoError(t, err) - require.Equal(t, []Node{{addresses: n}}, tr.Next()) + require.Equal(t, []Node{{addresses: n, key: []byte("/ip4/0.0.0.0/tcp/5")}}, tr.Next()) }) t.Run("put scenario", func(t *testing.T) { @@ -275,3 +290,268 @@ func TestTraverserRemValues(t *testing.T) { }) } } + +type nodeState struct { + node *netmap.NodeInfo +} + +func (n *nodeState) LocalNodeInfo() *netmap.NodeInfo { + return n.node +} + +func TestTraverserPriorityMetrics(t *testing.T) { + t.Run("one rep one metric", func(t *testing.T) { + selectors := []int{4} + replicas := []int{3} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "B") + // Node_3, PK - ip4/0.0.0.0/tcp/3 + nodes[0][3].SetAttribute("ClusterName", "B") + + sdkNode := testNode(5) + sdkNode.SetAttribute("ClusterName", "B") + + nodesCopy := copyVectors(nodes) + + m := []Metric{NewAttributeMetric("ClusterName")} + + tr, err := NewTraverser( + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `ClusterName` the order will be: + // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] + // With priority metric `ClusterName` and current node in cluster B + // the order should be: + // [ {Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 3, len(next)) + 
require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey())) + + next = tr.Next() + // The last node is + require.Equal(t, 1, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("two reps two metrics", func(t *testing.T) { + selectors := []int{3, 3} + replicas := []int{2, 2} + + nodes, cnr := testPlacement(selectors, replicas) + + // REPLICA #1 + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + nodes[0][0].SetAttribute("UN-LOCODE", "RU LED") + + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + nodes[0][1].SetAttribute("UN-LOCODE", "FI HEL") + + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "A") + nodes[0][2].SetAttribute("UN-LOCODE", "RU LED") + + // REPLICA #2 + // Node_3 ip4/0.0.0.0/tcp/3 + nodes[1][0].SetAttribute("ClusterName", "B") + nodes[1][0].SetAttribute("UN-LOCODE", "RU MOW") + + // Node_4, PK - ip4/0.0.0.0/tcp/4 + nodes[1][1].SetAttribute("ClusterName", "B") + nodes[1][1].SetAttribute("UN-LOCODE", "RU DME") + + // Node_5, PK - ip4/0.0.0.0/tcp/5 + nodes[1][2].SetAttribute("ClusterName", "B") + nodes[1][2].SetAttribute("UN-LOCODE", "RU MOW") + + sdkNode := testNode(9) + sdkNode.SetAttribute("ClusterName", "B") + sdkNode.SetAttribute("UN-LOCODE", "RU DME") + + nodesCopy := copyVectors(nodes) + + m := []Metric{ + NewAttributeMetric("ClusterName"), + NewAttributeMetric("UN-LOCODE"), + } + + tr, err := NewTraverser( + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Check that nodes in the same cluster and + // in the same location should be the first in slice. + // Nodes which are follow criteria but stay outside the replica + // should be in the next slice. 
+ + next := tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + + sdkNode.SetAttribute("ClusterName", "B") + sdkNode.SetAttribute("UN-LOCODE", "RU MOW") + + nodesCopy = copyVectors(nodes) + + tr, err = NewTraverser( + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + next = tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + + sdkNode.SetAttribute("ClusterName", "A") + sdkNode.SetAttribute("UN-LOCODE", "RU LED") + + nodesCopy = copyVectors(nodes) + + tr, err = NewTraverser( + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + next = tr.Next() + require.Equal(t, 4, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[2].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/4", string(next[3].PublicKey())) + + next = tr.Next() + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/5", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) + + t.Run("ec container", func(t *testing.T) { + selectors := []int{4} + ec := [][]int{{2, 1}} + + nodes, cnr := testECPlacement(selectors, ec) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("ClusterName", "A") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("ClusterName", "A") + // Node_2, PK - ip4/0.0.0.0/tcp/2 + nodes[0][2].SetAttribute("ClusterName", "B") + // Node_3, PK - ip4/0.0.0.0/tcp/3 + nodes[0][3].SetAttribute("ClusterName", "B") + + sdkNode := testNode(5) + sdkNode.SetAttribute("ClusterName", "B") + + nodesCopy := copyVectors(nodes) + + m := []Metric{NewAttributeMetric("ClusterName")} + + tr, err := NewTraverser( + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `ClusterName` the order will be: + // [ {Node_0 A}, {Node_1 A}, {Node_2 B}, {Node_3 B}] + // With priority metric `ClusterName` and current node in cluster B + // the order should be: + // [ 
{Node_2 B}, {Node_0 A}, {Node_1 A}, {Node_3 B}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 3, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/2", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[2].PublicKey())) + + next = tr.Next() + // The last node is + require.Equal(t, 1, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/3", string(next[0].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) +} From 433aab12bb170832a8b6607e372d09dafdd6ba84 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 29 Oct 2024 16:14:40 +0300 Subject: [PATCH 122/591] [#1455] cli: Handle missing home directory go-homedir library incorrectly handles some of the errors that could occur. It is archived, so no PR, but let's fix it on our side. The scenario in case: executing command in an empty environment. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/root.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go index 21c367d29..c0282586a 100644 --- a/cmd/frostfs-cli/modules/root.go +++ b/cmd/frostfs-cli/modules/root.go @@ -114,12 +114,14 @@ func initConfig() { } else { // Find home directory. home, err := homedir.Dir() - commonCmd.ExitOnErr(rootCmd, "", err) - - // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml" - viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli")) - viper.SetConfigName("config") - viper.SetConfigType("yaml") + if err != nil { + common.PrintVerbose(rootCmd, "Get homedir: %s", err) + } else { + // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml" + viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli")) + viper.SetConfigName("config") + viper.SetConfigType("yaml") + } } viper.SetEnvPrefix(envPrefix) From d5ee6d30390497b1c60245dc4cae782cad619995 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 29 Oct 2024 17:17:04 +0300 Subject: [PATCH 123/591] [#1456] morph: Use DialerSource interface instead of internal struct Signed-off-by: Dmitrii Stepanov --- pkg/morph/client/constructor.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index a8efa76e7..08d16deb4 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -4,11 +4,11 @@ import ( "context" "errors" "fmt" + "net" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" - internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" @@ -48,7 +48,7 @@ type cfg struct { morphCacheMetrics metrics.MorphCacheMetrics - dialerSource *internalNet.DialerSource + dialerSource DialerSource } const ( @@ -68,6 +68,7 @@ func defaultConfig() *cfg { Scopes: transaction.Global, }, morphCacheMetrics: &morphmetrics.NoopMorphCacheMetrics{}, + dialerSource: &noopDialerSource{}, } } @@ -296,7 +297,17 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option { } } -func WithDialerSource(ds *internalNet.DialerSource) Option { +type DialerSource interface { + NetContextDialer() func(context.Context, string, string) (net.Conn, error) +} + +type 
noopDialerSource struct{} + +func (ds *noopDialerSource) NetContextDialer() func(context.Context, string, string) (net.Conn, error) { + return nil +} + +func WithDialerSource(ds DialerSource) Option { return func(c *cfg) { c.dialerSource = ds } From 87ac3c52797d65f1c46e255c937ffd30b8441d35 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 29 Oct 2024 17:16:21 +0300 Subject: [PATCH 124/591] [#1458] object: Make patch not set key before target construction * `SignRequestPrivateKey` field should be initialized either within `newUntrustedTarget` or within `newTrustedTarget`. Otherwise, all requests are signed by local node key that makes impossible to perform patch on non-container node. Signed-off-by: Airat Arifullin --- pkg/services/object/patch/streamer.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index c8ed6fdbf..677c6610f 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -113,10 +113,9 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { oV2.GetHeader().SetOwnerID(ownerID) target, err := target.New(objectwriter.Params{ - Config: s.Config, - Common: commonPrm, - Header: objectSDK.NewFromV2(oV2), - SignRequestPrivateKey: s.localNodeKey, + Config: s.Config, + Common: commonPrm, + Header: objectSDK.NewFromV2(oV2), }) if err != nil { return fmt.Errorf("target creation: %w", err) From d28a5d2d7a48084a45b154183c450f219969db69 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 29 Oct 2024 15:47:19 +0300 Subject: [PATCH 125/591] [#1448] container/ape: Ignore an error when getting a role When getting a role in the APE checker for the container services, an error may be returned if network maps of the previous two epochs don't have enough nodes to fulfil a container placement policy. It's a logical error, so we should ignore it. Signed-off-by: Aleksey Savchuk --- pkg/services/container/ape.go | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index d92ecf58b..dd4878331 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -537,10 +537,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor return false, err } - in, err := isContainerNode(nm, pk, binCnrID, cont) - if err != nil { - return false, err - } else if in { + if isContainerNode(nm, pk, binCnrID, cont) { return true, nil } @@ -551,24 +548,24 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor return false, err } - return isContainerNode(nm, pk, binCnrID, cont) + return isContainerNode(nm, pk, binCnrID, cont), nil } -func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) { - cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID) - if err != nil { - return false, err - } +func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool { + // It could an error only if the network map doesn't have enough nodes to + // fulfil the policy. 
It's a logical error that doesn't affect an actor role + // determining, so we ignore it + cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID) for i := range cnrVectors { for j := range cnrVectors[i] { if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) { - return true, nil + return true } } } - return false, nil + return false } func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { From 7ac08523647c7450366a9aa350f05eafecad8c7d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 30 Oct 2024 14:42:09 +0300 Subject: [PATCH 126/591] [#1459] .golangci.yml: Add intrange linter, fix issues Signed-off-by: Evgenii Stratonikov --- .golangci.yml | 1 + cmd/frostfs-adm/internal/modules/config/config.go | 2 +- .../internal/modules/morph/generate/generate_test.go | 2 +- .../internal/schema/metabase/records/util.go | 2 +- .../blobstor/blobovniczatree/rebuild_test.go | 6 +++--- pkg/local_object_storage/blobstor/iterate_test.go | 2 +- pkg/local_object_storage/engine/error_test.go | 2 +- pkg/local_object_storage/pilorama/forest_test.go | 2 +- pkg/local_object_storage/shard/control_test.go | 2 +- pkg/local_object_storage/writecache/limiter_test.go | 2 +- pkg/morph/timer/block_test.go | 2 +- pkg/services/object/acl/v2/util_test.go | 2 +- scripts/populate-metabase/internal/generate.go | 10 +++++----- scripts/populate-metabase/internal/populate.go | 8 ++++---- scripts/populate-metabase/main.go | 2 +- 15 files changed, 24 insertions(+), 23 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 33cf88d8a..1235c62b4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -87,5 +87,6 @@ linters: - perfsprint - testifylint - protogetter + - intrange disable-all: true fast: false diff --git a/cmd/frostfs-adm/internal/modules/config/config.go b/cmd/frostfs-adm/internal/modules/config/config.go index a98245d01..69153f0d7 100644 --- a/cmd/frostfs-adm/internal/modules/config/config.go +++ b/cmd/frostfs-adm/internal/modules/config/config.go @@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) { tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets") var i innerring.GlagoliticLetter - for i = 0; i < innerring.GlagoliticLetter(credSize); i++ { + for i = range innerring.GlagoliticLetter(credSize) { tmpl.Glagolitics = append(tmpl.Glagolitics, i.String()) } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go index 1dd6420eb..15af5637b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate_test.go @@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) { buf.Reset() v.Set(commonflags.AlphabetWalletsFlag, walletDir) require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10))) - for i := uint64(0); i < size; i++ { + for i := range uint64(size) { buf.WriteString(strconv.FormatUint(i, 10) + "\r") } diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go index f50ebe951..d15d69146 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/util.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go @@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) { size := r.ReadVarUint() oids := make([]oid.ID, size) - for i := uint64(0); i < size; i++ { + for i := range size { if err := oids[i].Decode(r.ReadVarBytes()); 
err != nil { return nil, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index dfd928aaf..dff4e9024 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -61,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Init()) storageIDs := make(map[oid.Address][]byte) - for i := 0; i < 100; i++ { + for range 100 { obj := blobstortest.NewObject(64 * 1024) // 64KB object data, err := obj.Marshal() require.NoError(t, err) @@ -168,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs := make(map[oid.Address][]byte) toDelete := make(map[oid.Address][]byte) - for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created + for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created obj := blobstortest.NewObject(64 * 1024) data, err := obj.Marshal() require.NoError(t, err) @@ -236,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Init()) storageIDs := make(map[oid.Address][]byte) - for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created + for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created obj := blobstortest.NewObject(64 * 1024) data, err := obj.Marshal() require.NoError(t, err) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 195d0bd31..3c9457db2 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -47,7 +47,7 @@ func TestIterateObjects(t *testing.T) { mObjs := make(map[string]addrData) - for i := uint64(0); i < objNum; i++ { + for i := range uint64(objNum) { sz := smalSz big := i < objNum/2 diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 535435ceb..1619003a1 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -151,7 +151,7 @@ func TestErrorReporting(t *testing.T) { checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) } - for i := uint32(0); i < 2; i++ { + for i := range uint32(2) { _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)}) require.Error(t, err) checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly) diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index 854fe0aad..fbcc53fb3 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move { } func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) { - for i := uint64(0); i < uint64(nodeCount); i++ { + for i := range uint64(nodeCount) { expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i) require.NoError(t, err) actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i) diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index 6b9eaa550..b8f1d4417 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ 
b/pkg/local_object_storage/shard/control_test.go @@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) { locked := make([]oid.ID, 1, 2) locked[0] = oidtest.ID() cnrLocked := cidtest.ID() - for i := uint64(0); i < objNum; i++ { + for range objNum { obj := objecttest.Object() obj.SetType(objectSDK.TypeRegular) diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go index db99b203a..1ca3e1156 100644 --- a/pkg/local_object_storage/writecache/limiter_test.go +++ b/pkg/local_object_storage/writecache/limiter_test.go @@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) { l := newFlushLimiter(uint64(maxSize)) var currSize atomic.Int64 var eg errgroup.Group - for i := 0; i < 10_000; i++ { + for range 10_000 { eg.Go(func() error { defer l.release(single) defer currSize.Add(-1) diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index 7929754c1..ee6091845 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -8,7 +8,7 @@ import ( ) func tickN(t *timer.BlockTimer, n uint32) { - for i := uint32(0); i < n; i++ { + for range n { t.Tick(0) } } diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go index 435339683..8c6d01ae9 100644 --- a/pkg/services/object/acl/v2/util_test.go +++ b/pkg/services/object/acl/v2/util_test.go @@ -50,7 +50,7 @@ func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) metaHeader.SetBearerToken(b) metaHeader.SetSessionToken(s) - for i := uint32(0); i < depth; i++ { + for range depth { link := metaHeader metaHeader = new(session.RequestMetaHeader) metaHeader.SetOrigin(link) diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index 8a96dcaaa..f2f8881cf 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -19,7 +19,7 @@ import ( func GeneratePayloadPool(count uint, size uint) [][]byte { var pool [][]byte - for i := uint(0); i < count; i++ { + for range count { payload := make([]byte, size) _, _ = rand.Read(payload) @@ -30,8 +30,8 @@ func GeneratePayloadPool(count uint, size uint) [][]byte { func GenerateAttributePool(count uint) []objectSDK.Attribute { var pool []objectSDK.Attribute - for i := uint(0); i < count; i++ { - for j := uint(0); j < count; j++ { + for i := range count { + for j := range count { attr := *objectSDK.NewAttribute() attr.SetKey(fmt.Sprintf("key%d", i)) attr.SetValue(fmt.Sprintf("value%d", j)) @@ -43,7 +43,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute { func GenerateOwnerPool(count uint) []user.ID { var pool []user.ID - for i := uint(0); i < count; i++ { + for range count { pool = append(pool, usertest.ID()) } return pool @@ -118,7 +118,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption { func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption { return func(obj *objectSDK.Object) { var attrs []objectSDK.Attribute - for i := uint(0); i < count; i++ { + for range count { attrs = append(attrs, pool[rand.Intn(len(pool))]) } obj.SetAttributes(attrs...) 
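A minimal sketch of the loop rewrite the `intrange` linter enforces (illustrative only, not taken from this patch; assumes Go 1.22+, where ranging over an integer yields 0..n-1):

```go
package main

import "fmt"

func main() {
	const n = 3

	// Before: classic counted loop.
	for i := 0; i < n; i++ {
		fmt.Println("classic", i)
	}

	// After: range over an integer, yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range", i)
	}

	// When the index is unused, the loop variable is dropped entirely.
	for range n {
		fmt.Println("tick")
	}
}
```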
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go index 390c1cdc0..4da23a295 100644 --- a/scripts/populate-metabase/internal/populate.go +++ b/scripts/populate-metabase/internal/populate.go @@ -29,7 +29,7 @@ func PopulateWithObjects( ) { digits := "0123456789" - for i := uint(0); i < count; i++ { + for range count { obj := factory() id := []byte(fmt.Sprintf( @@ -59,7 +59,7 @@ func PopulateWithBigObjects( count uint, factory func() *objectSDK.Object, ) { - for i := uint(0); i < count; i++ { + for range count { group.Go(func() error { if err := populateWithBigObject(ctx, db, factory); err != nil { return fmt.Errorf("couldn't put a big object: %w", err) @@ -154,7 +154,7 @@ func PopulateGraveyard( wg := &sync.WaitGroup{} wg.Add(int(count)) - for i := uint(0); i < count; i++ { + for range count { obj := factory() prm := meta.PutPrm{} @@ -226,7 +226,7 @@ func PopulateLocked( wg := &sync.WaitGroup{} wg.Add(int(count)) - for i := uint(0); i < count; i++ { + for range count { defer wg.Done() obj := factory() diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go index 2bc7a5553..6f6b233cf 100644 --- a/scripts/populate-metabase/main.go +++ b/scripts/populate-metabase/main.go @@ -116,7 +116,7 @@ func populate() (err error) { eg, ctx := errgroup.WithContext(ctx) eg.SetLimit(int(jobs)) - for i := uint(0); i < numContainers; i++ { + for range numContainers { cid := cidtest.ID() for _, typ := range types { From 89892d9754988fce2d826edee7a7c6af949ced64 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 30 Oct 2024 15:13:45 +0300 Subject: [PATCH 127/591] [#1459] cli: Simplify slice append Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/internal/client/client.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index dcd67f0d9..ed9817b86 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -659,9 +659,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes for { n, ok = rdr.Read(buf) - for i := range n { - list = append(list, buf[i]) - } + list = append(list, buf[:n]...) 
if !ok { break } From 48862e0e63f0306cbe9556abc8e20bb7da27d399 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 30 Oct 2024 15:33:10 +0300 Subject: [PATCH 128/591] [#1459] .golanci.yml: Add tenv linter, fix issues Refs #1309 Signed-off-by: Evgenii Stratonikov --- .golangci.yml | 1 + cmd/frostfs-node/config/calls_test.go | 4 +--- cmd/frostfs-node/config/test/config.go | 14 +------------- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1235c62b4..57e3b4494 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -88,5 +88,6 @@ linters: - testifylint - protogetter - intrange + - tenv disable-all: true fast: false diff --git a/cmd/frostfs-node/config/calls_test.go b/cmd/frostfs-node/config/calls_test.go index 68bf1c679..bc149eb7d 100644 --- a/cmd/frostfs-node/config/calls_test.go +++ b/cmd/frostfs-node/config/calls_test.go @@ -1,7 +1,6 @@ package config_test import ( - "os" "strings" "testing" @@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) { envName := strings.ToUpper( strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator)) - err := os.Setenv(envName, value) - require.NoError(t, err) + t.Setenv(envName, value) c := configtest.EmptyConfig() diff --git a/cmd/frostfs-node/config/test/config.go b/cmd/frostfs-node/config/test/config.go index a93d7e648..e98c032f0 100644 --- a/cmd/frostfs-node/config/test/config.go +++ b/cmd/frostfs-node/config/test/config.go @@ -11,8 +11,6 @@ import ( ) func fromFile(path string) *config.Config { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - return config.New(path, "", "") } @@ -40,15 +38,6 @@ func ForEachFileType(pref string, f func(*config.Config)) { // ForEnvFileType creates config from `.env` file. func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) { - envs := os.Environ() - t.Cleanup(func() { - os.Clearenv() - for _, env := range envs { - keyValue := strings.Split(env, "=") - os.Setenv(keyValue[0], keyValue[1]) - } - }) - f(fromEnvFile(t, pref+".env")) } @@ -73,7 +62,6 @@ func loadEnv(t testing.TB, path string) { v = strings.Trim(v, `"`) - err = os.Setenv(k, v) - require.NoError(t, err, "can't set environment variable") + t.Setenv(k, v) } } From e74d05c03f6806cea12e69ce00d84bd69eab5fb7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 30 Oct 2024 17:43:54 +0300 Subject: [PATCH 129/591] [#1464] frostfsid: Add cache metrics Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/container.go | 3 ++- cmd/frostfs-node/frostfsid.go | 24 +++++++++++++++++++----- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 729fcb8af..f95f671cd 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -8,6 +8,7 @@ import ( containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" @@ -42,7 +43,7 @@ func initContainerService(_ context.Context, c *cfg) { cacheSize := morphconfig.FrostfsIDCacheSize(c.appCfg) if cacheSize > 0 { - frostfsIDSubjectProvider = 
newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL) + frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } c.shared.frostfsidClient = frostfsIDSubjectProvider diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index fb55a6019..9039915f5 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -14,22 +14,29 @@ type morphFrostfsIDCache struct { subjCache *expirable.LRU[util.Uint160, *client.Subject] - subjExtCache *expirable.LRU[util.Uint160, *client.SubjectExtended] + metrics cacheMetrics } -func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration) frostfsidcore.SubjectProvider { +func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int, ttl time.Duration, metrics cacheMetrics) frostfsidcore.SubjectProvider { return &morphFrostfsIDCache{ subjProvider: subjProvider, subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl), - subjExtCache: expirable.NewLRU(size, func(util.Uint160, *client.SubjectExtended) {}, ttl), + metrics: metrics, } } func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) { + hit := false + startedAt := time.Now() + defer func() { + m.metrics.AddMethodDuration("GetSubject", time.Since(startedAt), hit) + }() + result, found := m.subjCache.Get(addr) if found { + hit = true return result, nil } @@ -43,9 +50,16 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er } func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { - subjExt, found := m.subjExtCache.Get(addr) + hit := false + startedAt := time.Now() + defer func() { + m.metrics.AddMethodDuration("GetSubjectExtended", time.Since(startedAt), hit) + }() + + result, found := m.subjExtCache.Get(addr) if found { - return subjExt, nil + hit = true + return result, nil } var err error From 2285cfc36f1d815e722fb7ef246801a9029efb63 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 30 Oct 2024 17:44:31 +0300 Subject: [PATCH 130/591] [#1464] frostfsid: Cache `subject not found` error Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/frostfsid.go | 53 +++++++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 11 deletions(-) diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index 9039915f5..3cca09105 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -1,6 +1,7 @@ package main import ( + "strings" "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -9,10 +10,22 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" ) +type subjectWithError struct { + subject *client.Subject + err error +} + +type subjectExtWithError struct { + subject *client.SubjectExtended + err error +} + type morphFrostfsIDCache struct { subjProvider frostfsidcore.SubjectProvider - subjCache *expirable.LRU[util.Uint160, *client.Subject] + subjCache *expirable.LRU[util.Uint160, subjectWithError] + + subjExtCache *expirable.LRU[util.Uint160, subjectExtWithError] metrics cacheMetrics } @@ -21,7 +34,9 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int return &morphFrostfsIDCache{ subjProvider: subjProvider, - subjCache: expirable.NewLRU(size, func(util.Uint160, *client.Subject) {}, ttl), + subjCache: expirable.NewLRU(size, func(util.Uint160, subjectWithError) {}, 
ttl), + + subjExtCache: expirable.NewLRU(size, func(util.Uint160, subjectExtWithError) {}, ttl), metrics: metrics, } @@ -37,16 +52,21 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er result, found := m.subjCache.Get(addr) if found { hit = true - return result, nil + return result.subject, result.err } - result, err := m.subjProvider.GetSubject(addr) + subj, err := m.subjProvider.GetSubject(addr) if err != nil { + if m.isCacheableError(err) { + m.subjCache.Add(addr, subjectWithError{ + err: err, + }) + } return nil, err } - m.subjCache.Add(addr, result) - return result, nil + m.subjCache.Add(addr, subjectWithError{subject: subj}) + return subj, nil } func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { @@ -59,21 +79,32 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.Sub result, found := m.subjExtCache.Get(addr) if found { hit = true - return result, nil + return result.subject, result.err } - var err error - subjExt, err = m.subjProvider.GetSubjectExtended(addr) + subjExt, err := m.subjProvider.GetSubjectExtended(addr) if err != nil { + if m.isCacheableError(err) { + m.subjExtCache.Add(addr, subjectExtWithError{ + err: err, + }) + m.subjCache.Add(addr, subjectWithError{ + err: err, + }) + } return nil, err } - m.subjExtCache.Add(addr, subjExt) - m.subjCache.Add(addr, subjectFromSubjectExtended(subjExt)) + m.subjExtCache.Add(addr, subjectExtWithError{subject: subjExt}) + m.subjCache.Add(addr, subjectWithError{subject: subjectFromSubjectExtended(subjExt)}) return subjExt, nil } +func (m *morphFrostfsIDCache) isCacheableError(err error) bool { + return strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) +} + func subjectFromSubjectExtended(subjExt *client.SubjectExtended) *client.Subject { return &client.Subject{ PrimaryKey: subjExt.PrimaryKey, From c2effcc61c5bcb10f58e9d039b12ea0b22a17bba Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 31 Oct 2024 09:13:50 +0300 Subject: [PATCH 131/591] [#1465] Makefile: Update golangci-lint, fix warnings Signed-off-by: Evgenii Stratonikov --- Makefile | 2 +- cmd/frostfs-cli/modules/object/nodes.go | 2 +- pkg/local_object_storage/pilorama/boltdb.go | 2 +- pkg/services/object/common/writer/ec.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d92844bb5..94b1542d5 100755 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.60.3 +LINT_VERSION ?= 1.61.0 TRUECLOUDLAB_LINT_VERSION ?= 0.0.7 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 0eac4e6d2..e6918dfc9 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member prmHead.SetRawFlag(true) // to get an error instead of whole object eg, egCtx := errgroup.WithContext(cmd.Context()) - for idx := range len(members) { + for idx := range members { partObjID := members[idx] eg.Go(func() error { diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index e2d69cafa..09f2e1919 100644 --- 
a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M key, value = c.Prev() } - for i := range len(ms) { + for i := range ms { // Loop invariant: key represents the next stored timestamp after ms[i].Time. // 2. Insert the operation. diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index dffe52a6d..ee0681349 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -284,7 +284,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx } // try to save to any node not visited by current part - for i := range len(nodes) { + for i := range nodes { select { case <-ctx.Done(): return ctx.Err() From 5bcf81d1cc20408a925c27a9f069bbba6e0a2993 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 31 Oct 2024 10:06:51 +0300 Subject: [PATCH 132/591] [#1466] Remove woodpecker CI We use forgejo actions now. Signed-off-by: Evgenii Stratonikov --- .woodpecker/pre-commit.yml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 .woodpecker/pre-commit.yml diff --git a/.woodpecker/pre-commit.yml b/.woodpecker/pre-commit.yml deleted file mode 100644 index bdf3402de..000000000 --- a/.woodpecker/pre-commit.yml +++ /dev/null @@ -1,11 +0,0 @@ -pipeline: - # Kludge for non-root containers under WoodPecker - fix-ownership: - image: alpine:latest - commands: chown -R 1234:1234 . - - pre-commit: - image: git.frostfs.info/truecloudlab/frostfs-ci:v0.36 - commands: - - export HOME="$(getent passwd $(id -u) | cut '-d:' -f6)" - - pre-commit run --hook-stage manual From d19ab43500c24795b9511f0644b706a010983a5a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 30 Oct 2024 16:42:09 +0300 Subject: [PATCH 133/591] [#1462] node: Add off-cpu profiler Signed-off-by: Dmitrii Stepanov --- go.mod | 2 ++ go.sum | 14 ++++++++++++++ pkg/util/http/pprof.go | 6 ++++++ 3 files changed, 22 insertions(+) diff --git a/go.mod b/go.mod index aefe2889a..ba9ec375c 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/cheggaaa/pb v1.0.29 github.com/chzyer/readline v1.5.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/felixge/fgprof v0.9.5 github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 github.com/gdamore/tcell/v2 v2.7.4 github.com/go-pkgz/expirable-cache/v3 v3.0.0 @@ -77,6 +78,7 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect diff --git a/go.sum b/go.sum index 4d44079d4..a037a99af 100644 --- a/go.sum +++ b/go.sum @@ -41,6 +41,9 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod 
h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= @@ -68,6 +71,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BMXYYRWTLOJKlh+lOBt6nUQgXAfB7oVIQt5cNreqSLI= github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -91,6 +96,9 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw= github.com/go-pkgz/expirable-cache/v3 v3.0.0/go.mod h1:2OQiDyEGQalYecLWmXprm3maPXeVb5/6/X7yRPYTzec= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -109,6 +117,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -130,6 +140,7 @@ github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= @@ -148,6 +159,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -210,6 +222,7 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo= github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= @@ -361,6 +374,7 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/pkg/util/http/pprof.go b/pkg/util/http/pprof.go index 7a0413000..f85fd2ea9 100644 --- a/pkg/util/http/pprof.go +++ b/pkg/util/http/pprof.go @@ -3,8 +3,14 @@ package httputil import ( "net/http" "net/http/pprof" + + "github.com/felixge/fgprof" ) +func init() { + http.DefaultServeMux.Handle("/debug/fgprof", fgprof.Handler()) +} + // initializes pprof package in order to // register Prometheus handlers on http.DefaultServeMux. 
var _ = pprof.Handler("") From 6c45a17af66843ac5757fcd4b8f8e6acd0bca087 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 31 Oct 2024 13:13:03 +0300 Subject: [PATCH 134/591] [#1467] node: Break notary deposit wait after VUB Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/morph.go | 70 ++++++++++++++++++++++---------------- cmd/frostfs-node/netmap.go | 2 +- internal/logs/logs.go | 1 - pkg/innerring/notary.go | 5 +-- pkg/morph/client/client.go | 23 +++++++++++++ pkg/morph/client/notary.go | 17 ++++----- 6 files changed, 77 insertions(+), 41 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index e85209059..0969f5579 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -17,15 +17,16 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter" + "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "go.uber.org/zap" ) const ( newEpochNotification = "NewEpoch" - - // amount of tries(blocks) before notary deposit timeout. - notaryDepositRetriesAmount = 300 ) func (c *cfg) initMorphComponents(ctx context.Context) { @@ -128,7 +129,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { return } - tx, err := makeNotaryDeposit(c) + tx, vub, err := makeNotaryDeposit(c) fatalOnErr(err) if tx.Equals(util.Uint256{}) { @@ -139,11 +140,11 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { return } - err = waitNotaryDeposit(ctx, c, tx) + err = waitNotaryDeposit(ctx, c, tx, vub) fatalOnErr(err) } -func makeNotaryDeposit(c *cfg) (util.Uint256, error) { +func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) { const ( // gasMultiplier defines how many times more the notary // balance must be compared to the GAS balance of the node: @@ -157,7 +158,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(c.cfgMorph.client, gasMultiplier, gasDivisor) if err != nil { - return util.Uint256{}, fmt.Errorf("could not calculate notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("could not calculate notary deposit: %w", err) } return c.cfgMorph.client.DepositEndlessNotary(depositAmount) @@ -168,32 +169,43 @@ var ( errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network") ) -func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error { - for range notaryDepositRetriesAmount { - c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } +type waiterClient struct { + c *client.Client +} - ok, err := c.cfgMorph.client.TxHalt(tx) - if err == nil { - if ok { - c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) - return nil - } +func (w *waiterClient) Context() context.Context { + return context.Background() +} - return errNotaryDepositFail - } +func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { + return w.c.GetApplicationLog(hash, trig) +} - err = c.cfgMorph.client.Wait(ctx, 1) - if err != nil { - return fmt.Errorf("could not wait for one block in chain: %w", err) - } +func (w *waiterClient) GetBlockCount() (uint32, error) { + return w.c.BlockCount() +} + +func 
(w *waiterClient) GetVersion() (*result.Version, error) { + return w.c.GetVersion() +} + +func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { + w, err := waiter.NewPollingBased(&waiterClient{c: c.cfgMorph.client}) + if err != nil { + return fmt.Errorf("could not create notary deposit waiter: %w", err) } - - return errNotaryDepositTimeout + res, err := w.WaitAny(ctx, vub, tx) + if err != nil { + if errors.Is(err, waiter.ErrTxNotAccepted) { + return errNotaryDepositTimeout + } + return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err) + } + if res.Execution.VMState.HasFlag(vmstate.Halt) { + c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) + return nil + } + return errNotaryDepositFail } func listenMorphNotifications(ctx context.Context, c *cfg) { diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 0e0571760..6e2a7c44a 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -192,7 +192,7 @@ func addNewEpochNotificationHandlers(c *cfg) { if c.cfgMorph.notaryEnabled { addNewEpochAsyncNotificationHandler(c, func(_ event.Event) { - _, err := makeNotaryDeposit(c) + _, _, err := makeNotaryDeposit(c) if err != nil { c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 0e9d58f32..e4bac4930 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -142,7 +142,6 @@ const ( ClientNotaryRequestWithPreparedMainTXInvoked = "notary request with prepared main TX invoked" ClientNotaryRequestInvoked = "notary request invoked" ClientNotaryDepositTransactionWasSuccessfullyPersisted = "notary deposit transaction was successfully persisted" - ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted = "attempt to wait for notary deposit transaction to get persisted" ClientNeoClientInvoke = "neo client invoke" ClientNativeGasTransferInvoke = "native gas transfer invoke" ClientBatchGasTransferInvoke = "batch gas transfer invoke" diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index e6f2b1de4..5cdbb971c 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -40,13 +40,14 @@ func (s *Server) depositMainNotary() (tx util.Uint256, err error) { ) } -func (s *Server) depositSideNotary() (tx util.Uint256, err error) { +func (s *Server) depositSideNotary() (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err) } - return s.morphClient.DepositEndlessNotary(depositAmount) + tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount) + return tx, err } func (s *Server) notaryHandler(_ event.Event) { diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index df521f56b..933f1039f 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -19,6 +19,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" @@ -461,6 +462,28 @@ func (c *Client) TxHalt(h util.Uint256) (res bool, err error) { return len(aer.Executions) > 0 && 
aer.Executions[0].VMState.HasFlag(vmstate.Halt), nil } +func (c *Client) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return nil, ErrConnectionLost + } + + return c.client.GetApplicationLog(hash, trig) +} + +func (c *Client) GetVersion() (*result.Version, error) { + c.switchLock.RLock() + defer c.switchLock.RUnlock() + + if c.inactive { + return nil, ErrConnectionLost + } + + return c.client.GetVersion() +} + // TxHeight returns true if transaction has been successfully executed and persisted. func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { c.switchLock.RLock() diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 616b3b5c3..2a500b31b 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -140,7 +140,7 @@ func (c *Client) ProbeNotary() (res bool) { // use this function. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uint256, err error) { +func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -163,7 +163,8 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin } till := max(int64(bc+delta), currentTill) - return c.depositNotary(amount, till) + res, _, err := c.depositNotary(amount, till) + return res, err } // DepositEndlessNotary calls notary deposit method. Unlike `DepositNotary`, @@ -171,12 +172,12 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin // This allows to avoid ValidAfterDeposit failures. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, err error) { +func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return util.Uint256{}, ErrConnectionLost + return util.Uint256{}, 0, ErrConnectionLost } if c.notary == nil { @@ -187,7 +188,7 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (res util.Uint256, e return c.depositNotary(amount, math.MaxUint32) } -func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint256, err error) { +func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, c.notary.notary, @@ -195,7 +196,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { if !errors.Is(err, neorpc.ErrAlreadyExists) { - return util.Uint256{}, fmt.Errorf("can't make notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("can't make notary deposit: %w", err) } // Transaction is already in mempool waiting to be processed. 
@@ -205,7 +206,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 zap.Int64("expire_at", till), zap.Uint32("vub", vub), zap.Error(err)) - return util.Uint256{}, nil + return util.Uint256{}, 0, nil } c.logger.Info(logs.ClientNotaryDepositInvoke, @@ -214,7 +215,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (res util.Uint2 zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) - return txHash, nil + return txHash, vub, nil } // GetNotaryDeposit returns deposit of client's account in notary contract. From 17ec84151b4f9f3d2ff593ecdad41b8c0c860e02 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 5 Nov 2024 16:15:59 +0300 Subject: [PATCH 135/591] [#532] cli: Respect XDG base directory spec XDG base directory specification defines where various files should be looked by an application. Hopefully, this makes `frostfs-cli` more predictable and pleasant to work with. Luckily for us, golang already has everything we need in the stdlib. This commit also gets rid of `github.com/mitchellh/go-homedir` dependency. Close #532 Refs #1455 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/root.go | 11 +++++------ go.mod | 1 - go.sum | 2 -- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/cmd/frostfs-cli/modules/root.go b/cmd/frostfs-cli/modules/root.go index c0282586a..88acab341 100644 --- a/cmd/frostfs-cli/modules/root.go +++ b/cmd/frostfs-cli/modules/root.go @@ -21,7 +21,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/gendoc" - "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -112,13 +111,13 @@ func initConfig() { // Use config file from the flag. viper.SetConfigFile(cfgFile) } else { - // Find home directory. - home, err := homedir.Dir() + // Find config directory. 
+ configDir, err := os.UserConfigDir() if err != nil { - common.PrintVerbose(rootCmd, "Get homedir: %s", err) + common.PrintVerbose(rootCmd, "Get config dir: %s", err) } else { - // Search config in `$HOME/.config/frostfs-cli/` with name "config.yaml" - viper.AddConfigPath(filepath.Join(home, ".config", "frostfs-cli")) + // Search config in `$XDG_CONFIG_HOME/frostfs-cli/` with name "config.yaml" + viper.AddConfigPath(filepath.Join(configDir, "frostfs-cli")) viper.SetConfigName("config") viper.SetConfigType("yaml") } diff --git a/go.mod b/go.mod index ba9ec375c..886fa958f 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,6 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 - github.com/mitchellh/go-homedir v1.1.0 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-multiaddr v0.12.1 github.com/nspcc-dev/neo-go v0.106.3 diff --git a/go.sum b/go.sum index a037a99af..6ed130cdb 100644 --- a/go.sum +++ b/go.sum @@ -179,8 +179,6 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= From 15102e6dfd3eddca34d83e2264c79d05d665939f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 6 Nov 2024 10:34:16 +0300 Subject: [PATCH 136/591] [#1471] Replace sort.Slice in some places MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `slices.SortFunc` doesn't use reflection and is a bit faster. I have done some micro-benchmarks for `[]NodeInfo`: ``` $ benchstat -col "/func" out goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ sort.Slice │ slices.SortFunc │ │ sec/op │ sec/op vs base │ Sort-8 2.130µ ± 2% 1.253µ ± 2% -41.20% (p=0.000 n=10) ``` Haven't included them, though, as they I don't see them being used a lot. 
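A minimal sketch of the rewrite pattern, assuming Go 1.21+ for the `slices` and `cmp` packages (illustrative example, not code from this patch):

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"sort"
)

type item struct{ weight int }

func main() {
	a := []item{{3}, {1}, {2}}
	// Before: sort.Slice compares by index and uses reflection under the hood.
	sort.Slice(a, func(i, j int) bool { return a[i].weight < a[j].weight })

	b := []item{{3}, {1}, {2}}
	// After: slices.SortFunc is generic, compares elements directly,
	// and its comparator returns negative/zero/positive like cmp.Compare.
	slices.SortFunc(b, func(x, y item) int { return cmp.Compare(x.weight, y.weight) })

	fmt.Println(a, b)
}
```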
Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/internal/client/client.go | 5 ++--- cmd/frostfs-cli/internal/common/tracing.go | 10 +++------- pkg/local_object_storage/pilorama/batch.go | 6 +++--- pkg/local_object_storage/pilorama/boltdb.go | 12 ++++++++---- pkg/local_object_storage/pilorama/forest.go | 5 +---- pkg/local_object_storage/pilorama/inmemory.go | 13 ++++++++----- pkg/services/tree/service.go | 9 ++++----- 7 files changed, 29 insertions(+), 31 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index ed9817b86..948d61f36 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -670,9 +670,8 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - sort.Slice(list, func(i, j int) bool { - lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString() - return strings.Compare(lhs, rhs) < 0 + slices.SortFunc(list, func(a, b oid.ID) int { + return strings.Compare(a.EncodeToString(), b.EncodeToString()) }) return &SearchObjectsRes{ diff --git a/cmd/frostfs-cli/internal/common/tracing.go b/cmd/frostfs-cli/internal/common/tracing.go index 30c2f2b1a..10863ed1e 100644 --- a/cmd/frostfs-cli/internal/common/tracing.go +++ b/cmd/frostfs-cli/internal/common/tracing.go @@ -2,7 +2,7 @@ package common import ( "context" - "sort" + "slices" "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" @@ -45,15 +45,11 @@ func StartClientCommandSpan(cmd *cobra.Command) { }) commonCmd.ExitOnErr(cmd, "init tracing: %w", err) - var components sort.StringSlice + var components []string for c := cmd; c != nil; c = c.Parent() { components = append(components, c.Name()) } - for i, j := 0, len(components)-1; i < j; { - components.Swap(i, j) - i++ - j-- - } + slices.Reverse(components) operation := strings.Join(components, ".") ctx, span := tracing.StartSpanFromContext(cmd.Context(), operation) diff --git a/pkg/local_object_storage/pilorama/batch.go b/pkg/local_object_storage/pilorama/batch.go index 520c6dfb4..4c5238921 100644 --- a/pkg/local_object_storage/pilorama/batch.go +++ b/pkg/local_object_storage/pilorama/batch.go @@ -1,9 +1,9 @@ package pilorama import ( + "cmp" "encoding/binary" "slices" - "sort" "sync" "time" @@ -48,8 +48,8 @@ func (b *batch) run() { // Sorting without a mutex is ok, because we append to this slice only if timer is non-nil. // See (*boltForest).addBatch for details. 
- sort.Slice(b.operations, func(i, j int) bool { - return b.operations[i].Time < b.operations[j].Time + slices.SortFunc(b.operations, func(mi, mj *Move) int { + return cmp.Compare(mi.Time, mj.Time) }) b.operations = slices.CompactFunc(b.operations, func(x, y *Move) bool { return x.Time == y.Time }) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 09f2e1919..7bce1f340 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "slices" - "sort" "strconv" "sync" "time" @@ -1093,14 +1092,19 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr return res, last, metaerr.Wrap(err) } +func sortByFilename(nodes []NodeInfo) { + slices.SortFunc(nodes, func(a, b NodeInfo) int { + return bytes.Compare(a.Meta.GetAttr(AttributeFilename), b.Meta.GetAttr(AttributeFilename)) + }) +} + func sortAndCut(result []NodeInfo, last *string) []NodeInfo { var lastBytes []byte if last != nil { lastBytes = []byte(*last) } - sort.Slice(result, func(i, j int) bool { - return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1 - }) + sortByFilename(result) + for i := range result { if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 { return result[i:] diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 78503bada..bb5c22e51 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -1,7 +1,6 @@ package pilorama import ( - "bytes" "context" "errors" "fmt" @@ -192,9 +191,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI return nil, start, nil } - sort.Slice(res, func(i, j int) bool { - return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1 - }) + sortByFilename(res) r := mergeNodeInfos(res) for i := range r { diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index c9f5df3b7..ce7b3db1e 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -1,6 +1,9 @@ package pilorama -import "sort" +import ( + "cmp" + "slices" +) // nodeInfo couples parent and metadata. 
type nodeInfo struct { @@ -131,10 +134,10 @@ func (t tree) getChildren(parent Node) []Node { } } - sort.Slice(children, func(i, j int) bool { - a := t.infoMap[children[i]] - b := t.infoMap[children[j]] - return a.Meta.Time < b.Meta.Time + slices.SortFunc(children, func(ci, cj uint64) int { + a := t.infoMap[ci] + b := t.infoMap[cj] + return cmp.Compare(a.Meta.Time, b.Meta.Time) }) return children } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 10c3b6ccc..8097d545c 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -5,7 +5,7 @@ import ( "context" "errors" "fmt" - "sort" + "slices" "sync" "sync/atomic" @@ -575,10 +575,9 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di if len(nodes) == 0 { return nodes, nil } - less := func(i, j int) bool { - return bytes.Compare(nodes[i].Meta.GetAttr(pilorama.AttributeFilename), nodes[j].Meta.GetAttr(pilorama.AttributeFilename)) < 0 - } - sort.Slice(nodes, less) + slices.SortFunc(nodes, func(a, b pilorama.NodeInfo) int { + return bytes.Compare(a.Meta.GetAttr(pilorama.AttributeFilename), b.Meta.GetAttr(pilorama.AttributeFilename)) + }) return nodes, nil default: return nil, fmt.Errorf("unsupported order direction: %s", d.String()) From 33ad753302754c102247424b03ec4d1d8b9042c3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 6 Nov 2024 10:57:01 +0300 Subject: [PATCH 137/591] [#1473] policer: Add tracing span To filter HEAD requests from policer. Signed-off-by: Dmitrii Stepanov --- pkg/services/policer/check.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 401977f66..dbc9ea53c 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -9,14 +9,25 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" policycore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Policer.ProcessObject", trace.WithAttributes( + attribute.String("address", objInfo.Address.String()), + attribute.Bool("is_linking_object", objInfo.IsLinkingObject), + attribute.Bool("is_ec_part", objInfo.ECInfo != nil), + attribute.String("type", objInfo.Type.String()), + )) + defer span.End() + cnr, err := p.cnrSrc.Get(objInfo.Address.Container()) if err != nil { if client.IsErrContainerNotFound(err) { From 9902965ff49094c83b755e2f81bd386f2a74c347 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 28 Oct 2024 15:44:47 +0300 Subject: [PATCH 138/591] [#1451] writer: Sign EC parts with node's private key As EC put request may be processed only by container node, so sign requests with current node private to not to perform APE checks. 
Signed-off-by: Dmitrii Stepanov --- pkg/services/object/common/writer/ec.go | 34 ++++++++++++++------ pkg/services/object/common/writer/ec_test.go | 4 +++ pkg/services/object/util/prm.go | 9 +++++- 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index ee0681349..3f7d4d49c 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -37,10 +37,12 @@ type ECWriter struct { ObjectMeta object.ContentMeta ObjectMetaValid bool + + remoteRequestSignKey *ecdsa.PrivateKey } func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { - relayed, err := e.relayIfNotContainerNode(ctx, obj) + relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj) if err != nil { return err } @@ -60,23 +62,35 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error e.ObjectMetaValid = true } + if isContainerNode { + restoreTokens := e.CommonPrm.ForgetTokens() + defer restoreTokens() + // As request executed on container node, so sign request with container key. + e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil) + if err != nil { + return err + } + } else { + e.remoteRequestSignKey = e.Key + } + if obj.ECHeader() != nil { return e.writeECPart(ctx, obj) } return e.writeRawObject(ctx, obj) } -func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) { - if e.Relay == nil { - return false, nil - } +func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { currentNodeIsContainerNode, err := e.currentNodeIsContainerNode() if err != nil { - return false, err + return false, false, err } if currentNodeIsContainerNode { // object can be splitted or saved local - return false, nil + return false, true, nil + } + if e.Relay == nil { + return false, currentNodeIsContainerNode, nil } objID := object.AddressOf(obj).Object() var index uint32 @@ -85,9 +99,9 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O index = obj.ECHeader().Index() } if err := e.relayToContainerNode(ctx, objID, index); err != nil { - return false, err + return false, false, err } - return true, nil + return true, currentNodeIsContainerNode, nil } func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { @@ -338,7 +352,7 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n client.NodeInfoFromNetmapElement(&clientNodeInfo, node) remoteTaget := remoteWriter{ - privateKey: e.Key, + privateKey: e.remoteRequestSignKey, clientConstructor: e.Config.ClientConstructor, commonPrm: e.CommonPrm, nodeInfo: clientNodeInfo, diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index 32863d678..c828c79ba 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -127,6 +128,8 @@ func TestECWriter(t *testing.T) { ownerKey, err 
:= keys.NewPrivateKey() require.NoError(t, err) + nodeKey, err := keys.NewPrivateKey() + require.NoError(t, err) pool, err := ants.NewPool(4, ants.WithNonblocking(true)) require.NoError(t, err) @@ -141,6 +144,7 @@ func TestECWriter(t *testing.T) { RemotePool: pool, Logger: log, ClientConstructor: clientConstructor{vectors: ns}, + KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), }, PlacementOpts: append( []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)}, diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go index 022b9fe5b..80c0db39e 100644 --- a/pkg/services/object/util/prm.go +++ b/pkg/services/object/util/prm.go @@ -100,11 +100,18 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) { // ForgetTokens forgets all the tokens read from the request's // meta information before. -func (p *CommonPrm) ForgetTokens() { +func (p *CommonPrm) ForgetTokens() func() { if p != nil { + tk := p.token + br := p.bearer p.token = nil p.bearer = nil + return func() { + p.token = tk + p.bearer = br + } } + return func() {} } func CommonPrmFromV2(req interface { From 5b1ba8e23dfbeae687deb19aaebce316cc266ad9 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 28 Oct 2024 15:46:38 +0300 Subject: [PATCH 139/591] [#1451] ape: Perform strict APE checks for EC parts Signed-off-by: Dmitrii Stepanov --- pkg/services/object/ape/checker_test.go | 1 + pkg/services/object/ape/request.go | 44 ++++++++++++++++--------- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index 090f6a83c..7ebd147f3 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -695,6 +695,7 @@ func TestPutECChunk(t *testing.T) { nm := &netmapStub{ currentEpoch: 100, netmaps: map[uint64]*netmapSDK.NetMap{ + 99: netmap, 100: netmap, }, } diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index da5307ca7..d07e59067 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -3,6 +3,7 @@ package ape import ( "context" "crypto/sha256" + "errors" "fmt" "net" "strconv" @@ -11,6 +12,7 @@ import ( aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -24,6 +26,8 @@ import ( var defaultRequest = aperequest.Request{} +var errECMissingParentObjectID = errors.New("missing EC parent object ID") + func nativeSchemaRole(role acl.Role) string { switch role { case acl.RoleOwner: @@ -122,7 +126,10 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re header = headerObjSDK.ToV2().GetHeader() } } - header = c.fillHeaderWithECParent(ctx, prm, header) + header, err := c.fillHeaderWithECParent(ctx, prm, header) + if err != nil { + return defaultRequest, fmt.Errorf("get EC parent header: %w", err) + } reqProps := map[string]string{ nativeschema.PropertyKeyActorPublicKey: prm.SenderKey, nativeschema.PropertyKeyActorRole: prm.Role, @@ -133,7 +140,6 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re reqProps[xheadKey] = xhead.GetValue() } - 
var err error reqProps, err = c.fillWithUserClaimTags(reqProps, prm) if err != nil { return defaultRequest, err @@ -155,35 +161,43 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re ), nil } -func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) *objectV2.Header { +func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, header *objectV2.Header) (*objectV2.Header, error) { if header == nil { - return header + return header, nil } if header.GetEC() == nil { - return header - } - if prm.Role == nativeschema.PropertyValueContainerRoleContainer || - prm.Role == nativeschema.PropertyValueContainerRoleIR { - return header + return header, nil } parentObjRefID := header.GetEC().Parent if parentObjRefID == nil { - return header + return nil, errECMissingParentObjectID } var parentObjID oid.ID if err := parentObjID.ReadFromV2(*parentObjRefID); err != nil { - return header + return nil, fmt.Errorf("EC parent object ID format error: %w", err) } // only container node have access to collect parent object contNode, err := c.currentNodeIsContainerNode(prm.Container) - if err != nil || !contNode { - return header + if err != nil { + return nil, fmt.Errorf("check container node status: %w", err) + } + if !contNode { + return header, nil } parentObj, err := c.headerProvider.GetHeader(ctx, prm.Container, parentObjID, false) if err != nil { - return header + if isLogicalError(err) { + return header, nil + } + return nil, fmt.Errorf("EC parent header request: %w", err) } - return parentObj.ToV2().GetHeader() + return parentObj.ToV2().GetHeader(), nil +} + +func isLogicalError(err error) bool { + var errObjRemoved *apistatus.ObjectAlreadyRemoved + var errObjNotFound *apistatus.ObjectNotFound + return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound) } func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { From 9a77527f46f2db09628bef5b4567ae77cbdb3f69 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 28 Oct 2024 16:18:28 +0300 Subject: [PATCH 140/591] [#1451] ape: Drop unused Signed-off-by: Dmitrii Stepanov --- pkg/services/object/ape/checker.go | 3 --- pkg/services/object/ape/request.go | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index 3f6cc7c20..8ce1b429d 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -67,9 +67,6 @@ type Prm struct { // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow. SoftAPECheck bool - // If true, object headers will not retrieved from storage engine. - WithoutHeaderRequest bool - // The request's bearer token. It is used in order to check APE overrides with the token. 
BearerToken *bearer.Token diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index d07e59067..e12fccb5e 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -120,7 +120,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re var header *objectV2.Header if prm.Header != nil { header = prm.Header - } else if prm.Object != nil && !prm.WithoutHeaderRequest { + } else if prm.Object != nil { headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object, true) if err == nil { header = headerObjSDK.ToV2().GetHeader() From 3cf6ea745da38fe2dede263c88275fd4e96576e0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 29 Oct 2024 12:25:59 +0300 Subject: [PATCH 141/591] [#1451] ec: Check all parts are saved Signed-off-by: Dmitrii Stepanov --- pkg/services/object/common/writer/ec.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 3f7d4d49c..571bae7bb 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -25,7 +25,10 @@ import ( var _ transformer.ObjectWriter = (*ECWriter)(nil) -var errUnsupportedECObject = errors.New("object is not supported for erasure coding") +var ( + errUnsupportedECObject = errors.New("object is not supported for erasure coding") + errFailedToSaveAllECParts = errors.New("failed to save all EC parts") +) type ECWriter struct { Config *Config @@ -249,6 +252,13 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er singleErr: err, } } + for idx := range partsProcessed { + if !partsProcessed[idx].Load() { + return errIncompletePut{ + singleErr: errFailedToSaveAllECParts, + } + } + } return nil } From 7edec9193cf7fd3d3f68a7cae3def81247b525eb Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 29 Oct 2024 12:39:50 +0300 Subject: [PATCH 142/591] [#1451] placement: Return copy of slice from container nodes cache Nodes from cache could be changed by traverser, if no objectID specified. So it is required to return copy of cache's slice. 
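A minimal standalone sketch of the aliasing hazard (hypothetical types, not the
real traverser or cache code): because the cached [][]NodeInfo is handed out
directly, an in-place reorder by one caller is visible to every later caller and
to the cache itself.

    package main

    import (
        "fmt"
        "sort"
    )

    type nodeInfo struct{ addr string }

    // cache stands in for the placement vectors stored in ContainerNodesCache.
    var cache = [][]nodeInfo{{{"c"}, {"a"}, {"b"}}}

    // containerNodes returns the cached slice without copying it.
    func containerNodes() [][]nodeInfo { return cache }

    func main() {
        v := containerNodes()
        // A traverser-style in-place reorder of the returned vector...
        sort.Slice(v[0], func(i, j int) bool { return v[0][i].addr < v[0][j].addr })
        // ...also reorders the cached data: prints [{a} {b} {c}].
        fmt.Println(cache[0])
    }

Returning a per-call copy of the outer and inner slices removes this sharing.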
Signed-off-by: Dmitrii Stepanov --- pkg/services/object_manager/placement/cache.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/services/object_manager/placement/cache.go b/pkg/services/object_manager/placement/cache.go index 217261877..2a8460ca5 100644 --- a/pkg/services/object_manager/placement/cache.go +++ b/pkg/services/object_manager/placement/cache.go @@ -3,6 +3,7 @@ package placement import ( "crypto/sha256" "fmt" + "slices" "sync" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -44,7 +45,7 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p raw, ok := c.containerCache.Get(cnr) c.mtx.Unlock() if ok { - return raw, nil + return c.cloneResult(raw), nil } } else { c.lastEpoch = nm.Epoch() @@ -65,5 +66,13 @@ func (c *ContainerNodesCache) ContainerNodes(nm *netmapSDK.NetMap, cnr cid.ID, p c.containerCache.Add(cnr, cn) } c.mtx.Unlock() - return cn, nil + return c.cloneResult(cn), nil +} + +func (c *ContainerNodesCache) cloneResult(nodes [][]netmapSDK.NodeInfo) [][]netmapSDK.NodeInfo { + result := make([][]netmapSDK.NodeInfo, len(nodes)) + for repIdx := range nodes { + result[repIdx] = slices.Clone(nodes[repIdx]) + } + return result } From c8fb154151c4a944c87bea373fe9828ac267f2ea Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 6 Nov 2024 15:25:02 +0300 Subject: [PATCH 143/591] [#1475] Remove container estimation code Signed-off-by: Evgenii Stratonikov --- internal/logs/logs.go | 3 - pkg/innerring/blocktimer.go | 53 +------ pkg/innerring/blocktimer_test.go | 42 +----- pkg/innerring/initialization.go | 15 +- pkg/innerring/innerring.go | 2 +- .../processors/netmap/handlers_test.go | 19 --- .../processors/netmap/process_epoch.go | 15 -- pkg/innerring/processors/netmap/processor.go | 10 -- pkg/morph/client/container/client.go | 6 - pkg/morph/client/container/estimations.go | 54 -------- pkg/morph/client/container/load.go | 131 ------------------ pkg/morph/event/container/estimates.go | 78 ----------- pkg/morph/event/container/estimates_test.go | 80 ----------- 13 files changed, 9 insertions(+), 499 deletions(-) delete mode 100644 pkg/morph/client/container/estimations.go delete mode 100644 pkg/morph/client/container/load.go delete mode 100644 pkg/morph/event/container/estimates.go delete mode 100644 pkg/morph/event/container/estimates_test.go diff --git a/internal/logs/logs.go b/internal/logs/logs.go index e4bac4930..d0bac4d11 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -17,8 +17,6 @@ const ( ) const ( - InnerringNonalphabetModeDoNotStopContainerEstimations = "non-alphabet mode, do not stop container estimations" - InnerringCantStopEpochEstimation = "can't stop epoch estimation" InnerringCantMakeNotaryDepositInMainChain = "can't make notary deposit in main chain" InnerringCantMakeNotaryDepositInSideChain = "can't make notary deposit in side chain" InnerringNotaryDepositHasAlreadyBeenMade = "notary deposit has already been made" @@ -343,7 +341,6 @@ const ( NetmapCantGetTransactionHeight = "can't get transaction height" NetmapCantResetEpochTimer = "can't reset epoch timer" NetmapCantGetNetmapSnapshotToPerformCleanup = "can't get netmap snapshot to perform cleanup" - NetmapCantStartContainerSizeEstimation = "can't start container size estimation" NetmapNonAlphabetModeIgnoreNewEpochTick = "non alphabet mode, ignore new epoch tick" NetmapNextEpoch = "next epoch" NetmapCantInvokeNetmapNewEpoch = "can't invoke netmap.NewEpoch" diff --git a/pkg/innerring/blocktimer.go 
b/pkg/innerring/blocktimer.go index ad69f207b..3db504368 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -3,14 +3,10 @@ package innerring import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" - "go.uber.org/zap" ) type ( @@ -19,28 +15,12 @@ type ( EpochDuration() uint64 } - alphaState interface { - IsAlphabet() bool - } - newEpochHandler func() - containerEstimationStopper interface { - StopEstimation(p container.StopEstimationPrm) error - } - epochTimerArgs struct { - l *logger.Logger - - alphabetState alphaState - newEpochHandlers []newEpochHandler - cnrWrapper containerEstimationStopper // to invoke stop container estimation - epoch epochState // to specify which epoch to stop, and epoch duration - - stopEstimationDMul uint32 // X: X/Y of epoch in blocks - stopEstimationDDiv uint32 // Y: X/Y of epoch in blocks + epoch epochState // to specify which epoch to stop, and epoch duration } emitTimerArgs struct { @@ -74,7 +54,7 @@ func (s *Server) tickTimers(h uint32) { } func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { - epochTimer := timer.NewBlockTimer( + return timer.NewBlockTimer( func() (uint32, error) { return uint32(args.epoch.EpochDuration()), nil }, @@ -84,35 +64,6 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { } }, ) - - // sub-timer for epoch timer to tick stop container estimation events at - // some block in epoch - epochTimer.OnDelta( - args.stopEstimationDMul, - args.stopEstimationDDiv, - func() { - if !args.alphabetState.IsAlphabet() { - args.l.Debug(logs.InnerringNonalphabetModeDoNotStopContainerEstimations) - return - } - - epochN := args.epoch.EpochCounter() - if epochN == 0 { // estimates are invalid in genesis epoch - return - } - - prm := container.StopEstimationPrm{} - prm.SetEpoch(epochN - 1) - - err := args.cnrWrapper.StopEstimation(prm) - if err != nil { - args.l.Warn(logs.InnerringCantStopEpochEstimation, - zap.Uint64("epoch", epochN), - zap.String("error", err.Error())) - } - }) - - return epochTimer } func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer { diff --git a/pkg/innerring/blocktimer_test.go b/pkg/innerring/blocktimer_test.go index 242c0903b..4cbe7e394 100644 --- a/pkg/innerring/blocktimer_test.go +++ b/pkg/innerring/blocktimer_test.go @@ -3,29 +3,20 @@ package innerring import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "github.com/stretchr/testify/require" ) func TestEpochTimer(t *testing.T) { t.Parallel() - alphaState := &testAlphabetState{isAlphabet: true} neh := &testNewEpochHandler{} - cnrStopper := &testContainerEstStopper{} epochState := &testEpochState{ counter: 99, duration: 10, } args := &epochTimerArgs{ - l: test.NewLogger(t), - alphabetState: alphaState, - newEpochHandlers: []newEpochHandler{neh.Handle}, - cnrWrapper: cnrStopper, - epoch: epochState, - stopEstimationDMul: 2, - stopEstimationDDiv: 10, + newEpochHandlers: []newEpochHandler{neh.Handle}, + epoch: epochState, } et := newEpochTimer(args) err := et.Reset() @@ -33,63 +24,43 @@ func TestEpochTimer(t 
*testing.T) { et.Tick(100) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 0, cnrStopper.called, "invalid container stop handler calls") et.Tick(101) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(102) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(103) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") var h uint32 for h = 104; h < 109; h++ { et.Tick(h) require.Equal(t, 0, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") } et.Tick(109) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(110) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 1, cnrStopper.called, "invalid container stop handler calls") et.Tick(111) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") et.Tick(112) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") et.Tick(113) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") for h = 114; h < 119; h++ { et.Tick(h) require.Equal(t, 1, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") } et.Tick(120) require.Equal(t, 2, neh.called, "invalid new epoch handler calls") - require.Equal(t, 2, cnrStopper.called, "invalid container stop handler calls") -} - -type testAlphabetState struct { - isAlphabet bool -} - -func (s *testAlphabetState) IsAlphabet() bool { - return s.isAlphabet } type testNewEpochHandler struct { @@ -100,15 +71,6 @@ func (h *testNewEpochHandler) Handle() { h.called++ } -type testContainerEstStopper struct { - called int -} - -func (s *testContainerEstStopper) StopEstimation(_ container.StopEstimationPrm) error { - s.called++ - return nil -} - type testEpochState struct { counter uint64 duration uint64 diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index cb0654b6e..d6b474c32 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -36,7 +36,6 @@ import ( ) func (s *Server) initNetmapProcessor(cfg *viper.Viper, - cnrClient *container.Client, alphaSync event.Handler, ) error { locodeValidator, err := s.newLocodeValidator(cfg) @@ -59,7 +58,6 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper, AlphabetState: s, CleanupEnabled: cfg.GetBool("netmap_cleaner.enabled"), CleanupThreshold: cfg.GetUint64("netmap_cleaner.threshold"), - ContainerWrapper: cnrClient, NotaryDepositHandler: s.onlyAlphabetEventHandler( s.notaryHandler, ), @@ -198,15 +196,10 @@ func (s *Server) createIRFetcher() irFetcher { return irf } -func (s *Server) initTimers(cfg *viper.Viper, morphClients *serverMorphClients) { +func (s *Server) initTimers(cfg *viper.Viper) { s.epochTimer = newEpochTimer(&epochTimerArgs{ - l: s.log, - alphabetState: s, - newEpochHandlers: s.newEpochTickHandlers(), - cnrWrapper: 
morphClients.CnrClient, - epoch: s, - stopEstimationDMul: cfg.GetUint32("timers.stop_estimation.mul"), - stopEstimationDDiv: cfg.GetUint32("timers.stop_estimation.div"), + newEpochHandlers: s.newEpochTickHandlers(), + epoch: s, }) s.addBlockTimer(s.epochTimer) @@ -425,7 +418,7 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien return err } - err = s.initNetmapProcessor(cfg, morphClients.CnrClient, alphaSync) + err = s.initNetmapProcessor(cfg, alphaSync) if err != nil { return err } diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index b94312645..5fae302c4 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -405,7 +405,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.initTimers(cfg, morphClients) + server.initTimers(cfg) err = server.initGRPCServer(cfg, log, audit) if err != nil { diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 8875880bf..35f4469b1 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -9,7 +9,6 @@ import ( netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -68,7 +67,6 @@ func TestNewEpoch(t *testing.T) { duration: 10, } r := &testEpochResetter{} - cc := &testContainerClient{} nc := &testNetmapClient{ epochDuration: 20, txHeights: map[util.Uint256]uint32{ @@ -82,7 +80,6 @@ func TestNewEpoch(t *testing.T) { p.NotaryDepositHandler = eh.Handle p.AlphabetSyncHandler = eh.Handle p.NetmapClient = nc - p.ContainerWrapper = cc p.EpochTimer = r p.EpochState = es }) @@ -103,11 +100,6 @@ func TestNewEpoch(t *testing.T) { require.Equal(t, ev.Num, es.counter, "invalid epoch counter") require.EqualValues(t, []uint32{nc.txHeights[ev.Hash]}, r.timers, "invalid epoch timer resets") - var expEstimation cntClient.StartEstimationPrm - expEstimation.SetEpoch(ev.Num - 1) - expEstimation.SetHash(ev.Hash) - require.EqualValues(t, []cntClient.StartEstimationPrm{expEstimation}, cc.estimations, "invalid estimations") - require.EqualValues(t, []event.Event{ governance.NewSyncEvent(ev.TxHash()), ev, @@ -274,7 +266,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { as := &testAlphabetState{ isAlphabet: true, } - cc := &testContainerClient{} nc := &testNetmapClient{} eh := &testEventHandler{} @@ -288,7 +279,6 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) { EpochState: es, EpochTimer: r, AlphabetState: as, - ContainerWrapper: cc, NetmapClient: nc, NotaryDepositHandler: eh.Handle, AlphabetSyncHandler: eh.Handle, @@ -354,15 +344,6 @@ func (s *testAlphabetState) IsAlphabet() bool { return s.isAlphabet } -type testContainerClient struct { - estimations []cntClient.StartEstimationPrm -} - -func (c *testContainerClient) StartEstimation(p cntClient.StartEstimationPrm) error { - c.estimations = append(c.estimations, p) - return nil -} - type notaryInvoke struct { contract util.Uint160 fee 
fixedn.Fixed8 diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 4dfa3997b..9522df26c 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -3,7 +3,6 @@ package netmap import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "go.uber.org/zap" ) @@ -44,20 +43,6 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { return false } - prm := cntClient.StartEstimationPrm{} - - prm.SetEpoch(epoch - 1) - prm.SetHash(ev.TxHash()) - - if epoch > 0 && np.alphabetState.IsAlphabet() { // estimates are invalid in genesis epoch - err = np.containerWrp.StartEstimation(prm) - if err != nil { - np.log.Warn(logs.NetmapCantStartContainerSizeEstimation, - zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) - } - } - np.netmapSnapshot.update(*networkMap, epoch) np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash())) diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index 4cecda59c..f5a91dee2 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -65,10 +64,6 @@ type ( MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error } - ContainerClient interface { - StartEstimation(p cntClient.StartEstimationPrm) error - } - // Processor of events produced by network map contract // and new epoch ticker, because it is related to contract. 
Processor struct { @@ -80,7 +75,6 @@ type ( alphabetState AlphabetState netmapClient Client - containerWrp ContainerClient netmapSnapshot cleanupTable @@ -103,7 +97,6 @@ type ( AlphabetState AlphabetState CleanupEnabled bool CleanupThreshold uint64 // in epochs - ContainerWrapper ContainerClient AlphabetSyncHandler event.Handler NotaryDepositHandler event.Handler @@ -133,8 +126,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: alphabet sync handler is not set") case p.NotaryDepositHandler == nil: return nil, errors.New("ir/netmap: notary deposit handler is not set") - case p.ContainerWrapper == nil: - return nil, errors.New("ir/netmap: container contract wrapper is not set") case p.NodeValidator == nil: return nil, errors.New("ir/netmap: node validator is not set") case p.NodeStateSettings == nil: @@ -161,7 +152,6 @@ func New(p *Params) (*Processor, error) { epochState: p.EpochState, alphabetState: p.AlphabetState, netmapClient: p.NetmapClient, - containerWrp: p.ContainerWrapper, netmapSnapshot: newCleanupTable(p.CleanupEnabled, p.CleanupThreshold), handleAlphabetSync: p.AlphabetSyncHandler, diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index f735a5ff7..b512a6594 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -30,12 +30,6 @@ const ( eaclMethod = "eACL" deletionInfoMethod = "deletionInfo" - startEstimationMethod = "startContainerEstimation" - stopEstimationMethod = "stopContainerEstimation" - - listSizesMethod = "listContainerSizes" - getSizeMethod = "getContainerSize" - // putNamedMethod is method name for container put with an alias. It is exported to provide custom fee. putNamedMethod = "putNamed" ) diff --git a/pkg/morph/client/container/estimations.go b/pkg/morph/client/container/estimations.go deleted file mode 100644 index f288c63cf..000000000 --- a/pkg/morph/client/container/estimations.go +++ /dev/null @@ -1,54 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" -) - -// StartEstimationPrm groups parameters of StartEstimation operation. -type StartEstimationPrm struct { - commonEstimationPrm -} - -// StopEstimationPrm groups parameters of StopEstimation operation. -type StopEstimationPrm struct { - commonEstimationPrm -} - -type commonEstimationPrm struct { - epoch uint64 - - client.InvokePrmOptional -} - -// SetEpoch sets epoch. -func (p *commonEstimationPrm) SetEpoch(epoch uint64) { - p.epoch = epoch -} - -// StartEstimation votes to produce start estimation notification. -func (c *Client) StartEstimation(p StartEstimationPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(startEstimationMethod) - prm.SetArgs(p.epoch) - prm.InvokePrmOptional = p.InvokePrmOptional - - if _, err := c.client.Invoke(prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", startEstimationMethod, err) - } - return nil -} - -// StopEstimation votes to produce stop estimation notification. 
-func (c *Client) StopEstimation(p StopEstimationPrm) error { - prm := client.InvokePrm{} - prm.SetMethod(stopEstimationMethod) - prm.SetArgs(p.epoch) - prm.InvokePrmOptional = p.InvokePrmOptional - - if _, err := c.client.Invoke(prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", stopEstimationMethod, err) - } - return nil -} diff --git a/pkg/morph/client/container/load.go b/pkg/morph/client/container/load.go deleted file mode 100644 index 5e2c3c2c3..000000000 --- a/pkg/morph/client/container/load.go +++ /dev/null @@ -1,131 +0,0 @@ -package container - -import ( - "fmt" - - v2refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// EstimationID is an identity of container load estimation inside Container contract. -type EstimationID []byte - -// ListLoadEstimationsByEpoch returns a list of container load estimations for to the specified epoch. -// The list is composed through Container contract call. -func (c *Client) ListLoadEstimationsByEpoch(epoch uint64) ([]EstimationID, error) { - invokePrm := client.TestInvokePrm{} - invokePrm.SetMethod(listSizesMethod) - invokePrm.SetArgs(epoch) - - prms, err := c.client.TestInvoke(invokePrm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listSizesMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listSizesMethod, ln) - } - - prms, err = client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listSizesMethod, err) - } - - res := make([]EstimationID, 0, len(prms)) - for i := range prms { - id, err := client.BytesFromStackItem(prms[i]) - if err != nil { - return nil, fmt.Errorf("could not get ID byte array from stack item (%s): %w", listSizesMethod, err) - } - - res = append(res, id) - } - - return res, nil -} - -// Estimation is a structure of single container load estimation -// reported by storage node. -type Estimation struct { - Size uint64 - - Reporter []byte -} - -// Estimations is a structure of grouped container load estimation inside Container contract. -type Estimations struct { - ContainerID cid.ID - - Values []Estimation -} - -// GetUsedSpaceEstimations returns a list of container load estimations by ID. -// The list is composed through Container contract call. 
-func (c *Client) GetUsedSpaceEstimations(id EstimationID) (*Estimations, error) { - prm := client.TestInvokePrm{} - prm.SetMethod(getSizeMethod) - prm.SetArgs([]byte(id)) - - prms, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getSizeMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", getSizeMethod, ln) - } - - prms, err = client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get stack items of estimation fields from stack item (%s): %w", getSizeMethod, err) - } else if ln := len(prms); ln != 2 { - return nil, fmt.Errorf("unexpected stack item count of estimations fields (%s)", getSizeMethod) - } - - rawCnr, err := client.BytesFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get container ID byte array from stack item (%s): %w", getSizeMethod, err) - } - - prms, err = client.ArrayFromStackItem(prms[1]) - if err != nil { - return nil, fmt.Errorf("could not get estimation list array from stack item (%s): %w", getSizeMethod, err) - } - - var cnr cid.ID - - err = cnr.Decode(rawCnr) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - v2 := new(v2refs.ContainerID) - v2.SetValue(rawCnr) - res := &Estimations{ - ContainerID: cnr, - Values: make([]Estimation, 0, len(prms)), - } - - for i := range prms { - arr, err := client.ArrayFromStackItem(prms[i]) - if err != nil { - return nil, fmt.Errorf("could not get estimation struct from stack item (%s): %w", getSizeMethod, err) - } else if ln := len(arr); ln != 2 { - return nil, fmt.Errorf("unexpected stack item count of estimation fields (%s)", getSizeMethod) - } - - reporter, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("could not get reporter byte array from stack item (%s): %w", getSizeMethod, err) - } - - sz, err := client.IntFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("could not get estimation size from stack item (%s): %w", getSizeMethod, err) - } - - res.Values = append(res.Values, Estimation{ - Reporter: reporter, - Size: uint64(sz), - }) - } - - return res, nil -} diff --git a/pkg/morph/event/container/estimates.go b/pkg/morph/event/container/estimates.go deleted file mode 100644 index 9fd21e2b5..000000000 --- a/pkg/morph/event/container/estimates.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -// StartEstimation structure of container.StartEstimation notification from -// morph chain. -type StartEstimation struct { - epoch uint64 -} - -// StopEstimation structure of container.StopEstimation notification from -// morph chain. -type StopEstimation struct { - epoch uint64 -} - -// MorphEvent implements Neo:Morph Event interface. -func (StartEstimation) MorphEvent() {} - -// MorphEvent implements Neo:Morph Event interface. -func (StopEstimation) MorphEvent() {} - -// Epoch returns epoch value for which to start container size estimation. -func (s StartEstimation) Epoch() uint64 { return s.epoch } - -// Epoch returns epoch value for which to stop container size estimation. -func (s StopEstimation) Epoch() uint64 { return s.epoch } - -// ParseStartEstimation from notification into container event structure. 
-func ParseStartEstimation(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - epoch, err := parseEstimation(params) - if err != nil { - return nil, err - } - - return StartEstimation{epoch: epoch}, nil -} - -// ParseStopEstimation from notification into container event structure. -func ParseStopEstimation(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - epoch, err := parseEstimation(params) - if err != nil { - return nil, err - } - - return StopEstimation{epoch: epoch}, nil -} - -func parseEstimation(params []stackitem.Item) (uint64, error) { - if ln := len(params); ln != 1 { - return 0, event.WrongNumberOfParameters(1, ln) - } - - // parse container - epoch, err := client.IntFromStackItem(params[0]) - if err != nil { - return 0, fmt.Errorf("could not get estimation epoch: %w", err) - } - - return uint64(epoch), nil -} diff --git a/pkg/morph/event/container/estimates_test.go b/pkg/morph/event/container/estimates_test.go deleted file mode 100644 index be46e62c4..000000000 --- a/pkg/morph/event/container/estimates_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package container - -import ( - "math/big" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func TestStartEstimation(t *testing.T) { - var epochNum uint64 = 100 - epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum)) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseStartEstimation(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong estimation parameter", func(t *testing.T) { - _, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseStartEstimation(createNotifyEventFromItems([]stackitem.Item{ - epochItem, - })) - - require.NoError(t, err) - - require.Equal(t, StartEstimation{ - epochNum, - }, ev) - }) -} - -func TestStopEstimation(t *testing.T) { - var epochNum uint64 = 100 - epochItem := stackitem.NewBigInteger(new(big.Int).SetUint64(epochNum)) - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseStopEstimation(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong estimation parameter", func(t *testing.T) { - _, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewMap(), - })) - - require.Error(t, err) - }) - - t.Run("correct behavior", func(t *testing.T) { - ev, err := ParseStopEstimation(createNotifyEventFromItems([]stackitem.Item{ - epochItem, - })) - - require.NoError(t, err) - - require.Equal(t, StopEstimation{ - epochNum, - }, ev) - }) -} From ef64930feff929fb613e2e90ce16c9fdc7f232ce Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 7 Nov 2024 16:00:20 +0300 Subject: [PATCH 144/591] [#1477] ape: Fix EC chunk test Initially, 
this test was a check that only the container node can assemble an EC object. But the implementation of this test was wrong. Signed-off-by: Dmitrii Stepanov --- pkg/services/object/ape/checker_test.go | 56 +++++++++++++++---------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index 7ebd147f3..66f0822e4 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -652,7 +652,7 @@ func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { return nil, nil } -func TestPutECChunk(t *testing.T) { +func TestGetECChunk(t *testing.T) { headerProvider := newHeaderProviderMock() frostfsidProvider := newFrostfsIDProviderMock(t) @@ -666,11 +666,10 @@ func TestPutECChunk(t *testing.T) { Rules: []chain.Rule{ { Status: chain.AccessDenied, - Actions: chain.Actions{Names: methodsOptionalOID}, + Actions: chain.Actions{Names: methodsRequiredOID}, Resources: chain.Resources{ Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, }, - Any: true, Condition: []chain.Condition{ { Op: chain.CondStringEquals, @@ -680,17 +679,27 @@ func TestPutECChunk(t *testing.T) { }, }, }, + { + Status: chain.Allow, + Actions: chain.Actions{Names: methodsRequiredOID}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + }, }, - MatchType: chain.MatchTypeFirstMatch, }) node1Key, err := keys.NewPrivateKey() require.NoError(t, err) node1 := netmapSDK.NodeInfo{} node1.SetPublicKey(node1Key.PublicKey().Bytes()) + node2Key, err := keys.NewPrivateKey() + require.NoError(t, err) + node2 := netmapSDK.NodeInfo{} + node2.SetPublicKey(node1Key.PublicKey().Bytes()) netmap := &netmapSDK.NetMap{} netmap.SetEpoch(100) - netmap.SetNodes([]netmapSDK.NodeInfo{node1}) + netmap.SetNodes([]netmapSDK.NodeInfo{node1, node2}) nm := &netmapStub{ currentEpoch: 100, @@ -703,7 +712,7 @@ func TestPutECChunk(t *testing.T) { cont := containerSDK.Container{} cont.Init() pp := netmapSDK.PlacementPolicy{} - require.NoError(t, pp.DecodeString("REP 1")) + require.NoError(t, pp.DecodeString("EC 1.1")) cont.SetPlacementPolicy(pp) cs := &testContainerSource{ containers: map[cid.ID]*container.Container{ @@ -719,7 +728,7 @@ func TestPutECChunk(t *testing.T) { chunkHeader := newHeaderObjectSDK(cnr, obj, nil).ToV2().GetHeader() ecHeader := object.ECHeader{ Index: 1, - Total: 5, + Total: 2, Parent: &refs.ObjectID{}, } chunkHeader.SetEC(&ecHeader) @@ -738,32 +747,33 @@ func TestPutECChunk(t *testing.T) { }) headerProvider.addHeader(cnr, ecParentID, parentHeader) - t.Run("access denied for container node", func(t *testing.T) { + // container node requests EC parent headers, so container node denies access by matching attribute key/value + t.Run("access denied on container node", func(t *testing.T) { prm := Prm{ - Method: nativeschema.MethodPutObject, - Container: cnr, - Object: obj, - Role: role, - SenderKey: senderKey, - Header: chunkHeader, - SoftAPECheck: true, + Method: nativeschema.MethodGetObject, + Container: cnr, + Object: obj, + Role: role, + SenderKey: hex.EncodeToString(node2Key.PublicKey().Bytes()), + Header: chunkHeader, } err = checker.CheckAPE(context.Background(), prm) require.Error(t, err) }) - t.Run("access allowed for non container node", func(t *testing.T) { + + // non container node has no access rights to collect EC parent header, so it uses EC chunk headers + t.Run("access allowed 
on non container node", func(t *testing.T) { otherKey, err := keys.NewPrivateKey() require.NoError(t, err) checker = NewChecker(ls, ms, headerProvider, frostfsidProvider, nm, &stMock{}, cs, otherKey.PublicKey().Bytes()) prm := Prm{ - Method: nativeschema.MethodPutObject, - Container: cnr, - Object: obj, - Role: nativeschema.PropertyValueContainerRoleOthers, - SenderKey: senderKey, - Header: chunkHeader, - SoftAPECheck: true, + Method: nativeschema.MethodGetObject, + Container: cnr, + Object: obj, + Role: nativeschema.PropertyValueContainerRoleOthers, + SenderKey: senderKey, + Header: chunkHeader, } err = checker.CheckAPE(context.Background(), prm) From 9b13a18aacf6591a6d165ac9b0d070a7940ba45f Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 7 Nov 2024 17:32:10 +0300 Subject: [PATCH 145/591] [#1479] go.mod: Bump frostfs-sdk-go version * Update version within go.mod; * Fix deprecated frostfs-api-go/v2 package and use frostfs-sdk-go/api instead. Signed-off-by: Airat Arifullin --- Makefile | 4 ++-- cmd/frostfs-cli/modules/container/create.go | 2 +- cmd/frostfs-cli/modules/control/add_rule.go | 2 +- cmd/frostfs-cli/modules/control/detach_shards.go | 2 +- cmd/frostfs-cli/modules/control/doctor.go | 2 +- cmd/frostfs-cli/modules/control/drop_objects.go | 2 +- cmd/frostfs-cli/modules/control/evacuate_shard.go | 2 +- cmd/frostfs-cli/modules/control/evacuation.go | 2 +- cmd/frostfs-cli/modules/control/flush_cache.go | 2 +- cmd/frostfs-cli/modules/control/get_rule.go | 2 +- cmd/frostfs-cli/modules/control/healthcheck.go | 2 +- cmd/frostfs-cli/modules/control/ir_healthcheck.go | 2 +- .../modules/control/ir_remove_container.go | 4 ++-- cmd/frostfs-cli/modules/control/ir_remove_node.go | 2 +- cmd/frostfs-cli/modules/control/ir_tick_epoch.go | 2 +- cmd/frostfs-cli/modules/control/list_rules.go | 2 +- cmd/frostfs-cli/modules/control/list_targets.go | 2 +- cmd/frostfs-cli/modules/control/rebuild_shards.go | 2 +- cmd/frostfs-cli/modules/control/remove_rule.go | 2 +- cmd/frostfs-cli/modules/control/set_netmap_status.go | 2 +- cmd/frostfs-cli/modules/control/shards_list.go | 2 +- cmd/frostfs-cli/modules/control/shards_set_mode.go | 2 +- cmd/frostfs-cli/modules/control/synchronize_tree.go | 2 +- cmd/frostfs-cli/modules/control/util.go | 2 +- cmd/frostfs-cli/modules/control/writecache.go | 2 +- cmd/frostfs-cli/modules/object/head.go | 2 +- cmd/frostfs-cli/modules/object/lock.go | 2 +- cmd/frostfs-cli/modules/object/put.go | 2 +- cmd/frostfs-node/accounting.go | 2 +- cmd/frostfs-node/apemanager.go | 2 +- cmd/frostfs-node/config.go | 2 +- cmd/frostfs-node/container.go | 2 +- cmd/frostfs-node/netmap.go | 2 +- cmd/frostfs-node/object.go | 4 ++-- cmd/frostfs-node/session.go | 4 ++-- go.mod | 3 +-- go.sum | 6 ++---- internal/ape/converter.go | 2 +- internal/audit/request.go | 2 +- internal/audit/target.go | 2 +- pkg/core/client/client.go | 2 +- pkg/core/container/delete.go | 2 +- pkg/core/object/fmt.go | 4 ++-- pkg/core/object/fmt_test.go | 2 +- pkg/innerring/processors/netmap/handlers_test.go | 2 +- pkg/innerring/processors/netmap/process_cleanup.go | 2 +- pkg/local_object_storage/engine/lock_test.go | 2 +- pkg/local_object_storage/metabase/db.go | 2 +- pkg/local_object_storage/metabase/db_test.go | 2 +- pkg/local_object_storage/metabase/iterators_test.go | 2 +- pkg/local_object_storage/metabase/put.go | 2 +- pkg/local_object_storage/metabase/select.go | 2 +- pkg/local_object_storage/metabase/select_test.go | 2 +- pkg/local_object_storage/metabase/upgrade.go | 2 +- 
pkg/local_object_storage/metabase/upgrade_test.go | 2 +- pkg/local_object_storage/shard/gc_test.go | 2 +- pkg/morph/client/container/eacl.go | 2 +- pkg/morph/client/container/get.go | 2 +- pkg/morph/client/container/put.go | 2 +- pkg/network/address.go | 2 +- pkg/network/cache/multi.go | 2 +- pkg/network/transport/accounting/grpc/service.go | 4 ++-- pkg/network/transport/apemanager/grpc/service.go | 4 ++-- pkg/network/transport/container/grpc/service.go | 4 ++-- pkg/network/transport/netmap/grpc/service.go | 4 ++-- pkg/network/transport/object/grpc/get.go | 4 ++-- pkg/network/transport/object/grpc/range.go | 4 ++-- pkg/network/transport/object/grpc/search.go | 4 ++-- pkg/network/transport/object/grpc/service.go | 4 ++-- pkg/network/transport/session/grpc/service.go | 4 ++-- pkg/services/accounting/executor.go | 2 +- pkg/services/accounting/morph/executor.go | 2 +- pkg/services/accounting/server.go | 2 +- pkg/services/accounting/sign.go | 2 +- pkg/services/apemanager/audit.go | 4 ++-- pkg/services/apemanager/executor.go | 8 ++++---- pkg/services/apemanager/server.go | 2 +- pkg/services/apemanager/sign.go | 2 +- pkg/services/container/ape.go | 6 +++--- pkg/services/container/ape_test.go | 8 ++++---- pkg/services/container/audit.go | 4 ++-- pkg/services/container/executor.go | 4 ++-- pkg/services/container/morph/executor.go | 6 +++--- pkg/services/container/morph/executor_test.go | 6 +++--- pkg/services/container/server.go | 2 +- pkg/services/container/sign.go | 2 +- pkg/services/control/convert.go | 4 ++-- pkg/services/control/ir/convert.go | 4 ++-- pkg/services/control/ir/rpc.go | 6 +++--- pkg/services/control/ir/server/audit.go | 2 +- pkg/services/control/ir/server/calls.go | 2 +- pkg/services/control/ir/server/sign.go | 2 +- pkg/services/control/ir/service_frostfs.pb.go | 6 +++--- pkg/services/control/ir/types_frostfs.pb.go | 6 +++--- pkg/services/control/rpc.go | 4 ++-- pkg/services/control/server/ctrlmessage/sign.go | 2 +- pkg/services/control/server/sign.go | 2 +- pkg/services/control/service_frostfs.pb.go | 6 +++--- pkg/services/control/types_frostfs.pb.go | 6 +++--- pkg/services/netmap/executor.go | 4 ++-- pkg/services/netmap/server.go | 2 +- pkg/services/netmap/sign.go | 2 +- pkg/services/object/acl/eacl/v2/eacl_test.go | 6 +++--- pkg/services/object/acl/eacl/v2/headers.go | 8 ++++---- pkg/services/object/acl/eacl/v2/object.go | 2 +- pkg/services/object/acl/eacl/v2/xheader.go | 2 +- pkg/services/object/acl/v2/request.go | 2 +- pkg/services/object/acl/v2/request_test.go | 6 +++--- pkg/services/object/acl/v2/service.go | 4 ++-- pkg/services/object/acl/v2/util.go | 6 +++--- pkg/services/object/acl/v2/util_test.go | 4 ++-- pkg/services/object/ape/checker.go | 4 ++-- pkg/services/object/ape/checker_test.go | 6 +++--- pkg/services/object/ape/request.go | 2 +- pkg/services/object/ape/request_test.go | 2 +- pkg/services/object/ape/service.go | 4 ++-- pkg/services/object/audit.go | 6 +++--- pkg/services/object/common.go | 2 +- pkg/services/object/common/writer/ec_test.go | 2 +- pkg/services/object/delete/exec.go | 2 +- pkg/services/object/delete/v2/service.go | 2 +- pkg/services/object/delete/v2/util.go | 4 ++-- pkg/services/object/get/getrangeec_test.go | 2 +- pkg/services/object/get/v2/errors.go | 4 ++-- pkg/services/object/get/v2/get_forwarder.go | 10 +++++----- pkg/services/object/get/v2/get_range_forwarder.go | 10 +++++----- pkg/services/object/get/v2/get_range_hash.go | 10 +++++----- pkg/services/object/get/v2/head_forwarder.go | 12 ++++++------ pkg/services/object/get/v2/service.go | 
2 +- pkg/services/object/get/v2/streamer.go | 2 +- pkg/services/object/get/v2/util.go | 10 +++++----- pkg/services/object/internal/key.go | 2 +- pkg/services/object/metrics.go | 2 +- pkg/services/object/patch/streamer.go | 4 ++-- pkg/services/object/patch/util.go | 4 ++-- pkg/services/object/put/single.go | 10 +++++----- pkg/services/object/put/v2/service.go | 2 +- pkg/services/object/put/v2/streamer.go | 10 +++++----- pkg/services/object/put/v2/util.go | 4 ++-- pkg/services/object/response.go | 2 +- pkg/services/object/search/search_test.go | 2 +- pkg/services/object/search/v2/request_forwarder.go | 10 +++++----- pkg/services/object/search/v2/service.go | 2 +- pkg/services/object/search/v2/streamer.go | 4 ++-- pkg/services/object/search/v2/util.go | 2 +- pkg/services/object/server.go | 2 +- pkg/services/object/sign.go | 2 +- pkg/services/object/transport_splitter.go | 2 +- pkg/services/object/util/key_test.go | 4 ++-- pkg/services/object/util/prm.go | 2 +- pkg/services/object_manager/tombstone/checker.go | 2 +- pkg/services/session/executor.go | 2 +- pkg/services/session/server.go | 2 +- pkg/services/session/sign.go | 2 +- pkg/services/session/storage/persistent/executor.go | 2 +- .../session/storage/persistent/executor_test.go | 4 ++-- pkg/services/session/storage/temporary/executor.go | 2 +- pkg/services/tree/service_frostfs.pb.go | 6 +++--- pkg/services/tree/signature.go | 2 +- pkg/services/tree/signature_test.go | 2 +- pkg/services/tree/types_frostfs.pb.go | 6 +++--- pkg/services/util/response/service.go | 4 ++-- pkg/services/util/sign.go | 4 ++-- 163 files changed, 271 insertions(+), 274 deletions(-) diff --git a/Makefile b/Makefile index 94b1542d5..68a31febe 100755 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ GO_VERSION ?= 1.22 LINT_VERSION ?= 1.61.0 TRUECLOUDLAB_LINT_VERSION ?= 0.0.7 PROTOC_VERSION ?= 25.0 -PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2) +PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 @@ -121,7 +121,7 @@ protoc-install: @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @rm $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip @echo "⇒ Instaling protogen FrostFS plugin..." 
- @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen@$(PROTOGEN_FROSTFS_VERSION) + @GOBIN=$(PROTOGEN_FROSTFS_DIR) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/protogen@$(PROTOGEN_FROSTFS_VERSION) # Build FrostFS component's docker image image-%: diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index f37b169ce..017f4b5ce 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -7,12 +7,12 @@ import ( "strings" "time" - containerApi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go index a22d0525d..c648377bd 100644 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ b/cmd/frostfs-cli/modules/control/add_rule.go @@ -4,11 +4,11 @@ import ( "encoding/hex" "errors" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/detach_shards.go b/cmd/frostfs-cli/modules/control/detach_shards.go index 5e5b60c3d..025a6e561 100644 --- a/cmd/frostfs-cli/modules/control/detach_shards.go +++ b/cmd/frostfs-cli/modules/control/detach_shards.go @@ -1,10 +1,10 @@ package control import ( - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/doctor.go b/cmd/frostfs-cli/modules/control/doctor.go index 13bb81a0a..632cdd6a7 100644 --- a/cmd/frostfs-cli/modules/control/doctor.go +++ b/cmd/frostfs-cli/modules/control/doctor.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/drop_objects.go 
b/cmd/frostfs-cli/modules/control/drop_objects.go index 8c0bb2332..dcc1c1229 100644 --- a/cmd/frostfs-cli/modules/control/drop_objects.go +++ b/cmd/frostfs-cli/modules/control/drop_objects.go @@ -1,10 +1,10 @@ package control import ( - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go index 458e4cc0b..1e48c1df4 100644 --- a/cmd/frostfs-cli/modules/control/evacuate_shard.go +++ b/cmd/frostfs-cli/modules/control/evacuate_shard.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index fffc5e33e..73700e56d 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -7,11 +7,11 @@ import ( "sync/atomic" "time" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/flush_cache.go b/cmd/frostfs-cli/modules/control/flush_cache.go index 541961903..280aacfad 100644 --- a/cmd/frostfs-cli/modules/control/flush_cache.go +++ b/cmd/frostfs-cli/modules/control/flush_cache.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go index 050cf165c..4b4d6eef5 100644 --- a/cmd/frostfs-cli/modules/control/get_rule.go +++ b/cmd/frostfs-cli/modules/control/get_rule.go @@ -3,11 +3,11 @@ package control import ( "encoding/hex" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" 
"github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/healthcheck.go b/cmd/frostfs-cli/modules/control/healthcheck.go index 2241a403f..1d4441f1e 100644 --- a/cmd/frostfs-cli/modules/control/healthcheck.go +++ b/cmd/frostfs-cli/modules/control/healthcheck.go @@ -3,11 +3,11 @@ package control import ( "os" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_healthcheck.go b/cmd/frostfs-cli/modules/control/ir_healthcheck.go index 4f272c1b4..373f21c30 100644 --- a/cmd/frostfs-cli/modules/control/ir_healthcheck.go +++ b/cmd/frostfs-cli/modules/control/ir_healthcheck.go @@ -3,12 +3,12 @@ package control import ( "os" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_remove_container.go b/cmd/frostfs-cli/modules/control/ir_remove_container.go index a66d7e06d..460e299e5 100644 --- a/cmd/frostfs-cli/modules/control/ir_remove_container.go +++ b/cmd/frostfs-cli/modules/control/ir_remove_container.go @@ -1,13 +1,13 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" diff --git a/cmd/frostfs-cli/modules/control/ir_remove_node.go b/cmd/frostfs-cli/modules/control/ir_remove_node.go index 412dc7934..2fe686d63 100644 --- a/cmd/frostfs-cli/modules/control/ir_remove_node.go +++ b/cmd/frostfs-cli/modules/control/ir_remove_node.go @@ -4,11 +4,11 @@ import ( "encoding/hex" "errors" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go index 6965b5dca..5f09e92c1 100644 --- a/cmd/frostfs-cli/modules/control/ir_tick_epoch.go +++ b/cmd/frostfs-cli/modules/control/ir_tick_epoch.go @@ -1,11 +1,11 @@ package control import ( - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" ircontrol "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" ircontrolsrv "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir/server" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go index f5fc27bda..7162df5e0 100644 --- a/cmd/frostfs-cli/modules/control/list_rules.go +++ b/cmd/frostfs-cli/modules/control/list_rules.go @@ -5,11 +5,11 @@ import ( "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/nspcc-dev/neo-go/cli/input" diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 6a988c355..7c401eb17 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -7,10 +7,10 @@ import ( "strconv" "text/tabwriter" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/spf13/cobra" "google.golang.org/grpc/codes" diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go index e2b408712..3df12a15d 100644 --- a/cmd/frostfs-cli/modules/control/rebuild_shards.go +++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go @@ -3,10 +3,10 @@ package control import ( "fmt" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go index 4189ea76b..a996156a5 100644 --- a/cmd/frostfs-cli/modules/control/remove_rule.go +++ b/cmd/frostfs-cli/modules/control/remove_rule.go @@ -4,10 +4,10 @@ import ( "encoding/hex" "errors" - 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go index a107b2b53..87c4f3b3d 100644 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go @@ -6,12 +6,12 @@ import ( "fmt" "time" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go index a81034a9e..40d6628ee 100644 --- a/cmd/frostfs-cli/modules/control/shards_list.go +++ b/cmd/frostfs-cli/modules/control/shards_list.go @@ -7,11 +7,11 @@ import ( "sort" "strings" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/shards_set_mode.go b/cmd/frostfs-cli/modules/control/shards_set_mode.go index dd0d77748..8fe01ba30 100644 --- a/cmd/frostfs-cli/modules/control/shards_set_mode.go +++ b/cmd/frostfs-cli/modules/control/shards_set_mode.go @@ -6,10 +6,10 @@ import ( "slices" "strings" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/synchronize_tree.go b/cmd/frostfs-cli/modules/control/synchronize_tree.go index 5f2e4da96..1e4575f49 100644 --- a/cmd/frostfs-cli/modules/control/synchronize_tree.go +++ b/cmd/frostfs-cli/modules/control/synchronize_tree.go @@ -4,12 +4,12 @@ import ( "crypto/sha256" "errors" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/control/util.go b/cmd/frostfs-cli/modules/control/util.go index ef547681f..41d9dbf8a 100644 --- a/cmd/frostfs-cli/modules/control/util.go +++ b/cmd/frostfs-cli/modules/control/util.go @@ -4,11 +4,11 @@ import ( "crypto/ecdsa" "errors" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" "github.com/spf13/cobra" diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index ffe9009ab..80e4a0c87 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -1,10 +1,10 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/mr-tron/base58" "github.com/spf13/cobra" ) diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index cf2e2d5e6..70c273443 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -6,12 +6,12 @@ import ( "fmt" "os" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index d2e9af24c..53dd01868 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -7,12 +7,12 @@ import ( "strconv" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go index 45e02edb3..affe9bbba 100644 --- a/cmd/frostfs-cli/modules/object/put.go +++ b/cmd/frostfs-cli/modules/object/put.go @@ -10,11 +10,11 @@ import ( "strings" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go index 1d065c227..72a6e5331 100644 --- a/cmd/frostfs-node/accounting.go +++ b/cmd/frostfs-node/accounting.go @@ -4,11 +4,11 @@ import ( "context" "net" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc" accountingService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" accounting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting/morph" + accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" "google.golang.org/grpc" ) diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index 79c45c254..c4d7725f5 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -3,11 +3,11 @@ package main import ( "net" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" morph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" apemanager_transport "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/apemanager/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" + apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" "google.golang.org/grpc" ) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 800c49127..40af23841 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -15,7 +15,6 @@ import ( "syscall" "time" - netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/audit" @@ -70,6 +69,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index f95f671cd..9c3505922 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -5,7 +5,6 @@ import ( "context" "net" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" @@ -18,6 +17,7 @@ import ( containerTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/container/grpc" containerService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" containerMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" + containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 6e2a7c44a..58e066fc9 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,7 +8,6 @@ import ( "net" "sync/atomic" - netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -19,6 +18,7 @@ import ( netmapTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/netmap/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" netmapService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" + netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "go.uber.org/zap" diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c6bde2cff..7f26393a7 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -6,8 +6,6 @@ import ( "fmt" "net" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" metricsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/metrics" policerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/policer" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" @@ -38,6 +36,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index ee21ec230..20d2d318f 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -6,8 +6,6 @@ import ( "net" "time" - 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc" nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -16,6 +14,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "google.golang.org/grpc" ) diff --git a/go.mod b/go.mod index 886fa958f..8a70c3819 100644 --- a/go.mod +++ b/go.mod @@ -4,12 +4,11 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index 6ed130cdb..9778f91e2 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,5 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4= -git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU= git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 h1:8Z5iPhieCrbcdhxBuY/Bajh6V5fki7Whh0b4S2zYJYU= git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0/go.mod h1:Y2Xorxc8SBO4phoek7n3XxaPZz5rIrFgDsU4TOjmlGA= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= @@ -10,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 h1:5gtEq4bjVgAbTOrbEquspyM3s+qsMtkpGC5m9FtfImk= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go 
v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= diff --git a/internal/ape/converter.go b/internal/ape/converter.go index eb80e7ded..c706cf052 100644 --- a/internal/ape/converter.go +++ b/internal/ape/converter.go @@ -4,7 +4,7 @@ import ( "encoding/hex" "fmt" - v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" + v2acl "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" diff --git a/internal/audit/request.go b/internal/audit/request.go index cf0797300..3355087f1 100644 --- a/internal/audit/request.go +++ b/internal/audit/request.go @@ -1,10 +1,10 @@ package audit import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" ) diff --git a/internal/audit/target.go b/internal/audit/target.go index 8bc87ee8e..2d6881e29 100644 --- a/internal/audit/target.go +++ b/internal/audit/target.go @@ -3,7 +3,7 @@ package audit import ( "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/core/client/client.go b/pkg/core/client/client.go index 854fbc49f..98bdf99e7 100644 --- a/pkg/core/client/client.go +++ b/pkg/core/client/client.go @@ -3,8 +3,8 @@ package client import ( "context" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" ) diff --git a/pkg/core/container/delete.go b/pkg/core/container/delete.go index 8e0aaebb9..8c14bdf5e 100644 --- a/pkg/core/container/delete.go +++ b/pkg/core/container/delete.go @@ -1,7 +1,7 @@ package container import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" ) diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 317d62cb0..5bc5c8bea 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -8,11 +8,11 @@ import ( "fmt" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 77afbfc45..b428b56da 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -7,9 +7,9 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 35f4469b1..a53458179 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" netmapContract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" timerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" @@ -13,6 +12,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 170c39e2c..269e79c5e 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -1,8 +1,8 @@ package netmap import ( - v2netmap "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.uber.org/zap" ) diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 3702f567f..7e15c76f5 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git 
a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go index 1f444a3ef..2cd990814 100644 --- a/pkg/local_object_storage/metabase/db.go +++ b/pkg/local_object_storage/metabase/db.go @@ -11,9 +11,9 @@ import ( "sync" "time" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/mr-tron/base58" "go.etcd.io/bbolt" diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go index 0abb5ea89..c61d762bc 100644 --- a/pkg/local_object_storage/metabase/db_test.go +++ b/pkg/local_object_storage/metabase/db_test.go @@ -6,10 +6,10 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 777a94a6f..646dc196c 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -5,10 +5,10 @@ import ( "strconv" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index b329e8032..09c5e04ad 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -9,12 +9,12 @@ import ( "strconv" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 41f05b756..f802036be 100644 --- 
a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -8,9 +8,9 @@ import ( "strings" "time" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5cc25a9f6..6f48607be 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -7,10 +7,10 @@ import ( "strconv" "testing" - v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + v2object "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index 1f2c7956b..bcf72f440 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -12,8 +12,8 @@ import ( "sync/atomic" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index 9c525291a..aeb14aeb6 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -11,12 +11,12 @@ import ( "testing" "time" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index 90958cd35..2b97111e7 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -5,13 +5,13 @@ import ( "errors" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go index 8e9455050..9e604e091 100644 --- a/pkg/morph/client/container/eacl.go +++ b/pkg/morph/client/container/eacl.go @@ -4,9 +4,9 @@ import ( "crypto/sha256" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 6715f870f..ea57a3a95 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -5,10 +5,10 @@ import ( "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" containerContract "git.frostfs.info/TrueCloudLab/frostfs-contract/container" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index ee323af00..777ae2d4e 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -3,9 +3,9 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" ) diff --git a/pkg/network/address.go b/pkg/network/address.go index 88f4a571d..cb83a813d 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -7,7 +7,7 @@ import ( "net/url" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 2ecce3a01..481d1ea4a 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,11 +7,11 @@ import ( "sync" "time" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" 
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "google.golang.org/grpc" diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go index 2144a3001..78129bfbe 100644 --- a/pkg/network/transport/accounting/grpc/service.go +++ b/pkg/network/transport/accounting/grpc/service.go @@ -3,9 +3,9 @@ package accounting import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" - accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting/grpc" accountingsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" + accountingGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting/grpc" ) // Server wraps FrostFS API Accounting service and diff --git a/pkg/network/transport/apemanager/grpc/service.go b/pkg/network/transport/apemanager/grpc/service.go index 59783cfc0..850d38a65 100644 --- a/pkg/network/transport/apemanager/grpc/service.go +++ b/pkg/network/transport/apemanager/grpc/service.go @@ -3,9 +3,9 @@ package apemanager import ( "context" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" apemanager_svc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + apemanager_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" ) type Server struct { diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go index 9fae22b45..49d083a90 100644 --- a/pkg/network/transport/container/grpc/service.go +++ b/pkg/network/transport/container/grpc/service.go @@ -3,9 +3,9 @@ package container import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" containersvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + containerGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" ) // Server wraps FrostFS API Container service and diff --git a/pkg/network/transport/netmap/grpc/service.go b/pkg/network/transport/netmap/grpc/service.go index 406c77e58..4bc3a42f8 100644 --- a/pkg/network/transport/netmap/grpc/service.go +++ b/pkg/network/transport/netmap/grpc/service.go @@ -3,9 +3,9 @@ package grpc import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" - netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap/grpc" netmapsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + netmapGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap/grpc" ) // Server wraps FrostFS API Netmap service and diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go index e1655c183..655b1f9fb 100644 --- a/pkg/network/transport/object/grpc/get.go +++ b/pkg/network/transport/object/grpc/get.go @@ -1,8 +1,8 @@ package object import ( - 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type getStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/range.go b/pkg/network/transport/object/grpc/range.go index 391536e8e..7d7ce0e4c 100644 --- a/pkg/network/transport/object/grpc/range.go +++ b/pkg/network/transport/object/grpc/range.go @@ -1,8 +1,8 @@ package object import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type getRangeStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go index a151ced09..8432707f7 100644 --- a/pkg/network/transport/object/grpc/search.go +++ b/pkg/network/transport/object/grpc/search.go @@ -1,8 +1,8 @@ package object import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) type searchStreamerV2 struct { diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go index d55e3d87f..fa6252118 100644 --- a/pkg/network/transport/object/grpc/service.go +++ b/pkg/network/transport/object/grpc/service.go @@ -5,10 +5,10 @@ import ( "errors" "io" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" ) // Server wraps FrostFS API Object service and diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go index e0dc74942..6fce397f3 100644 --- a/pkg/network/transport/session/grpc/service.go +++ b/pkg/network/transport/session/grpc/service.go @@ -3,9 +3,9 @@ package session import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc" sessionsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" ) // Server wraps FrostFS API Session service and diff --git a/pkg/services/accounting/executor.go b/pkg/services/accounting/executor.go index b0722cf8a..93e44c52b 100644 --- a/pkg/services/accounting/executor.go +++ b/pkg/services/accounting/executor.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) type ServiceExecutor interface { diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index ac836b71d..b77d3e3e6 100644 --- a/pkg/services/accounting/morph/executor.go 
+++ b/pkg/services/accounting/morph/executor.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" accountingSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) diff --git a/pkg/services/accounting/server.go b/pkg/services/accounting/server.go index 72833c46c..a280416fb 100644 --- a/pkg/services/accounting/server.go +++ b/pkg/services/accounting/server.go @@ -3,7 +3,7 @@ package accounting import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) // Server is an interface of the FrostFS API Accounting service server. diff --git a/pkg/services/accounting/sign.go b/pkg/services/accounting/sign.go index cd6ff0307..d8feb76bd 100644 --- a/pkg/services/accounting/sign.go +++ b/pkg/services/accounting/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/accounting" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/accounting" ) type signService struct { diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go index d132ae7db..b9bea07fb 100644 --- a/pkg/services/apemanager/audit.go +++ b/pkg/services/apemanager/audit.go @@ -4,10 +4,10 @@ import ( "context" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + ape_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager/grpc" ) var _ Server = (*auditService)(nil) diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 25f43486a..86f9cb893 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -8,14 +8,14 @@ import ( "errors" "fmt" - apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape" - apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" ape_contract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/contract_storage" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" apemanager_errors "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/apemanager/errors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + apeV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/ape" + apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" diff --git a/pkg/services/apemanager/server.go b/pkg/services/apemanager/server.go index 90b2d92ae..e624177ac 100644 --- a/pkg/services/apemanager/server.go +++ b/pkg/services/apemanager/server.go @@ -3,7 +3,7 @@ package apemanager import ( "context" - 
apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" ) type Server interface { diff --git a/pkg/services/apemanager/sign.go b/pkg/services/apemanager/sign.go index eda2a7342..a172624ff 100644 --- a/pkg/services/apemanager/sign.go +++ b/pkg/services/apemanager/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + apemanager_v2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/apemanager" ) type signService struct { diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index dd4878331..2cdb30b45 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -12,14 +12,14 @@ import ( "net" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index d6f9b75ef..b6b42a559 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -9,13 +9,13 @@ import ( "net" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + session "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go index b257272f5..03d3dc13d 100644 --- a/pkg/services/container/audit.go +++ b/pkg/services/container/audit.go @@ -4,10 +4,10 @@ import ( "context" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - container_grpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + 
container_grpc "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go index 0917e3bd0..70234d3de 100644 --- a/pkg/services/container/executor.go +++ b/pkg/services/container/executor.go @@ -4,9 +4,9 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) type ServiceExecutor interface { diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index 05d8749cf..adb808af3 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go index c64310eb3..87d307385 100644 --- a/pkg/services/container/morph/executor_test.go +++ b/pkg/services/container/morph/executor_test.go @@ -4,12 +4,12 @@ import ( "context" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" containerSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container" containerSvcMorph "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/container/morph" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" containertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/test" diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go index a19d83c56..78fd3d34c 100644 --- a/pkg/services/container/server.go +++ b/pkg/services/container/server.go @@ -3,7 +3,7 @@ package container import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) // Server is an interface of the FrostFS API Container service server. 
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go index f7f5d6486..c478c0e1c 100644 --- a/pkg/services/container/sign.go +++ b/pkg/services/container/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) type signService struct { diff --git a/pkg/services/control/convert.go b/pkg/services/control/convert.go index fd6f020d1..37daf67be 100644 --- a/pkg/services/control/convert.go +++ b/pkg/services/control/convert.go @@ -1,8 +1,8 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" ) type requestWrapper struct { diff --git a/pkg/services/control/ir/convert.go b/pkg/services/control/ir/convert.go index c892c5b6c..024676b87 100644 --- a/pkg/services/control/ir/convert.go +++ b/pkg/services/control/ir/convert.go @@ -1,8 +1,8 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/message" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/message" ) type requestWrapper struct { diff --git a/pkg/services/control/ir/rpc.go b/pkg/services/control/ir/rpc.go index 0c9400f6c..62f800d99 100644 --- a/pkg/services/control/ir/rpc.go +++ b/pkg/services/control/ir/rpc.go @@ -1,9 +1,9 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/grpc" ) const serviceName = "ircontrol.ControlService" diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go index 9f7a8b879..e54fa9824 100644 --- a/pkg/services/control/ir/server/audit.go +++ b/pkg/services/control/ir/server/audit.go @@ -6,10 +6,10 @@ import ( "strings" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 642932c91..63be22411 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -5,10 +5,10 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "google.golang.org/grpc/codes" diff --git a/pkg/services/control/ir/server/sign.go b/pkg/services/control/ir/server/sign.go index f72d51f9e..d39f6d5f9 100644 --- a/pkg/services/control/ir/server/sign.go +++ b/pkg/services/control/ir/server/sign.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" ) diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go index 66d196617..ff4ce7245 100644 --- a/pkg/services/control/ir/service_frostfs.pb.go +++ b/pkg/services/control/ir/service_frostfs.pb.go @@ -5,9 +5,9 @@ package control import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go index b230726a9..32bf457a1 100644 --- a/pkg/services/control/ir/types_frostfs.pb.go +++ b/pkg/services/control/ir/types_frostfs.pb.go @@ -5,9 +5,9 @@ package control import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 04524a68c..514061db4 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -1,8 +1,8 @@ package control import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/common" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" ) const serviceName = "control.ControlService" diff --git a/pkg/services/control/server/ctrlmessage/sign.go b/pkg/services/control/server/ctrlmessage/sign.go index 31425b337..d9d5c5f5e 100644 --- a/pkg/services/control/server/ctrlmessage/sign.go +++ b/pkg/services/control/server/ctrlmessage/sign.go @@ -4,8 +4,8 @@ import ( "crypto/ecdsa" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" frostfsecdsa 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" ) diff --git a/pkg/services/control/server/sign.go b/pkg/services/control/server/sign.go index 514af273f..0e8e24b6e 100644 --- a/pkg/services/control/server/sign.go +++ b/pkg/services/control/server/sign.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" ) diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index e16f082b1..96b896478 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -5,9 +5,9 @@ package control import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go index f92106589..50984f15a 100644 --- a/pkg/services/control/types_frostfs.pb.go +++ b/pkg/services/control/types_frostfs.pb.go @@ -5,9 +5,9 @@ package control import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index f48357915..5223047df 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" versionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" ) diff --git a/pkg/services/netmap/server.go b/pkg/services/netmap/server.go index 0a09c9f44..eff880dbe 100644 --- a/pkg/services/netmap/server.go +++ b/pkg/services/netmap/server.go @@ -3,7 +3,7 @@ package netmap import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" ) // Server is an interface of the FrostFS API Netmap service server. 
diff --git a/pkg/services/netmap/sign.go b/pkg/services/netmap/sign.go index 9a16ad8f1..5f184d5c0 100644 --- a/pkg/services/netmap/sign.go +++ b/pkg/services/netmap/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" ) type signService struct { diff --git a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go index 023b99239..94e015abe 100644 --- a/pkg/services/object/acl/eacl/v2/eacl_test.go +++ b/pkg/services/object/acl/eacl/v2/eacl_test.go @@ -6,9 +6,9 @@ import ( "errors" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go index 34975e1e6..ecb793df8 100644 --- a/pkg/services/object/acl/eacl/v2/headers.go +++ b/pkg/services/object/acl/eacl/v2/headers.go @@ -5,10 +5,10 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go index 72bd4c2d2..92570a3c5 100644 --- a/pkg/services/object/acl/eacl/v2/object.go +++ b/pkg/services/object/acl/eacl/v2/object.go @@ -3,7 +3,7 @@ package v2 import ( "strconv" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go index c1fdea9d8..ce380c117 100644 --- a/pkg/services/object/acl/eacl/v2/xheader.go +++ b/pkg/services/object/acl/eacl/v2/xheader.go @@ -1,7 +1,7 @@ package v2 import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" ) diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go index 74279e453..e35cd2e11 100644 --- a/pkg/services/object/acl/v2/request.go +++ b/pkg/services/object/acl/v2/request.go @@ -4,7 +4,7 @@ import ( "crypto/ecdsa" "fmt" - sessionV2 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/acl/v2/request_test.go index 980d1a2e5..618af3469 100644 --- a/pkg/services/object/acl/v2/request_test.go +++ b/pkg/services/object/acl/v2/request_test.go @@ -3,9 +3,9 @@ package v2 import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index 5a8e8b065..e02a3be36 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -6,13 +6,13 @@ import ( "fmt" "strings" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/acl/v2/util.go index c5225e8c4..e02f70771 100644 --- a/pkg/services/object/acl/v2/util.go +++ b/pkg/services/object/acl/v2/util.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go index 8c6d01ae9..4b19cecfe 100644 --- a/pkg/services/object/acl/v2/util_test.go +++ b/pkg/services/object/acl/v2/util_test.go @@ -6,8 +6,8 @@ import ( "crypto/rand" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" 
bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index 8ce1b429d..abcd2f4bb 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index 66f0822e4..e03b5750c 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -8,13 +8,13 @@ import ( "fmt" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index e12fccb5e..cb9bbf1b8 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -8,10 +8,10 @@ import ( "net" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index 9dad69d17..787785b60 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -6,8 +6,8 @@ import ( "net" "testing" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" checksumtest 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index 6eedaf99e..c114f02f6 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -6,12 +6,12 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index 39e1f9f2d..b42084634 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -5,12 +5,12 @@ import ( "errors" "sync/atomic" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/audit" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go index f48cc5b3d..758156607 100644 --- a/pkg/services/object/common.go +++ b/pkg/services/object/common.go @@ -3,7 +3,7 @@ package object import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index c828c79ba..8b2599e5f 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -10,13 +10,13 @@ import ( "strconv" "testing" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" diff --git 
a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index c2f92950f..ec771320e 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -6,10 +6,10 @@ import ( "fmt" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/delete/v2/service.go b/pkg/services/object/delete/v2/service.go index 10dcd0e87..7146f0361 100644 --- a/pkg/services/object/delete/v2/service.go +++ b/pkg/services/object/delete/v2/service.go @@ -3,8 +3,8 @@ package deletesvc import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Delete operation of Object service v2. diff --git a/pkg/services/object/delete/v2/util.go b/pkg/services/object/delete/v2/util.go index d0db1f543..c57d4562a 100644 --- a/pkg/services/object/delete/v2/util.go +++ b/pkg/services/object/delete/v2/util.go @@ -4,10 +4,10 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index a6882d4a8..599a6f176 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -6,12 +6,12 @@ import ( "fmt" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" diff --git a/pkg/services/object/get/v2/errors.go b/pkg/services/object/get/v2/errors.go index 213455e10..aaa09b891 100644 --- a/pkg/services/object/get/v2/errors.go +++ b/pkg/services/object/get/v2/errors.go @@ -4,8 +4,8 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refs "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" ) var ( diff --git a/pkg/services/object/get/v2/get_forwarder.go 
b/pkg/services/object/get/v2/get_forwarder.go index 18194c740..60fcd7fbf 100644 --- a/pkg/services/object/get/v2/get_forwarder.go +++ b/pkg/services/object/get/v2/get_forwarder.go @@ -7,16 +7,16 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" diff --git a/pkg/services/object/get/v2/get_range_forwarder.go b/pkg/services/object/get/v2/get_range_forwarder.go index 10ecfc4a3..a44616fc9 100644 --- a/pkg/services/object/get/v2/get_range_forwarder.go +++ b/pkg/services/object/get/v2/get_range_forwarder.go @@ -7,15 +7,15 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index e97b60f66..e8e82ddd9 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -5,15 +5,15 @@ import ( "encoding/hex" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/get/v2/head_forwarder.go b/pkg/services/object/get/v2/head_forwarder.go index 5e16008b8..56056398d 100644 --- a/pkg/services/object/get/v2/head_forwarder.go +++ b/pkg/services/object/get/v2/head_forwarder.go @@ -5,15 +5,15 @@ import ( "crypto/ecdsa" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index edd19b441..24b2f0099 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -4,7 +4,6 @@ import ( "context" "errors" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -12,6 +11,7 @@ import ( getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.uber.org/zap" ) diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index ce9a5c767..98207336c 100644 --- a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -3,8 +3,8 @@ package getsvc import ( "context" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" ) diff --git a/pkg/services/object/get/v2/util.go 
b/pkg/services/object/get/v2/util.go index 852c2aec3..bfa7fd619 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -5,17 +5,17 @@ import ( "crypto/sha256" "hash" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/status" clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/internal/key.go b/pkg/services/object/internal/key.go index eba716976..1e0a7ef90 100644 --- a/pkg/services/object/internal/key.go +++ b/pkg/services/object/internal/key.go @@ -3,8 +3,8 @@ package internal import ( "bytes" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) // VerifyResponseKeyV2 checks if response is signed with expected key. Returns client.ErrWrongPublicKey if not. 
diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index 61aed5003..377350fdd 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -4,8 +4,8 @@ import ( "context" "time" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ( diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 677c6610f..91b4efdc1 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -7,13 +7,13 @@ import ( "fmt" "io" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/patcher" diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go index 4f3c3ef17..b9416789c 100644 --- a/pkg/services/object/patch/util.go +++ b/pkg/services/object/patch/util.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 5f9b5d110..3a0b3901f 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -9,11 +9,6 @@ import ( "hash" "sync" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" @@ -28,6 +23,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" diff --git a/pkg/services/object/put/v2/service.go b/pkg/services/object/put/v2/service.go index db902ae59..78d4c711d 100644 --- a/pkg/services/object/put/v2/service.go +++ b/pkg/services/object/put/v2/service.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - objectAPI "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Put operation of Object service v2. diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index 5bf15b4cd..36b514fbc 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -4,11 +4,6 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target" @@ -17,6 +12,11 @@ import ( putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) diff --git a/pkg/services/object/put/v2/util.go b/pkg/services/object/put/v2/util.go index a157a9542..5ec9ebe10 100644 --- a/pkg/services/object/put/v2/util.go +++ b/pkg/services/object/put/v2/util.go @@ -1,10 +1,10 @@ package putsvc import ( - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" ) diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go index d7ba9f843..3787b4168 100644 --- a/pkg/services/object/response.go +++ b/pkg/services/object/response.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ResponseService struct { diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 44abcfe5b..0a40025e1 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -9,7 +9,6 @@ import 
( "strconv" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -17,6 +16,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" diff --git a/pkg/services/object/search/v2/request_forwarder.go b/pkg/services/object/search/v2/request_forwarder.go index 5a2e9b936..7bb6e4d3c 100644 --- a/pkg/services/object/search/v2/request_forwarder.go +++ b/pkg/services/object/search/v2/request_forwarder.go @@ -8,14 +8,14 @@ import ( "io" "sync" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc" - rpcclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" + rpcclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/search/v2/service.go b/pkg/services/object/search/v2/service.go index 78b72ac79..856cd9f04 100644 --- a/pkg/services/object/search/v2/service.go +++ b/pkg/services/object/search/v2/service.go @@ -1,10 +1,10 @@ package searchsvc import ( - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // Service implements Search operation of Object service v2. 
diff --git a/pkg/services/object/search/v2/streamer.go b/pkg/services/object/search/v2/streamer.go index 15e2d53d5..93b281343 100644 --- a/pkg/services/object/search/v2/streamer.go +++ b/pkg/services/object/search/v2/streamer.go @@ -1,9 +1,9 @@ package searchsvc import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" ) diff --git a/pkg/services/object/search/v2/util.go b/pkg/services/object/search/v2/util.go index e971fa8e5..48ae98958 100644 --- a/pkg/services/object/search/v2/util.go +++ b/pkg/services/object/search/v2/util.go @@ -5,12 +5,12 @@ import ( "errors" "fmt" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" searchsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index da98ce245..c570e9d8e 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -3,8 +3,8 @@ package object import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) // GetObjectStream is an interface of FrostFS API v2 compatible object streamer. 
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index f5ae97b62..2c5e794e9 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -5,8 +5,8 @@ import ( "crypto/ecdsa" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type SignService struct { diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index e560d6d8c..1438a0ea2 100644 --- a/pkg/services/object/transport_splitter.go +++ b/pkg/services/object/transport_splitter.go @@ -4,8 +4,8 @@ import ( "bytes" "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) type ( diff --git a/pkg/services/object/util/key_test.go b/pkg/services/object/util/key_test.go index cb7ddfde5..1753a26f7 100644 --- a/pkg/services/object/util/key_test.go +++ b/pkg/services/object/util/key_test.go @@ -5,10 +5,10 @@ import ( "crypto/elliptic" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" tokenStorage "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" diff --git a/pkg/services/object/util/prm.go b/pkg/services/object/util/prm.go index 80c0db39e..34d8ec704 100644 --- a/pkg/services/object/util/prm.go +++ b/pkg/services/object/util/prm.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" sessionsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" ) diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index 48a08b693..7476dbd48 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -4,9 +4,9 @@ import ( "context" "strconv" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index 76c220fab..e914119b4 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -4,10 +4,10 @@ import ( "context" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "go.uber.org/zap" ) diff --git 
a/pkg/services/session/server.go b/pkg/services/session/server.go index 9e44ae667..e8555a7c9 100644 --- a/pkg/services/session/server.go +++ b/pkg/services/session/server.go @@ -3,7 +3,7 @@ package session import ( "context" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) // Server is an interface of the FrostFS API Session service server. diff --git a/pkg/services/session/sign.go b/pkg/services/session/sign.go index 690fff896..3664c1403 100644 --- a/pkg/services/session/sign.go +++ b/pkg/services/session/sign.go @@ -4,8 +4,8 @@ import ( "context" "crypto/ecdsa" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) type signService struct { diff --git a/pkg/services/session/storage/persistent/executor.go b/pkg/services/session/storage/persistent/executor.go index 21f55a7d1..ea0233f9a 100644 --- a/pkg/services/session/storage/persistent/executor.go +++ b/pkg/services/session/storage/persistent/executor.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "go.etcd.io/bbolt" diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go index 124d36930..f80ecb591 100644 --- a/pkg/services/session/storage/persistent/executor_test.go +++ b/pkg/services/session/storage/persistent/executor_test.go @@ -8,8 +8,8 @@ import ( "path/filepath" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/stretchr/testify/require" diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go index cd498709c..d531b25cb 100644 --- a/pkg/services/session/storage/temporary/executor.go +++ b/pkg/services/session/storage/temporary/executor.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go index 7b6abb1dd..05076ee03 100644 --- a/pkg/services/tree/service_frostfs.pb.go +++ b/pkg/services/tree/service_frostfs.pb.go @@ -5,9 +5,9 @@ package tree import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 20a629fcc..4fd4a7e1e 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -8,8 +8,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 939ff170d..7bc5002dc 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -8,13 +8,13 @@ import ( "errors" "testing" - aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" + aclV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go index 4399f8a8b..13f1a43be 100644 --- a/pkg/services/tree/types_frostfs.pb.go +++ b/pkg/services/tree/types_frostfs.pb.go @@ -5,9 +5,9 @@ package tree import ( json "encoding/json" fmt "fmt" - pool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool" - proto "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto" - encoding "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/proto/encoding" + pool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/pool" + proto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto" + encoding "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/proto/encoding" easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" diff --git a/pkg/services/util/response/service.go b/pkg/services/util/response/service.go index 005a643e5..5152a8ece 100644 --- a/pkg/services/util/response/service.go +++ b/pkg/services/util/response/service.go @@ -1,10 +1,10 @@ package response import ( - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" ) diff --git a/pkg/services/util/sign.go b/pkg/services/util/sign.go index bce43d6e8..348a45a94 100644 --- a/pkg/services/util/sign.go +++ b/pkg/services/util/sign.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session" - 
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) From 755cae3f19801437fd60dcbbbc2cda64969006ab Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 7 Nov 2024 17:50:14 +0300 Subject: [PATCH 146/591] [#1479] control: Regenerate protobufs for service Signed-off-by: Airat Arifullin --- pkg/services/control/ir/service_frostfs.pb.go | 340 ++- pkg/services/control/ir/types_frostfs.pb.go | 45 +- pkg/services/control/service_frostfs.pb.go | 1897 ++++++++++++++--- pkg/services/control/types_frostfs.pb.go | 283 ++- 4 files changed, 2144 insertions(+), 421 deletions(-) diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go index ff4ce7245..d27746263 100644 --- a/pkg/services/control/ir/service_frostfs.pb.go +++ b/pkg/services/control/ir/service_frostfs.pb.go @@ -233,14 +233,25 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -375,11 +386,22 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"healthStatus\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.HealthStatus)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"healthStatus\":" + out.RawString(prefix) + v := int32(x.HealthStatus) + if vv, ok := HealthStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } out.RawByte('}') } @@ -564,14 +586,25 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -706,10 +739,16 @@ func (x *TickEpochRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"vub\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) out.Uint32(x.Vub) } out.RawByte('}') @@ -743,7 +782,15 @@ func (x *TickEpochRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -879,14 +926,25 @@ func (x *TickEpochRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true 
out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1021,10 +1079,16 @@ func (x *TickEpochResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"vub\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) out.Uint32(x.Vub) } out.RawByte('}') @@ -1058,7 +1122,15 @@ func (x *TickEpochResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -1194,14 +1266,25 @@ func (x *TickEpochResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1356,14 +1439,29 @@ func (x *RemoveNodeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Key) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } } { - const prefix string = ",\"vub\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" out.RawString(prefix) out.Uint32(x.Vub) } @@ -1398,13 +1496,27 @@ func (x *RemoveNodeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "key": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Key = f } case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -1540,14 +1652,25 @@ func (x *RemoveNodeRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1682,10 +1805,16 @@ func (x *RemoveNodeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const 
prefix string = ",\"vub\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" + out.RawString(prefix) out.Uint32(x.Vub) } out.RawByte('}') @@ -1719,7 +1848,15 @@ func (x *RemoveNodeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -1855,14 +1992,25 @@ func (x *RemoveNodeResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2037,19 +2185,43 @@ func (x *RemoveContainerRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) - } - { - const prefix string = ",\"owner\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" out.RawString(prefix) - out.Base64Bytes(x.Owner) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"vub\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"owner\":" + out.RawString(prefix) + if x.Owner != nil { + out.Base64Bytes(x.Owner) + } else { + out.String("") + } + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"vub\":" out.RawString(prefix) out.Uint32(x.Vub) } @@ -2084,19 +2256,39 @@ func (x *RemoveContainerRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "owner": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Owner = f } case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -2232,14 +2424,25 @@ func (x *RemoveContainerRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2374,10 +2577,16 @@ func (x *RemoveContainerResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"vub\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + 
} + const prefix string = "\"vub\":" + out.RawString(prefix) out.Uint32(x.Vub) } out.RawByte('}') @@ -2411,7 +2620,15 @@ func (x *RemoveContainerResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "vub": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Vub = f } } @@ -2547,14 +2764,25 @@ func (x *RemoveContainerResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } diff --git a/pkg/services/control/ir/types_frostfs.pb.go b/pkg/services/control/ir/types_frostfs.pb.go index 32bf457a1..407eec6ad 100644 --- a/pkg/services/control/ir/types_frostfs.pb.go +++ b/pkg/services/control/ir/types_frostfs.pb.go @@ -155,16 +155,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Key) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) - out.Base64Bytes(x.Sign) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } } out.RawByte('}') } @@ -197,13 +216,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { case "key": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Key = f } case "signature": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Sign = f } } diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 96b896478..0b4e3cf32 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -233,14 +233,25 @@ func (x *HealthCheckRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -395,16 +406,37 @@ func (x *HealthCheckResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"netmapStatus\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.NetmapStatus)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"netmapStatus\":" + out.RawString(prefix) + v := 
int32(x.NetmapStatus) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"healthStatus\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"healthStatus\":" out.RawString(prefix) - out.Int32(int32(x.HealthStatus)) + v := int32(x.HealthStatus) + if vv, ok := HealthStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } out.RawByte('}') } @@ -611,14 +643,25 @@ func (x *HealthCheckResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -773,14 +816,30 @@ func (x *SetNetmapStatusRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"status\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.Status)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" + out.RawString(prefix) + v := int32(x.Status) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"forceMaintenance\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"forceMaintenance\":" out.RawString(prefix) out.Bool(x.ForceMaintenance) } @@ -973,14 +1032,25 @@ func (x *SetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1254,14 +1324,25 @@ func (x *SetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1535,14 +1616,25 @@ func (x *GetNetmapStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1697,16 +1789,34 @@ func 
(x *GetNetmapStatusResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"status\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.Status)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" + out.RawString(prefix) + v := int32(x.Status) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"epoch\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"epoch\":" out.RawString(prefix) - out.Uint64(x.Epoch) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) + out.RawByte('"') } out.RawByte('}') } @@ -1761,7 +1871,15 @@ func (x *GetNetmapStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "epoch": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Epoch = f } } @@ -1897,14 +2015,25 @@ func (x *GetNetmapStatusResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2039,16 +2168,26 @@ func (x *DropObjectsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"addressList\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"addressList\":" + out.RawString(prefix) out.RawByte('[') for i := range x.AddressList { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.AddressList[i]) + if x.AddressList[i] != nil { + out.Base64Bytes(x.AddressList[i]) + } else { + out.String("") + } } out.RawByte(']') } @@ -2086,7 +2225,13 @@ func (x *DropObjectsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -2226,14 +2371,25 @@ func (x *DropObjectsRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2507,14 +2663,25 @@ func (x *DropObjectsResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const 
prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2788,14 +2955,25 @@ func (x *ListShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2936,10 +3114,16 @@ func (x *ListShardsResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shards\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shards\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shards { if i != 0 { @@ -3124,14 +3308,25 @@ func (x *ListShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3306,26 +3501,51 @@ func (x *SetShardModeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"mode\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"mode\":" out.RawString(prefix) - out.Int32(int32(x.Mode)) + v := int32(x.Mode) + if vv, ok := ShardMode_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"resetErrorCounter\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"resetErrorCounter\":" out.RawString(prefix) out.Bool(x.ResetErrorCounter) } @@ -3363,7 +3583,13 @@ func (x *SetShardModeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -3531,14 +3757,25 @@ func (x *SetShardModeRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string 
= ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3812,14 +4049,25 @@ func (x *SetShardModeResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3994,21 +4242,43 @@ func (x *SynchronizeTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"height\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"height\":" out.RawString(prefix) - out.Uint64(x.Height) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) + out.RawByte('"') } out.RawByte('}') } @@ -4041,7 +4311,13 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -4053,7 +4329,15 @@ func (x *SynchronizeTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "height": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Height = f } } @@ -4189,14 +4473,25 @@ func (x *SynchronizeTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -4470,14 +4765,25 @@ func (x *SynchronizeTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -4632,21 +4938,36 @@ 
func (x *EvacuateShardRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"ignoreErrors\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" out.RawString(prefix) out.Bool(x.IgnoreErrors) } @@ -4684,7 +5005,13 @@ func (x *EvacuateShardRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -4830,14 +5157,25 @@ func (x *EvacuateShardRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -4972,10 +5310,16 @@ func (x *EvacuateShardResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"count\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"count\":" + out.RawString(prefix) out.Uint32(x.Count) } out.RawByte('}') @@ -5009,7 +5353,15 @@ func (x *EvacuateShardResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "count": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Count = f } } @@ -5145,14 +5497,25 @@ func (x *EvacuateShardResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5307,21 +5670,36 @@ func (x *FlushCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"seal\":" + 
if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"seal\":" out.RawString(prefix) out.Bool(x.Seal) } @@ -5359,7 +5737,13 @@ func (x *FlushCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -5505,14 +5889,25 @@ func (x *FlushCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5786,14 +6181,25 @@ func (x *FlushCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5948,14 +6354,25 @@ func (x *DoctorRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"concurrency\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"concurrency\":" + out.RawString(prefix) out.Uint32(x.Concurrency) } { - const prefix string = ",\"removeDuplicates\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"removeDuplicates\":" out.RawString(prefix) out.Bool(x.RemoveDuplicates) } @@ -5990,7 +6407,15 @@ func (x *DoctorRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "concurrency": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Concurrency = f } case "removeDuplicates": @@ -6132,14 +6557,25 @@ func (x *DoctorRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -6413,14 +6849,25 @@ func (x *DoctorResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + 
out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -6690,41 +7137,76 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"ignoreErrors\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" out.RawString(prefix) out.Bool(x.IgnoreErrors) } { - const prefix string = ",\"scope\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"scope\":" out.RawString(prefix) out.Uint32(x.Scope) } { - const prefix string = ",\"containerWorkerCount\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerWorkerCount\":" out.RawString(prefix) out.Uint32(x.ContainerWorkerCount) } { - const prefix string = ",\"objectWorkerCount\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectWorkerCount\":" out.RawString(prefix) out.Uint32(x.ObjectWorkerCount) } { - const prefix string = ",\"repOneOnly\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"repOneOnly\":" out.RawString(prefix) out.Bool(x.RepOneOnly) } @@ -6762,7 +7244,13 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -6778,19 +7266,43 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "scope": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Scope = f } case "containerWorkerCount": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.ContainerWorkerCount = f } case "objectWorkerCount": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.ObjectWorkerCount = f } case "repOneOnly": @@ -6932,14 +7444,25 @@ func (x *StartShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7213,14 +7736,25 @@ func (x 
*StartShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7494,14 +8028,25 @@ func (x *GetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7671,11 +8216,19 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) MarshalEasyJSON(ou out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"value\":" - out.RawString(prefix[1:]) - out.Int64(x.Value) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Value, 10) + out.RawByte('"') } out.RawByte('}') } @@ -7708,7 +8261,15 @@ func (x *GetShardEvacuationStatusResponse_Body_UnixTimestamp) UnmarshalEasyJSON( case "value": { var f int64 - f = in.Int64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseInt(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := int64(v) + f = pv x.Value = f } } @@ -7800,11 +8361,19 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) MarshalEasyJSON(out *jw out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"seconds\":" - out.RawString(prefix[1:]) - out.Int64(x.Seconds) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"seconds\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendInt(out.Buffer.Buf, x.Seconds, 10) + out.RawByte('"') } out.RawByte('}') } @@ -7837,7 +8406,15 @@ func (x *GetShardEvacuationStatusResponse_Body_Duration) UnmarshalEasyJSON(in *j case "seconds": { var f int64 - f = in.Int64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseInt(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := int64(v) + f = pv x.Seconds = f } } @@ -8155,73 +8732,157 @@ func (x *GetShardEvacuationStatusResponse_Body) MarshalEasyJSON(out *jwriter.Wri out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"totalObjects\":" - out.RawString(prefix[1:]) - out.Uint64(x.TotalObjects) - } - { - const prefix string = ",\"evacuatedObjects\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"totalObjects\":" out.RawString(prefix) - out.Uint64(x.EvacuatedObjects) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalObjects, 10) + out.RawByte('"') } { - const prefix string = ",\"failedObjects\":" + if !first { + out.RawByte(',') + } else { + first = false + } + 
const prefix string = "\"evacuatedObjects\":" out.RawString(prefix) - out.Uint64(x.FailedObjects) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedObjects, 10) + out.RawByte('"') } { - const prefix string = ",\"shardID\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"failedObjects\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedObjects, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"status\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"status\":" out.RawString(prefix) - out.Int32(int32(x.Status)) + v := int32(x.Status) + if vv, ok := GetShardEvacuationStatusResponse_Body_Status_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"duration\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"duration\":" out.RawString(prefix) x.Duration.MarshalEasyJSON(out) } { - const prefix string = ",\"startedAt\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"startedAt\":" out.RawString(prefix) x.StartedAt.MarshalEasyJSON(out) } { - const prefix string = ",\"errorMessage\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"errorMessage\":" out.RawString(prefix) out.String(x.ErrorMessage) } { - const prefix string = ",\"skippedObjects\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"skippedObjects\":" out.RawString(prefix) - out.Uint64(x.SkippedObjects) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.SkippedObjects, 10) + out.RawByte('"') } { - const prefix string = ",\"totalTrees\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"totalTrees\":" out.RawString(prefix) - out.Uint64(x.TotalTrees) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.TotalTrees, 10) + out.RawByte('"') } { - const prefix string = ",\"evacuatedTrees\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"evacuatedTrees\":" out.RawString(prefix) - out.Uint64(x.EvacuatedTrees) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.EvacuatedTrees, 10) + out.RawByte('"') } { - const prefix string = ",\"failedTrees\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"failedTrees\":" out.RawString(prefix) - out.Uint64(x.FailedTrees) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.FailedTrees, 10) + out.RawByte('"') } out.RawByte('}') } @@ -8254,19 +8915,43 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex case "totalObjects": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.TotalObjects = f } case "evacuatedObjects": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := 
r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.EvacuatedObjects = f } case "failedObjects": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.FailedObjects = f } case "shardID": @@ -8275,7 +8960,13 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -8327,25 +9018,57 @@ func (x *GetShardEvacuationStatusResponse_Body) UnmarshalEasyJSON(in *jlexer.Lex case "skippedObjects": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.SkippedObjects = f } case "totalTrees": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.TotalTrees = f } case "evacuatedTrees": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.EvacuatedTrees = f } case "failedTrees": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.FailedTrees = f } } @@ -8481,14 +9204,25 @@ func (x *GetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -8762,14 +9496,25 @@ func (x *ResetShardEvacuationStatusRequest) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -9043,14 +9788,25 @@ func (x *ResetShardEvacuationStatusResponse) MarshalEasyJSON(out *jwriter.Writer out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -9324,14 +10080,25 @@ 
func (x *StopShardEvacuationRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -9605,14 +10372,25 @@ func (x *StopShardEvacuationResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -9770,16 +10548,31 @@ func (x *AddChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"target\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) x.Target.MarshalEasyJSON(out) } { - const prefix string = ",\"chain\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chain\":" out.RawString(prefix) - out.Base64Bytes(x.Chain) + if x.Chain != nil { + out.Base64Bytes(x.Chain) + } else { + out.String("") + } } out.RawByte('}') } @@ -9819,7 +10612,13 @@ func (x *AddChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) case "chain": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Chain = f } } @@ -9955,14 +10754,25 @@ func (x *AddChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -10097,11 +10907,21 @@ func (x *AddChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"chainId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ChainId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" + out.RawString(prefix) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } } out.RawByte('}') } @@ -10134,7 +10954,13 @@ func (x *AddChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) case "chainId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ChainId = f } } @@ -10270,14 +11096,25 @@ func (x *AddChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { 
out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -10435,16 +11272,31 @@ func (x *GetChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"target\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) x.Target.MarshalEasyJSON(out) } { - const prefix string = ",\"chainId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" out.RawString(prefix) - out.Base64Bytes(x.ChainId) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } } out.RawByte('}') } @@ -10484,7 +11336,13 @@ func (x *GetChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) case "chainId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ChainId = f } } @@ -10620,14 +11478,25 @@ func (x *GetChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -10762,11 +11631,21 @@ func (x *GetChainLocalOverrideResponse_Body) MarshalEasyJSON(out *jwriter.Writer out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"chain\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Chain) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chain\":" + out.RawString(prefix) + if x.Chain != nil { + out.Base64Bytes(x.Chain) + } else { + out.String("") + } } out.RawByte('}') } @@ -10799,7 +11678,13 @@ func (x *GetChainLocalOverrideResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) case "chain": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Chain = f } } @@ -10935,14 +11820,25 @@ func (x *GetChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -11080,10 +11976,16 @@ func (x *ListChainLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Write out.RawString("null") return } + first := true out.RawByte('{') { - const prefix 
string = ",\"target\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) x.Target.MarshalEasyJSON(out) } out.RawByte('}') @@ -11254,14 +12156,25 @@ func (x *ListChainLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -11396,16 +12309,26 @@ func (x *ListChainLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Writ out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"chains\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chains\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Chains { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Chains[i]) + if x.Chains[i] != nil { + out.Base64Bytes(x.Chains[i]) + } else { + out.String("") + } } out.RawByte(']') } @@ -11443,7 +12366,13 @@ func (x *ListChainLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexe var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -11583,14 +12512,25 @@ func (x *ListChainLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -11725,10 +12665,16 @@ func (x *ListTargetsLocalOverridesRequest_Body) MarshalEasyJSON(out *jwriter.Wri out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"chainName\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainName\":" + out.RawString(prefix) out.String(x.ChainName) } out.RawByte('}') @@ -11898,14 +12844,25 @@ func (x *ListTargetsLocalOverridesRequest) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -12046,10 +13003,16 @@ func (x *ListTargetsLocalOverridesResponse_Body) MarshalEasyJSON(out *jwriter.Wr out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"targets\":" - out.RawString(prefix[1:]) + if 
!first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"targets\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Targets { if i != 0 { @@ -12234,14 +13197,25 @@ func (x *ListTargetsLocalOverridesResponse) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -12399,16 +13373,31 @@ func (x *RemoveChainLocalOverrideRequest_Body) MarshalEasyJSON(out *jwriter.Writ out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"target\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) x.Target.MarshalEasyJSON(out) } { - const prefix string = ",\"chainId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"chainId\":" out.RawString(prefix) - out.Base64Bytes(x.ChainId) + if x.ChainId != nil { + out.Base64Bytes(x.ChainId) + } else { + out.String("") + } } out.RawByte('}') } @@ -12448,7 +13437,13 @@ func (x *RemoveChainLocalOverrideRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexe case "chainId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ChainId = f } } @@ -12584,14 +13579,25 @@ func (x *RemoveChainLocalOverrideRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -12865,14 +13871,25 @@ func (x *RemoveChainLocalOverrideResponse) MarshalEasyJSON(out *jwriter.Writer) out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -13010,10 +14027,16 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) MarshalEasyJSON(out *jwr out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"target\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"target\":" + out.RawString(prefix) x.Target.MarshalEasyJSON(out) } out.RawByte('}') @@ -13184,14 +14207,25 @@ func (x *RemoveChainLocalOverridesByTargetRequest) MarshalEasyJSON(out *jwriter. 
out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -13465,14 +14499,25 @@ func (x *RemoveChainLocalOverridesByTargetResponse) MarshalEasyJSON(out *jwriter out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -13687,36 +14732,66 @@ func (x *SealWriteCacheRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"ignoreErrors\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ignoreErrors\":" out.RawString(prefix) out.Bool(x.IgnoreErrors) } { - const prefix string = ",\"async\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"async\":" out.RawString(prefix) out.Bool(x.Async) } { - const prefix string = ",\"restoreMode\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"restoreMode\":" out.RawString(prefix) out.Bool(x.RestoreMode) } { - const prefix string = ",\"shrink\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shrink\":" out.RawString(prefix) out.Bool(x.Shrink) } @@ -13754,7 +14829,13 @@ func (x *SealWriteCacheRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -13918,14 +14999,25 @@ func (x *SealWriteCacheRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -14100,19 +15192,39 @@ func (x *SealWriteCacheResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - 
out.RawString(prefix[1:]) - out.Base64Bytes(x.Shard_ID) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } } { - const prefix string = ",\"success\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"success\":" out.RawString(prefix) out.Bool(x.Success) } { - const prefix string = ",\"error\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"error\":" out.RawString(prefix) out.String(x.Error) } @@ -14147,7 +15259,13 @@ func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) case "shardID": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Shard_ID = f } case "success": @@ -14257,10 +15375,16 @@ func (x *SealWriteCacheResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"results\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"results\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Results { if i != 0 { @@ -14445,14 +15569,25 @@ func (x *SealWriteCacheResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -14587,16 +15722,26 @@ func (x *DetachShardsRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } @@ -14634,7 +15779,13 @@ func (x *DetachShardsRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -14774,14 +15925,25 @@ func (x *DetachShardsRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -15055,14 +16217,25 @@ func (x *DetachShardsResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix 
string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -15237,26 +16410,46 @@ func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Shard_ID { if i != 0 { out.RawByte(',') } - out.Base64Bytes(x.Shard_ID[i]) + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } } out.RawByte(']') } { - const prefix string = ",\"targetFillPercent\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"targetFillPercent\":" out.RawString(prefix) out.Uint32(x.TargetFillPercent) } { - const prefix string = ",\"concurrencyLimit\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"concurrencyLimit\":" out.RawString(prefix) out.Uint32(x.ConcurrencyLimit) } @@ -15294,7 +16487,13 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list [][]byte in.Delim('[') for !in.IsDelim(']') { - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } list = append(list, f) in.WantComma() } @@ -15304,13 +16503,29 @@ func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "targetFillPercent": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.TargetFillPercent = f } case "concurrencyLimit": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.ConcurrencyLimit = f } } @@ -15446,14 +16661,25 @@ func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -15628,19 +16854,39 @@ func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Wri out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Shard_ID) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } } { - const prefix string = ",\"success\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"success\":" 
out.RawString(prefix) out.Bool(x.Success) } { - const prefix string = ",\"error\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"error\":" out.RawString(prefix) out.String(x.Error) } @@ -15675,7 +16921,13 @@ func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lex case "shardID": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Shard_ID = f } case "success": @@ -15785,10 +17037,16 @@ func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"results\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"results\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Results { if i != 0 { @@ -15973,14 +17231,25 @@ func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go index 50984f15a..69d87292d 100644 --- a/pkg/services/control/types_frostfs.pb.go +++ b/pkg/services/control/types_frostfs.pb.go @@ -234,16 +234,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Key) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) - out.Base64Bytes(x.Sign) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } } out.RawByte('}') } @@ -276,13 +295,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { case "key": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Key = f } case "signature": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Sign = f } } @@ -414,19 +445,35 @@ func (x *NodeInfo_Attribute) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) out.String(x.Key) } { - const prefix string = ",\"value\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" out.RawString(prefix) out.String(x.Value) } { - const prefix string = ",\"parents\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parents\":" out.RawString(prefix) out.RawByte('[') for i 
:= range x.Parents { @@ -645,14 +692,29 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"publicKey\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.PublicKey) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"publicKey\":" + out.RawString(prefix) + if x.PublicKey != nil { + out.Base64Bytes(x.PublicKey) + } else { + out.String("") + } } { - const prefix string = ",\"addresses\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"addresses\":" out.RawString(prefix) out.RawByte('[') for i := range x.Addresses { @@ -664,7 +726,12 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"attributes\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"attributes\":" out.RawString(prefix) out.RawByte('[') for i := range x.Attributes { @@ -676,9 +743,19 @@ func (x *NodeInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"state\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"state\":" out.RawString(prefix) - out.Int32(int32(x.State)) + v := int32(x.State) + if vv, ok := NetmapStatus_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } out.RawByte('}') } @@ -711,7 +788,13 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { case "publicKey": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.PublicKey = f } case "addresses": @@ -878,14 +961,27 @@ func (x *Netmap) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"epoch\":" - out.RawString(prefix[1:]) - out.Uint64(x.Epoch) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"epoch\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Epoch, 10) + out.RawByte('"') } { - const prefix string = ",\"nodes\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" out.RawString(prefix) out.RawByte('[') for i := range x.Nodes { @@ -927,7 +1023,15 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) { case "epoch": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Epoch = f } case "nodes": @@ -1179,19 +1283,39 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"shardID\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Shard_ID) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + if x.Shard_ID != nil { + out.Base64Bytes(x.Shard_ID) + } else { + out.String("") + } } { - const prefix string = ",\"metabasePath\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"metabasePath\":" out.RawString(prefix) out.String(x.MetabasePath) } { - const prefix string = ",\"blobstor\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"blobstor\":" out.RawString(prefix) out.RawByte('[') for i := range x.Blobstor { @@ 
-1203,27 +1327,57 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"writecachePath\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"writecachePath\":" out.RawString(prefix) out.String(x.WritecachePath) } { - const prefix string = ",\"mode\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"mode\":" out.RawString(prefix) - out.Int32(int32(x.Mode)) + v := int32(x.Mode) + if vv, ok := ShardMode_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"errorCount\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"errorCount\":" out.RawString(prefix) out.Uint32(x.ErrorCount) } { - const prefix string = ",\"piloramaPath\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"piloramaPath\":" out.RawString(prefix) out.String(x.PiloramaPath) } { - const prefix string = ",\"evacuationInProgress\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"evacuationInProgress\":" out.RawString(prefix) out.Bool(x.EvacuationInProgress) } @@ -1258,7 +1412,13 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { case "shardID": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Shard_ID = f } case "metabasePath": @@ -1312,7 +1472,15 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) { case "errorCount": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.ErrorCount = f } case "piloramaPath": @@ -1436,14 +1604,25 @@ func (x *BlobstorInfo) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"path\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" + out.RawString(prefix) out.String(x.Path) } { - const prefix string = ",\"type\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"type\":" out.RawString(prefix) out.String(x.Type) } @@ -1637,14 +1816,30 @@ func (x *ChainTarget) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"type\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.Type)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"type\":" + out.RawString(prefix) + v := int32(x.Type) + if vv, ok := ChainTarget_TargetType_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } { - const prefix string = ",\"Name\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"Name\":" out.RawString(prefix) out.String(x.Name) } From 764450d04a38bedc3d1b38a098a7546983fcdf93 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 7 Nov 2024 17:51:33 +0300 Subject: [PATCH 147/591] [#1479] tree: Regenerate service protobufs Signed-off-by: Airat Arifullin --- pkg/services/tree/service_frostfs.pb.go | 1252 +++++++++++++++++++---- pkg/services/tree/types_frostfs.pb.go | 143 ++- 2 files changed, 1172 insertions(+), 223 deletions(-) diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go index 
05076ee03..88d002621 100644 --- a/pkg/services/tree/service_frostfs.pb.go +++ b/pkg/services/tree/service_frostfs.pb.go @@ -181,24 +181,51 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"parentId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" out.RawString(prefix) - out.Uint64(x.ParentId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) out.RawByte('[') for i := range x.Meta { @@ -210,9 +237,18 @@ func (x *AddRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } out.RawByte('}') } @@ -245,7 +281,13 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -257,7 +299,15 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "parentId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ParentId = f } case "meta": @@ -277,7 +327,13 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } } @@ -413,14 +469,25 @@ func (x *AddRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -555,11 +622,19 @@ func (x *AddResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"nodeId\":" - out.RawString(prefix[1:]) - out.Uint64(x.NodeId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, 
x.NodeId, 10) + out.RawByte('"') } out.RawByte('}') } @@ -592,7 +667,15 @@ func (x *AddResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "nodeId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.NodeId = f } } @@ -728,14 +811,25 @@ func (x *AddResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -976,24 +1070,49 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"pathAttribute\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"pathAttribute\":" out.RawString(prefix) out.String(x.PathAttribute) } { - const prefix string = ",\"path\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" out.RawString(prefix) out.RawByte('[') for i := range x.Path { @@ -1005,7 +1124,12 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) out.RawByte('[') for i := range x.Meta { @@ -1017,9 +1141,18 @@ func (x *AddByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } out.RawByte('}') } @@ -1052,7 +1185,13 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -1097,7 +1236,13 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } } @@ -1233,14 +1378,25 @@ func (x *AddByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = 
false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1397,23 +1553,38 @@ func (x *AddByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"nodes\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Nodes { if i != 0 { out.RawByte(',') } - out.Uint64(x.Nodes[i]) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Nodes[i], 10) + out.RawByte('"') } out.RawByte(']') } { - const prefix string = ",\"parentId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" out.RawString(prefix) - out.Uint64(x.ParentId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') } out.RawByte('}') } @@ -1449,7 +1620,15 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list []uint64 in.Delim('[') for !in.IsDelim(']') { - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv list = append(list, f) in.WantComma() } @@ -1459,7 +1638,15 @@ func (x *AddByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "parentId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ParentId = f } } @@ -1595,14 +1782,25 @@ func (x *AddByPathResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -1797,26 +1995,57 @@ func (x *RemoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"nodeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" out.RawString(prefix) - out.Uint64(x.NodeId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = 
false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } out.RawByte('}') } @@ -1849,7 +2078,13 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -1861,13 +2096,27 @@ func (x *RemoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "nodeId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.NodeId = f } case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } } @@ -2003,14 +2252,25 @@ func (x *RemoveRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2284,14 +2544,25 @@ func (x *RemoveResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -2532,29 +2803,63 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"parentId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" out.RawString(prefix) - out.Uint64(x.ParentId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') } { - const prefix string = ",\"nodeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" out.RawString(prefix) - out.Uint64(x.NodeId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) out.RawByte('[') for i := 
range x.Meta { @@ -2566,9 +2871,18 @@ func (x *MoveRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } out.RawByte('}') } @@ -2601,7 +2915,13 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -2613,13 +2933,29 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "parentId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ParentId = f } case "nodeId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.NodeId = f } case "meta": @@ -2639,7 +2975,13 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } } @@ -2775,14 +3117,25 @@ func (x *MoveRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3056,14 +3409,25 @@ func (x *MoveResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3338,24 +3702,49 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"pathAttribute\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"pathAttribute\":" out.RawString(prefix) out.String(x.PathAttribute) } { - const prefix string = 
",\"path\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"path\":" out.RawString(prefix) out.RawByte('[') for i := range x.Path { @@ -3367,7 +3756,12 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"attributes\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"attributes\":" out.RawString(prefix) out.RawByte('[') for i := range x.Attributes { @@ -3379,19 +3773,38 @@ func (x *GetNodeByPathRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"latestOnly\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"latestOnly\":" out.RawString(prefix) out.Bool(x.LatestOnly) } { - const prefix string = ",\"allAttributes\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"allAttributes\":" out.RawString(prefix) out.Bool(x.AllAttributes) } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } out.RawByte('}') } @@ -3424,7 +3837,13 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -3480,7 +3899,13 @@ func (x *GetNodeByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } } @@ -3616,14 +4041,25 @@ func (x *GetNodeByPathRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -3824,19 +4260,39 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"nodeId\":" - out.RawString(prefix[1:]) - out.Uint64(x.NodeId) - } - { - const prefix string = ",\"timestamp\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" out.RawString(prefix) - out.Uint64(x.Timestamp) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId, 10) + out.RawByte('"') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"timestamp\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp, 10) + out.RawByte('"') + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) out.RawByte('[') for i := range x.Meta { @@ -3848,9 +4304,16 @@ func (x *GetNodeByPathResponse_Info) MarshalEasyJSON(out 
*jwriter.Writer) { out.RawByte(']') } { - const prefix string = ",\"parentId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" out.RawString(prefix) - out.Uint64(x.ParentId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') } out.RawByte('}') } @@ -3883,13 +4346,29 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) { case "nodeId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.NodeId = f } case "timestamp": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Timestamp = f } case "meta": @@ -3909,7 +4388,15 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) { case "parentId": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ParentId = f } } @@ -4007,10 +4494,16 @@ func (x *GetNodeByPathResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"nodes\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodes\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Nodes { if i != 0 { @@ -4195,14 +4688,25 @@ func (x *GetNodeByPathResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -4369,11 +4873,22 @@ func (x *GetSubTreeRequest_Body_Order) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"direction\":" - out.RawString(prefix[1:]) - out.Int32(int32(x.Direction)) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"direction\":" + out.RawString(prefix) + v := int32(x.Direction) + if vv, ok := GetSubTreeRequest_Body_Order_Direction_name[v]; ok { + out.String(vv) + } else { + out.Int32(v) + } } out.RawByte('}') } @@ -4619,41 +5134,82 @@ func (x *GetSubTreeRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"rootId\":" + if !first { + 
out.RawByte(',') + } else { + first = false + } + const prefix string = "\"rootId\":" out.RawString(prefix) out.RawByte('[') for i := range x.RootId { if i != 0 { out.RawByte(',') } - out.Uint64(x.RootId[i]) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.RootId[i], 10) + out.RawByte('"') } out.RawByte(']') } { - const prefix string = ",\"depth\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"depth\":" out.RawString(prefix) out.Uint32(x.Depth) } { - const prefix string = ",\"bearerToken\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"bearerToken\":" out.RawString(prefix) - out.Base64Bytes(x.BearerToken) + if x.BearerToken != nil { + out.Base64Bytes(x.BearerToken) + } else { + out.String("") + } } { - const prefix string = ",\"orderBy\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"orderBy\":" out.RawString(prefix) x.OrderBy.MarshalEasyJSON(out) } @@ -4688,7 +5244,13 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -4703,7 +5265,15 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list []uint64 in.Delim('[') for !in.IsDelim(']') { - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv list = append(list, f) in.WantComma() } @@ -4713,13 +5283,27 @@ func (x *GetSubTreeRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "depth": { var f uint32 - f = in.Uint32() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 32) + if err != nil { + in.AddError(err) + return + } + pv := uint32(v) + f = pv x.Depth = f } case "bearerToken": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.BearerToken = f } case "orderBy": @@ -4862,14 +5446,25 @@ func (x *GetSubTreeRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5076,45 +5671,72 @@ func (x *GetSubTreeResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"nodeId\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"nodeId\":" + out.RawString(prefix) out.RawByte('[') for i := range x.NodeId { if i != 0 { out.RawByte(',') } - out.Uint64(x.NodeId[i]) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.NodeId[i], 10) + out.RawByte('"') } out.RawByte(']') } { - const prefix string = ",\"parentId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentId\":" out.RawString(prefix) out.RawByte('[') for i := range x.ParentId { if i != 0 { out.RawByte(',') } - 
out.Uint64(x.ParentId[i]) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId[i], 10) + out.RawByte('"') } out.RawByte(']') } { - const prefix string = ",\"timestamp\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"timestamp\":" out.RawString(prefix) out.RawByte('[') for i := range x.Timestamp { if i != 0 { out.RawByte(',') } - out.Uint64(x.Timestamp[i]) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Timestamp[i], 10) + out.RawByte('"') } out.RawByte(']') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) out.RawByte('[') for i := range x.Meta { @@ -5159,7 +5781,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list []uint64 in.Delim('[') for !in.IsDelim(']') { - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv list = append(list, f) in.WantComma() } @@ -5172,7 +5802,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list []uint64 in.Delim('[') for !in.IsDelim(']') { - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv list = append(list, f) in.WantComma() } @@ -5185,7 +5823,15 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { var list []uint64 in.Delim('[') for !in.IsDelim(']') { - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv list = append(list, f) in.WantComma() } @@ -5339,14 +5985,25 @@ func (x *GetSubTreeResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5481,11 +6138,21 @@ func (x *TreeListRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } out.RawByte('}') } @@ -5518,7 +6185,13 @@ func (x *TreeListRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } } @@ -5654,14 +6327,25 @@ func (x *TreeListRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) 
x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -5796,10 +6480,16 @@ func (x *TreeListResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"ids\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"ids\":" + out.RawString(prefix) out.RawByte('[') for i := range x.Ids { if i != 0 { @@ -5983,14 +6673,25 @@ func (x *TreeListResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -6168,19 +6869,39 @@ func (x *ApplyRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"operation\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"operation\":" out.RawString(prefix) x.Operation.MarshalEasyJSON(out) } @@ -6215,7 +6936,13 @@ func (x *ApplyRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -6364,14 +7091,25 @@ func (x *ApplyRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -6645,14 +7383,25 @@ func (x *ApplyResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -6847,26 +7596,55 @@ func (x 
*GetOpLogRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"containerId\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.ContainerId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + if x.ContainerId != nil { + out.Base64Bytes(x.ContainerId) + } else { + out.String("") + } } { - const prefix string = ",\"treeId\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"treeId\":" out.RawString(prefix) out.String(x.TreeId) } { - const prefix string = ",\"height\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"height\":" out.RawString(prefix) - out.Uint64(x.Height) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Height, 10) + out.RawByte('"') } { - const prefix string = ",\"count\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"count\":" out.RawString(prefix) - out.Uint64(x.Count) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.Count, 10) + out.RawByte('"') } out.RawByte('}') } @@ -6899,7 +7677,13 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "containerId": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.ContainerId = f } case "treeId": @@ -6911,13 +7695,29 @@ func (x *GetOpLogRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { case "height": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Height = f } case "count": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.Count = f } } @@ -7053,14 +7853,25 @@ func (x *GetOpLogRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7198,10 +8009,16 @@ func (x *GetOpLogResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"operation\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"operation\":" + out.RawString(prefix) x.Operation.MarshalEasyJSON(out) } out.RawByte('}') @@ -7372,14 +8189,25 @@ func (x *GetOpLogResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = 
"\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7653,14 +8481,25 @@ func (x *HealthcheckResponse) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } @@ -7934,14 +8773,25 @@ func (x *HealthcheckRequest) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"body\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) x.Body.MarshalEasyJSON(out) } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) x.Signature.MarshalEasyJSON(out) } diff --git a/pkg/services/tree/types_frostfs.pb.go b/pkg/services/tree/types_frostfs.pb.go index 13f1a43be..2827b10a9 100644 --- a/pkg/services/tree/types_frostfs.pb.go +++ b/pkg/services/tree/types_frostfs.pb.go @@ -11,6 +11,7 @@ import ( easyproto "github.com/VictoriaMetrics/easyproto" jlexer "github.com/mailru/easyjson/jlexer" jwriter "github.com/mailru/easyjson/jwriter" + strconv "strconv" ) type KeyValue struct { @@ -113,16 +114,31 @@ func (x *KeyValue) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) out.String(x.Key) } { - const prefix string = ",\"value\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"value\":" out.RawString(prefix) - out.Base64Bytes(x.Value) + if x.Value != nil { + out.Base64Bytes(x.Value) + } else { + out.String("") + } } out.RawByte('}') } @@ -161,7 +177,13 @@ func (x *KeyValue) UnmarshalEasyJSON(in *jlexer.Lexer) { case "value": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Value = f } } @@ -293,21 +315,45 @@ func (x *LogMove) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"parentID\":" - out.RawString(prefix[1:]) - out.Uint64(x.ParentId) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"parentID\":" + out.RawString(prefix) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ParentId, 10) + out.RawByte('"') } { - const prefix string = ",\"meta\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"meta\":" out.RawString(prefix) - out.Base64Bytes(x.Meta) + if x.Meta != nil { + out.Base64Bytes(x.Meta) + } else { + out.String("") + } } { - const prefix string = ",\"childID\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"childID\":" out.RawString(prefix) - out.Uint64(x.ChildId) + out.RawByte('"') + out.Buffer.Buf = strconv.AppendUint(out.Buffer.Buf, x.ChildId, 10) + 
out.RawByte('"') } out.RawByte('}') } @@ -340,19 +386,41 @@ func (x *LogMove) UnmarshalEasyJSON(in *jlexer.Lexer) { case "parentID": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ParentId = f } case "meta": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Meta = f } case "childID": { var f uint64 - f = in.Uint64() + r := in.JsonNumber() + n := r.String() + v, err := strconv.ParseUint(n, 10, 64) + if err != nil { + in.AddError(err) + return + } + pv := uint64(v) + f = pv x.ChildId = f } } @@ -464,16 +532,35 @@ func (x *Signature) MarshalEasyJSON(out *jwriter.Writer) { out.RawString("null") return } + first := true out.RawByte('{') { - const prefix string = ",\"key\":" - out.RawString(prefix[1:]) - out.Base64Bytes(x.Key) + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"key\":" + out.RawString(prefix) + if x.Key != nil { + out.Base64Bytes(x.Key) + } else { + out.String("") + } } { - const prefix string = ",\"signature\":" + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" out.RawString(prefix) - out.Base64Bytes(x.Sign) + if x.Sign != nil { + out.Base64Bytes(x.Sign) + } else { + out.String("") + } } out.RawByte('}') } @@ -506,13 +593,25 @@ func (x *Signature) UnmarshalEasyJSON(in *jlexer.Lexer) { case "key": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Key = f } case "signature": { var f []byte - f = in.Bytes() + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } x.Sign = f } } From b1a31281e4901e73d93a5690ccb9ac6ae3e2aa4f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 8 Nov 2024 09:51:14 +0300 Subject: [PATCH 148/591] [#1480] ape: Remove SoftAPECheck flag Previous release was EACL-compatible. Starting from now all EACL should've been migrated to APE chains. Signed-off-by: Evgenii Stratonikov --- pkg/services/common/ape/checker.go | 5 +- pkg/services/object/acl/acl.go | 48 ------------ pkg/services/object/acl/acl_test.go | 89 ----------------------- pkg/services/object/acl/v2/errors.go | 7 -- pkg/services/object/acl/v2/errors_test.go | 10 --- pkg/services/object/acl/v2/request.go | 7 -- pkg/services/object/acl/v2/service.go | 67 ----------------- pkg/services/object/acl/v2/types.go | 11 --- pkg/services/object/ape/checker.go | 4 - pkg/services/object/ape/service.go | 12 --- pkg/services/object/request_context.go | 2 - pkg/services/tree/ape.go | 1 - 12 files changed, 1 insertion(+), 262 deletions(-) delete mode 100644 pkg/services/object/acl/acl_test.go diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index 278f6da31..eb4fd03c7 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -44,9 +44,6 @@ type CheckPrm struct { // The request's bearer token. It is used in order to check APE overrides with the token. BearerToken *bearer.Token - - // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow. - SoftAPECheck bool } // CheckCore provides methods to perform the common logic of APE check. 
@@ -104,7 +101,7 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { if err != nil { return err } - if !found && prm.SoftAPECheck || status == apechain.Allow { + if found && status == apechain.Allow { return nil } err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String()) diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go index 921545c8b..53ba652e1 100644 --- a/pkg/services/object/acl/acl.go +++ b/pkg/services/object/acl/acl.go @@ -3,7 +3,6 @@ package acl import ( "context" "crypto/ecdsa" - "crypto/elliptic" "errors" "fmt" "io" @@ -22,7 +21,6 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" ) // Checker implements v2.ACLChecker interfaces and provides @@ -72,33 +70,6 @@ func NewChecker( } } -// CheckBasicACL is a main check function for basic ACL. -func (c *Checker) CheckBasicACL(info v2.RequestInfo) bool { - // check basic ACL permissions - return info.BasicACL().IsOpAllowed(info.Operation(), info.RequestRole()) -} - -// StickyBitCheck validates owner field in the request if sticky bit is enabled. -func (c *Checker) StickyBitCheck(info v2.RequestInfo, owner user.ID) bool { - // According to FrostFS specification sticky bit has no effect on system nodes - // for correct intra-container work with objects (in particular, replication). - if info.RequestRole() == acl.RoleContainer { - return true - } - - if !info.BasicACL().Sticky() { - return true - } - - if len(info.SenderKey()) == 0 { - return false - } - - requestSenderKey := unmarshalPublicKey(info.SenderKey()) - - return isOwnerFromKey(owner, requestSenderKey) -} - // CheckEACL is a main check function for extended ACL. 
func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error { basicACL := reqInfo.BasicACL() @@ -241,22 +212,3 @@ func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error { return nil } - -func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { - if key == nil { - return false - } - - var id2 user.ID - user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) - - return id.Equals(id2) -} - -func unmarshalPublicKey(bs []byte) *keys.PublicKey { - pub, err := keys.NewPublicKeyFromBytes(bs, elliptic.P256()) - if err != nil { - return nil - } - return pub -} diff --git a/pkg/services/object/acl/acl_test.go b/pkg/services/object/acl/acl_test.go deleted file mode 100644 index d63cb1285..000000000 --- a/pkg/services/object/acl/acl_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package acl - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" - "github.com/stretchr/testify/require" -) - -type emptyEACLSource struct{} - -func (e emptyEACLSource) GetEACL(_ cid.ID) (*container.EACL, error) { - return nil, nil -} - -type emptyNetmapState struct{} - -func (e emptyNetmapState) CurrentEpoch() uint64 { - return 0 -} - -func TestStickyCheck(t *testing.T) { - checker := NewChecker( - emptyNetmapState{}, - emptyEACLSource{}, - eaclSDK.NewValidator(), - &engine.StorageEngine{}) - - t.Run("system role", func(t *testing.T) { - var info v2.RequestInfo - - info.SetSenderKey(make([]byte, 33)) // any non-empty key - info.SetRequestRole(acl.RoleContainer) - - require.True(t, checker.StickyBitCheck(info, usertest.ID())) - - var basicACL acl.Basic - basicACL.MakeSticky() - - info.SetBasicACL(basicACL) - - require.True(t, checker.StickyBitCheck(info, usertest.ID())) - }) - - t.Run("owner ID and/or public key emptiness", func(t *testing.T) { - var info v2.RequestInfo - - info.SetRequestRole(acl.RoleOthers) // should be non-system role - - assertFn := func(isSticky, withKey, withOwner, expected bool) { - info := info - if isSticky { - var basicACL acl.Basic - basicACL.MakeSticky() - - info.SetBasicACL(basicACL) - } - - if withKey { - info.SetSenderKey(make([]byte, 33)) - } else { - info.SetSenderKey(nil) - } - - var ownerID user.ID - - if withOwner { - ownerID = usertest.ID() - } - - require.Equal(t, expected, checker.StickyBitCheck(info, ownerID)) - } - - assertFn(true, false, false, false) - assertFn(true, true, false, false) - assertFn(true, false, true, false) - assertFn(false, false, false, true) - assertFn(false, true, false, true) - assertFn(false, false, true, true) - assertFn(false, true, true, true) - }) -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go index 11b9e6e5f..e969d37fa 100644 --- a/pkg/services/object/acl/v2/errors.go +++ b/pkg/services/object/acl/v2/errors.go @@ -26,13 +26,6 @@ const ( accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v" ) -func basicACLErr(info RequestInfo) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedACLReasonFmt, info.operation)) - - 
return errAccessDenied -} - func eACLErr(info RequestInfo, err error) error { errAccessDenied := &apistatus.ObjectAccessDenied{} errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err)) diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go index 2d2b7bc8d..3cc74e6aa 100644 --- a/pkg/services/object/acl/v2/errors_test.go +++ b/pkg/services/object/acl/v2/errors_test.go @@ -8,16 +8,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestBasicACLErr(t *testing.T) { - var reqInfo RequestInfo - err := basicACLErr(reqInfo) - - var errAccessDenied *apistatus.ObjectAccessDenied - - require.ErrorAs(t, err, &errAccessDenied, - "basicACLErr must be able to be casted to apistatus.ObjectAccessDenied") -} - func TestEACLErr(t *testing.T) { var reqInfo RequestInfo testErr := errors.New("test-eacl") diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go index e35cd2e11..8bd34ccb3 100644 --- a/pkg/services/object/acl/v2/request.go +++ b/pkg/services/object/acl/v2/request.go @@ -104,13 +104,6 @@ func (r RequestInfo) RequestRole() acl.Role { return r.requestRole } -// IsSoftAPECheck states if APE should perform soft checks. -// Soft APE check allows a request if CheckAPE returns NoRuleFound for it, -// otherwise it denies the request. -func (r RequestInfo) IsSoftAPECheck() bool { - return r.BasicACL().Bits() != 0 -} - // MetaWithToken groups session and bearer tokens, // verification header and raw API request. type MetaWithToken struct { diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index e02a3be36..9f5ac5a27 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -123,7 +123,6 @@ func (w *wrappedGetObjectStream) Context() context.Context { ContainerOwner: w.requestInfo.ContainerOwner(), SenderKey: w.requestInfo.SenderKey(), Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), BearerToken: w.requestInfo.Bearer(), }) } @@ -149,7 +148,6 @@ func (w *wrappedRangeStream) Context() context.Context { ContainerOwner: w.requestInfo.ContainerOwner(), SenderKey: w.requestInfo.SenderKey(), Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), BearerToken: w.requestInfo.Bearer(), }) } @@ -175,7 +173,6 @@ func (w *wrappedSearchStream) Context() context.Context { ContainerOwner: w.requestInfo.ContainerOwner(), SenderKey: w.requestInfo.SenderKey(), Role: w.requestInfo.RequestRole(), - SoftAPECheck: w.requestInfo.IsSoftAPECheck(), BearerToken: w.requestInfo.Bearer(), }) } @@ -231,14 +228,6 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - return b.next.Get(request, &getStreamBasicChecker{ GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo), info: reqInfo, @@ -309,14 +298,6 @@ func (b Service) Head( reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - resp, err := b.next.Head(requestContext(ctx, reqInfo), request) if err == nil { if err = b.checker.CheckEACL(resp, reqInfo); err != nil { @@ -362,14 
+343,6 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr return err } - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - return b.next.Search(request, &searchStreamBasicChecker{ checker: b.checker, SearchStream: newWrappedSearchStream(stream, reqInfo), @@ -422,14 +395,6 @@ func (b Service) Delete( reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - return b.next.Delete(requestContext(ctx, reqInfo), request) } @@ -475,14 +440,6 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return eACLErr(reqInfo, err) - } - } - return b.next.GetRange(request, &rangeStreamBasicChecker{ checker: b.checker, GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo), @@ -496,7 +453,6 @@ func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { ContainerOwner: reqInfo.ContainerOwner(), SenderKey: reqInfo.SenderKey(), Role: reqInfo.RequestRole(), - SoftAPECheck: reqInfo.IsSoftAPECheck(), BearerToken: reqInfo.Bearer(), }) } @@ -546,14 +502,6 @@ func (b Service) GetRangeHash( reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) { - return nil, basicACLErr(reqInfo) - } else if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) } @@ -605,15 +553,6 @@ func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleReque reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !b.checker.CheckBasicACL(reqInfo) || !b.checker.StickyBitCheck(reqInfo, idOwner) { - return nil, basicACLErr(reqInfo) - } - if err := b.checker.CheckEACL(request, reqInfo); err != nil { - return nil, eACLErr(reqInfo, err) - } - } - return b.next.PutSingle(requestContext(ctx, reqInfo), request) } @@ -679,12 +618,6 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe reqInfo.obj = obj - if reqInfo.IsSoftAPECheck() { - if !p.source.checker.CheckBasicACL(reqInfo) || !p.source.checker.StickyBitCheck(reqInfo, idOwner) { - return basicACLErr(reqInfo) - } - } - ctx = requestContext(ctx, reqInfo) } diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go index 061cd26b6..6ae80e9c2 100644 --- a/pkg/services/object/acl/v2/types.go +++ b/pkg/services/object/acl/v2/types.go @@ -1,22 +1,11 @@ package v2 -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - // ACLChecker is an interface that must provide // ACL related checks. type ACLChecker interface { - // CheckBasicACL must return true only if request - // passes basic ACL validation. - CheckBasicACL(RequestInfo) bool // CheckEACL must return non-nil error if request // doesn't pass extended ACL validation. CheckEACL(any, RequestInfo) error - // StickyBitCheck must return true only if sticky bit - // is disabled or enabled but request contains correct - // owner field. 
- StickyBitCheck(RequestInfo, user.ID) bool } // InnerRingFetcher is an interface that must provide diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index abcd2f4bb..4a3b5ba5e 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -64,9 +64,6 @@ type Prm struct { // An encoded container's owner user ID. ContainerOwner user.ID - // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow. - SoftAPECheck bool - // The request's bearer token. It is used in order to check APE overrides with the token. BearerToken *bearer.Token @@ -109,6 +106,5 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { Container: prm.Container, ContainerOwner: prm.ContainerOwner, BearerToken: prm.BearerToken, - SoftAPECheck: prm.SoftAPECheck, }) } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index c114f02f6..558c48da8 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -84,8 +84,6 @@ type getStreamBasicChecker struct { role string - softAPECheck bool - bearerToken *bearer.Token } @@ -105,7 +103,6 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { SenderKey: hex.EncodeToString(g.senderKey), ContainerOwner: g.containerOwner, Role: g.role, - SoftAPECheck: g.softAPECheck, BearerToken: g.bearerToken, XHeaders: resp.GetMetaHeader().GetXHeaders(), } @@ -142,7 +139,6 @@ func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectSt senderKey: reqCtx.SenderKey, containerOwner: reqCtx.ContainerOwner, role: nativeSchemaRole(reqCtx.Role), - softAPECheck: reqCtx.SoftAPECheck, bearerToken: reqCtx.BearerToken, }) } @@ -174,7 +170,6 @@ func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutR SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, Role: nativeSchemaRole(reqCtx.Role), - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), } @@ -230,7 +225,6 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, Role: nativeSchemaRole(reqCtx.Role), - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), } @@ -300,7 +294,6 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), }) @@ -330,7 +323,6 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), }) @@ -360,7 +352,6 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), }) @@ -395,7 +386,6 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, 
stream objectSvc.G Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), }) @@ -425,7 +415,6 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), } @@ -461,7 +450,6 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ Role: nativeSchemaRole(reqCtx.Role), SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, - SoftAPECheck: reqCtx.SoftAPECheck, BearerToken: reqCtx.BearerToken, XHeaders: request.GetMetaHeader().GetXHeaders(), } diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go index 95d4c9d93..eb4041f80 100644 --- a/pkg/services/object/request_context.go +++ b/pkg/services/object/request_context.go @@ -20,7 +20,5 @@ type RequestContext struct { Role acl.Role - SoftAPECheck bool - BearerToken *bearer.Token } diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 69cf59405..606044f8e 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -81,7 +81,6 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, ContainerOwner: container.Value.Owner(), PublicKey: publicKey, BearerToken: bt, - SoftAPECheck: false, }) } From f666898e5d19e34a571848ecd0a78dbbea3a1b34 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 8 Nov 2024 10:05:37 +0300 Subject: [PATCH 149/591] [#1480] objsvc: Remove EACL checks Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/object.go | 9 - pkg/services/object/acl/acl.go | 214 ---------------------- pkg/services/object/acl/v2/errors.go | 14 -- pkg/services/object/acl/v2/errors_test.go | 20 -- pkg/services/object/acl/v2/service.go | 45 +---- pkg/services/object/acl/v2/types.go | 8 - 6 files changed, 1 insertion(+), 309 deletions(-) delete mode 100644 pkg/services/object/acl/acl.go delete mode 100644 pkg/services/object/acl/v2/errors_test.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 7f26393a7..629f79207 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -19,7 +19,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl" v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" @@ -39,7 +38,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -458,17 
+456,10 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi } func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { - ls := c.cfgObject.cfgLocalStorage.localStorage - return v2.New( apeSvc, c.netMapSource, irFetcher, - acl.NewChecker( - c.cfgNetmap.state, - c.cfgObject.eaclSource, - eaclSDK.NewValidator(), - ls), c.cfgObject.cnrSource, v2.WithLogger(c.log), ) diff --git a/pkg/services/object/acl/acl.go b/pkg/services/object/acl/acl.go deleted file mode 100644 index 53ba652e1..000000000 --- a/pkg/services/object/acl/acl.go +++ /dev/null @@ -1,214 +0,0 @@ -package acl - -import ( - "context" - "crypto/ecdsa" - "errors" - "fmt" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - eaclV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/eacl/v2" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" - bearerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -// Checker implements v2.ACLChecker interfaces and provides -// ACL/eACL validation functionality. -type Checker struct { - eaclSrc container.EACLSource - validator *eaclSDK.Validator - localStorage *engine.StorageEngine - state netmap.State -} - -type localStorage struct { - ls *engine.StorageEngine -} - -func (s *localStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - if s.ls == nil { - return nil, io.ErrUnexpectedEOF - } - - return engine.Head(ctx, s.ls, addr) -} - -// Various EACL check errors. -var ( - errEACLDeniedByRule = errors.New("denied by rule") - errBearerExpired = errors.New("bearer token has expired") - errBearerInvalidSignature = errors.New("bearer token has invalid signature") - errBearerInvalidContainerID = errors.New("bearer token was created for another container") - errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner") - errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender") -) - -// NewChecker creates Checker. -// Panics if at least one of the parameter is nil. -func NewChecker( - state netmap.State, - eaclSrc container.EACLSource, - validator *eaclSDK.Validator, - localStorage *engine.StorageEngine, -) *Checker { - return &Checker{ - eaclSrc: eaclSrc, - validator: validator, - localStorage: localStorage, - state: state, - } -} - -// CheckEACL is a main check function for extended ACL. 
-func (c *Checker) CheckEACL(msg any, reqInfo v2.RequestInfo) error { - basicACL := reqInfo.BasicACL() - if !basicACL.Extendable() { - return nil - } - - bearerTok := reqInfo.Bearer() - impersonate := bearerTok != nil && bearerTok.Impersonate() - - // if bearer token is not allowed, then ignore it - if impersonate || !basicACL.AllowedBearerRules(reqInfo.Operation()) { - reqInfo.CleanBearer() - } - - var table eaclSDK.Table - cnr := reqInfo.ContainerID() - - if bearerTok == nil { - eaclInfo, err := c.eaclSrc.GetEACL(cnr) - if err != nil { - if client.IsErrEACLNotFound(err) { - return nil - } - return err - } - - table = *eaclInfo.Value - } else { - table = bearerTok.EACLTable() - } - - // if bearer token is not present, isValidBearer returns true - if err := isValidBearer(reqInfo, c.state); err != nil { - return err - } - - hdrSrc, err := c.getHeaderSource(cnr, msg, reqInfo) - if err != nil { - return err - } - - eaclRole := getRole(reqInfo) - - action, _ := c.validator.CalculateAction(new(eaclSDK.ValidationUnit). - WithRole(eaclRole). - WithOperation(eaclSDK.Operation(reqInfo.Operation())). - WithContainerID(&cnr). - WithSenderKey(reqInfo.SenderKey()). - WithHeaderSource(hdrSrc). - WithEACLTable(&table), - ) - - if action != eaclSDK.ActionAllow { - return errEACLDeniedByRule - } - return nil -} - -func getRole(reqInfo v2.RequestInfo) eaclSDK.Role { - var eaclRole eaclSDK.Role - switch op := reqInfo.RequestRole(); op { - default: - eaclRole = eaclSDK.Role(op) - case acl.RoleOwner: - eaclRole = eaclSDK.RoleUser - case acl.RoleInnerRing, acl.RoleContainer: - eaclRole = eaclSDK.RoleSystem - case acl.RoleOthers: - eaclRole = eaclSDK.RoleOthers - } - return eaclRole -} - -func (c *Checker) getHeaderSource(cnr cid.ID, msg any, reqInfo v2.RequestInfo) (eaclSDK.TypedHeaderSource, error) { - var xHeaderSource eaclV2.XHeaderSource - if req, ok := msg.(eaclV2.Request); ok { - xHeaderSource = eaclV2.NewRequestXHeaderSource(req) - } else { - xHeaderSource = eaclV2.NewResponseXHeaderSource(msg.(eaclV2.Response), reqInfo.Request().(eaclV2.Request)) - } - - hdrSrc, err := eaclV2.NewMessageHeaderSource(&localStorage{ls: c.localStorage}, xHeaderSource, cnr, eaclV2.WithOID(reqInfo.ObjectID())) - if err != nil { - return nil, fmt.Errorf("can't parse headers: %w", err) - } - return hdrSrc, nil -} - -// isValidBearer checks whether bearer token was correctly signed by authorized -// entity. This method might be defined on whole ACL service because it will -// require fetching current epoch to check lifetime. -func isValidBearer(reqInfo v2.RequestInfo, st netmap.State) error { - ownerCnr := reqInfo.ContainerOwner() - - token := reqInfo.Bearer() - - // 0. Check if bearer token is present in reqInfo. - if token == nil { - return nil - } - - // 1. First check token lifetime. Simplest verification. - if token.InvalidAt(st.CurrentEpoch()) { - return errBearerExpired - } - - // 2. Then check if bearer token is signed correctly. - if !token.VerifySignature() { - return errBearerInvalidSignature - } - - // 3. Then check if container is either empty or equal to the container in the request. - cnr, isSet := token.EACLTable().CID() - if isSet && !cnr.Equals(reqInfo.ContainerID()) { - return errBearerInvalidContainerID - } - - // 4. Then check if container owner signed this token. - if !bearerSDK.ResolveIssuer(*token).Equals(ownerCnr) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return errBearerNotSignedByOwner - } - - // 5. 
Then check if request sender has rights to use this token. - var keySender frostfsecdsa.PublicKey - - err := keySender.Decode(reqInfo.SenderKey()) - if err != nil { - return fmt.Errorf("decode sender public key: %w", err) - } - - var usrSender user.ID - user.IDFromKey(&usrSender, ecdsa.PublicKey(keySender)) - - if !token.AssertUser(usrSender) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return errBearerInvalidOwner - } - - return nil -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go index e969d37fa..cd2de174a 100644 --- a/pkg/services/object/acl/v2/errors.go +++ b/pkg/services/object/acl/v2/errors.go @@ -2,8 +2,6 @@ package v2 import ( "fmt" - - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) const invalidRequestMessage = "malformed request" @@ -20,15 +18,3 @@ var ( errInvalidSessionOwner = malformedRequestError("invalid session token owner") errInvalidVerb = malformedRequestError("session token verb is invalid") ) - -const ( - accessDeniedACLReasonFmt = "access to operation %s is denied by basic ACL check" - accessDeniedEACLReasonFmt = "access to operation %s is denied by extended ACL check: %v" -) - -func eACLErr(info RequestInfo, err error) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(fmt.Sprintf(accessDeniedEACLReasonFmt, info.operation, err)) - - return errAccessDenied -} diff --git a/pkg/services/object/acl/v2/errors_test.go b/pkg/services/object/acl/v2/errors_test.go deleted file mode 100644 index 3cc74e6aa..000000000 --- a/pkg/services/object/acl/v2/errors_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package v2 - -import ( - "errors" - "testing" - - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" -) - -func TestEACLErr(t *testing.T) { - var reqInfo RequestInfo - testErr := errors.New("test-eacl") - err := eACLErr(reqInfo, testErr) - - var errAccessDenied *apistatus.ObjectAccessDenied - - require.ErrorAs(t, err, &errAccessDenied, - "eACLErr must be able to be casted to apistatus.ObjectAccessDenied") -} diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index 9f5ac5a27..69406e5b0 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -42,27 +42,15 @@ type patchStreamBasicChecker struct { } type getStreamBasicChecker struct { - checker ACLChecker - object.GetObjectStream - - info RequestInfo } type rangeStreamBasicChecker struct { - checker ACLChecker - object.GetObjectRangeStream - - info RequestInfo } type searchStreamBasicChecker struct { - checker ACLChecker - object.SearchStream - - info RequestInfo } // Option represents Service constructor option. 
@@ -73,8 +61,6 @@ type cfg struct { containers container.Source - checker ACLChecker - irFetcher InnerRingFetcher nm netmap.Source @@ -86,7 +72,6 @@ type cfg struct { func New(next object.ServiceServer, nm netmap.Source, irf InnerRingFetcher, - acl ACLChecker, cs container.Source, opts ...Option, ) Service { @@ -95,7 +80,6 @@ func New(next object.ServiceServer, next: next, nm: nm, irFetcher: irf, - checker: acl, containers: cs, } @@ -230,8 +214,6 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream return b.next.Get(request, &getStreamBasicChecker{ GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo), - info: reqInfo, - checker: b.checker, }) } @@ -298,14 +280,7 @@ func (b Service) Head( reqInfo.obj = obj - resp, err := b.next.Head(requestContext(ctx, reqInfo), request) - if err == nil { - if err = b.checker.CheckEACL(resp, reqInfo); err != nil { - err = eACLErr(reqInfo, err) - } - } - - return resp, err + return b.next.Head(requestContext(ctx, reqInfo), request) } func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { @@ -344,9 +319,7 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr } return b.next.Search(request, &searchStreamBasicChecker{ - checker: b.checker, SearchStream: newWrappedSearchStream(stream, reqInfo), - info: reqInfo, }) } @@ -441,9 +414,7 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb reqInfo.obj = obj return b.next.GetRange(request, &rangeStreamBasicChecker{ - checker: b.checker, GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo), - info: reqInfo, }) } @@ -657,28 +628,14 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { - if _, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - } - return g.GetObjectStream.Send(resp) } func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - return g.GetObjectRangeStream.Send(resp) } func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error { - if err := g.checker.CheckEACL(resp, g.info); err != nil { - return eACLErr(g.info, err) - } - return g.SearchStream.Send(resp) } diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go index 6ae80e9c2..b03261b90 100644 --- a/pkg/services/object/acl/v2/types.go +++ b/pkg/services/object/acl/v2/types.go @@ -1,13 +1,5 @@ package v2 -// ACLChecker is an interface that must provide -// ACL related checks. -type ACLChecker interface { - // CheckEACL must return non-nil error if request - // doesn't pass extended ACL validation. - CheckEACL(any, RequestInfo) error -} - // InnerRingFetcher is an interface that must provide // Inner Ring information. 
type InnerRingFetcher interface { From c82c753e9f67c70dfa99f98a34623fae3dfa5fce Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 8 Nov 2024 10:10:06 +0300 Subject: [PATCH 150/591] [#1480] objsvc: Remove useless stream wrappers Signed-off-by: Evgenii Stratonikov --- pkg/services/object/acl/v2/service.go | 36 +++------------------------ 1 file changed, 3 insertions(+), 33 deletions(-) diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index 69406e5b0..56748b08c 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -41,18 +41,6 @@ type patchStreamBasicChecker struct { nonFirstSend bool } -type getStreamBasicChecker struct { - object.GetObjectStream -} - -type rangeStreamBasicChecker struct { - object.GetObjectRangeStream -} - -type searchStreamBasicChecker struct { - object.SearchStream -} - // Option represents Service constructor option. type Option func(*cfg) @@ -212,9 +200,7 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream reqInfo.obj = obj - return b.next.Get(request, &getStreamBasicChecker{ - GetObjectStream: newWrappedGetObjectStreamStream(stream, reqInfo), - }) + return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) } func (b Service) Put() (object.PutObjectStream, error) { @@ -318,9 +304,7 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr return err } - return b.next.Search(request, &searchStreamBasicChecker{ - SearchStream: newWrappedSearchStream(stream, reqInfo), - }) + return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) } func (b Service) Delete( @@ -413,9 +397,7 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb reqInfo.obj = obj - return b.next.GetRange(request, &rangeStreamBasicChecker{ - GetObjectRangeStream: newWrappedRangeStream(stream, reqInfo), - }) + return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) } func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { @@ -627,18 +609,6 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR return p.next.CloseAndRecv(ctx) } -func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { - return g.GetObjectStream.Send(resp) -} - -func (g *rangeStreamBasicChecker) Send(resp *objectV2.GetRangeResponse) error { - return g.GetObjectRangeStream.Send(resp) -} - -func (g *searchStreamBasicChecker) Send(resp *objectV2.SearchResponse) error { - return g.SearchStream.Send(resp) -} - func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { body := request.GetBody() if body == nil { From d336f2d487abd5fd102d24d130749a6a6ddeb8c4 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 30 Oct 2024 17:39:02 +0300 Subject: [PATCH 151/591] [#1393] adm: Make `NewLocalActor` receive accout name * Some RPC-clients for contracts require different wallet account types. Since, `Policy` contract gets `consensus` accounts while `NNS` gets `committee` accounts. 
Signed-off-by: Airat Arifullin --- .../internal/modules/morph/ape/ape_util.go | 2 +- .../internal/modules/morph/helper/actor.go | 11 +++++++---- cmd/frostfs-adm/internal/modules/morph/nns/helper.go | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index d4aedda2e..f4373c535 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -139,7 +139,7 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he c, err := helper.GetN3Client(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c) + ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index 1ca246f9f..ff0421335 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -5,7 +5,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -31,7 +30,11 @@ type LocalActor struct { // NewLocalActor create LocalActor with accounts form provided wallets. // In case of empty wallets provided created actor with dummy account only for read operation. -func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) { +// +// If wallets are provided, the contract client will use accounts with accName name from these wallets. +// To determine which account name should be used in a contract client, refer to how the contract +// verifies the transaction signature. 
+func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) { walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) var act *actor.Actor var accounts []*wallet.Account @@ -53,8 +56,8 @@ func NewLocalActor(cmd *cobra.Command, c actor.RPCActor) (*LocalActor, error) { commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) for _, w := range wallets { - acc, err := GetWalletAccount(w, constants.CommitteeAccountName) - commonCmd.ExitOnErr(cmd, "can't find committee account: %w", err) + acc, err := GetWalletAccount(w, accName) + commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) accounts = append(accounts, acc) } act, err = actor.New(c, []actor.SignerAccount{{ diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 0eaa5ac58..de439acd1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -2,6 +2,7 @@ package nns import ( client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" @@ -15,7 +16,7 @@ func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, uti c, err := helper.GetN3Client(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c) + ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) From ad01fb958afd2c48e2efba03e5d22acdf8be7849 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Wed, 6 Nov 2024 13:39:46 +0300 Subject: [PATCH 152/591] [#1474] Stop using obsolete .github directory This commit is a part of multi-repo cleanup effort: https://git.frostfs.info/TrueCloudLab/frostfs-infra/issues/136 Signed-off-by: Vitaliy Potyarkin --- {.github => .forgejo}/ISSUE_TEMPLATE/bug_report.md | 0 {.github => .forgejo}/ISSUE_TEMPLATE/config.yml | 0 {.github => .forgejo}/ISSUE_TEMPLATE/feature_request.md | 0 {.github => .forgejo}/logo.svg | 0 README.md | 2 +- docs/release-instruction.md | 8 ++++---- docs/update-go-instruction.md | 2 +- 7 files changed, 6 insertions(+), 6 deletions(-) rename {.github => .forgejo}/ISSUE_TEMPLATE/bug_report.md (100%) rename {.github => .forgejo}/ISSUE_TEMPLATE/config.yml (100%) rename {.github => .forgejo}/ISSUE_TEMPLATE/feature_request.md (100%) rename {.github => .forgejo}/logo.svg (100%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md similarity index 100% rename from .github/ISSUE_TEMPLATE/bug_report.md rename to .forgejo/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/config.yml rename to .forgejo/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.md rename to .forgejo/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/logo.svg b/.forgejo/logo.svg similarity index 100% rename from 
.github/logo.svg rename to .forgejo/logo.svg diff --git a/README.md b/README.md index 47d812b18..6998b6726 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

- FrostFS + FrostFS

diff --git a/docs/release-instruction.md b/docs/release-instruction.md index d000f10d0..18659c699 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -55,7 +55,7 @@ Add an entry to the `CHANGELOG.md` following the style established there. * update `Unreleased...new` and `new...old` diff-links at the bottom of the file * add optional codename and release date in the heading * remove all empty sections such as `Added`, `Removed`, etc. -* make sure all changes have references to GitHub issues in `#123` format (if possible) +* make sure all changes have references to relevant issues in `#123` format (if possible) * clean up all `Unreleased` sections and leave them empty ### Make release commit @@ -110,9 +110,9 @@ $ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION} $ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION} ``` -### Make a proper GitHub release (if not automated) +### Make a proper release (if not automated) -Edit an automatically-created release on GitHub, copy things from `CHANGELOG.md`. +Edit an automatically-created release on git.frostfs.info, copy things from `CHANGELOG.md`. Build and tar release binaries with `make prepare-release`, attach them to the release. Publish the release. @@ -121,7 +121,7 @@ the release. Publish the release. Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env) with new versions. -### Close GitHub milestone +### Close milestone Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists. diff --git a/docs/update-go-instruction.md b/docs/update-go-instruction.md index f99225046..195e0c6b3 100644 --- a/docs/update-go-instruction.md +++ b/docs/update-go-instruction.md @@ -7,7 +7,7 @@ ## Update CI Change Golang versions for unit test in CI. -There is `go` section in `.github/workflows/go.yaml` file: +There is `go` section in `.forgejo/workflows/*.yml` files: ```yaml jobs: test: From 8a57c78f5f5e2098ec1b0085c709a2473d40adbb Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 11 Nov 2024 12:46:55 +0300 Subject: [PATCH 153/591] [#1484] engine: Fix engine metrics 1. Add forgotten metrics for client requests 2. Include execIfNotBlocked into metrics Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/container.go | 8 ++++---- pkg/local_object_storage/engine/delete.go | 3 +-- pkg/local_object_storage/engine/get.go | 3 +-- pkg/local_object_storage/engine/inhume.go | 3 +-- pkg/local_object_storage/engine/list.go | 5 +++++ pkg/local_object_storage/engine/lock.go | 1 + pkg/local_object_storage/engine/put.go | 3 +-- pkg/local_object_storage/engine/range.go | 19 +++++++++---------- pkg/local_object_storage/engine/select.go | 6 ++---- 9 files changed, 25 insertions(+), 26 deletions(-) diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 6def02f12..a52436175 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -45,6 +45,8 @@ func (r ListContainersRes) Containers() []cid.ID { // // Returns an error if executions are blocked (see BlockExecution). 
func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { + defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { res, err = e.containerSize(prm) return err @@ -68,8 +70,6 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { } func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { - defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)() - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) @@ -93,6 +93,8 @@ func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRe // // Returns an error if executions are blocked (see BlockExecution). func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) (res ListContainersRes, err error) { + defer elapsed("ListContainers", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { res, err = e.listContainers(ctx) return err @@ -114,8 +116,6 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { } func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { - defer elapsed("ListContainers", e.metrics.AddMethodDuration)() - uniqueIDs := make(map[string]cid.ID) e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 3ec3f8f9b..9ca3a7cee 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -58,6 +58,7 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe attribute.Bool("force_removal", prm.forceRemoval), )) defer span.End() + defer elapsed("Delete", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e.delete(ctx, prm) @@ -68,8 +69,6 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe } func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { - defer elapsed("Delete", e.metrics.AddMethodDuration)() - var locked struct { is bool } diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 4a9199be7..a1fe8a010 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -56,6 +56,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er attribute.String("address", prm.addr.EncodeToString()), )) defer span.End() + defer elapsed("Get", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e.get(ctx, prm) @@ -66,8 +67,6 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er } func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { - defer elapsed("Get", e.metrics.AddMethodDuration)() - errNotFound := new(apistatus.ObjectNotFound) var shPrm shard.GetPrm diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 35ce50f65..b8959b534 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -70,6 +70,7 @@ var errInhumeFailure = errors.New("inhume operation failed") func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") defer span.End() + defer 
elapsed("Inhume", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e.inhume(ctx, prm) @@ -80,8 +81,6 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe } func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { - defer elapsed("Inhume", e.metrics.AddMethodDuration)() - var shPrm shard.InhumePrm if prm.forceRemoval { shPrm.ForceRemoval() diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go index cb3830b7c..073248862 100644 --- a/pkg/local_object_storage/engine/list.go +++ b/pkg/local_object_storage/engine/list.go @@ -7,6 +7,7 @@ import ( objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" ) // ErrEndOfListing is returned from an object listing with cursor @@ -98,6 +99,10 @@ func (l ListWithCursorRes) Cursor() *Cursor { // Returns ErrEndOfListing if there are no more objects to return or count // parameter set to zero. func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (ListWithCursorRes, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.ListWithCursor") + defer span.End() + defer elapsed("ListWithCursor", e.metrics.AddMethodDuration)() + result := make([]objectcore.Info, 0, prm.count) // Set initial cursors diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index ac8fa9c6f..bbab59bfa 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -32,6 +32,7 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l attribute.Int("locked_count", len(locked)), )) defer span.End() + defer elapsed("Lock", e.metrics.AddMethodDuration)() return e.execIfNotBlocked(func() error { return e.lock(ctx, idCnr, locker, locked) diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index a50d80b75..56d3ef490 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -56,6 +56,7 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { attribute.String("address", object.AddressOf(prm.Object).EncodeToString()), )) defer span.End() + defer elapsed("Put", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { err = e.put(ctx, prm) @@ -66,8 +67,6 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) { } func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { - defer elapsed("Put", e.metrics.AddMethodDuration)() - addr := object.AddressOf(prm.Object) // In #1146 this check was parallelized, however, it became diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 498674fd2..fde6052ae 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -65,6 +65,15 @@ func (r RngRes) Object() *objectSDK.Object { // // Returns an error if executions are blocked (see BlockExecution). 
func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, err error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange", + trace.WithAttributes( + attribute.String("address", prm.addr.EncodeToString()), + attribute.String("offset", strconv.FormatUint(prm.off, 10)), + attribute.String("length", strconv.FormatUint(prm.ln, 10)), + )) + defer span.End() + defer elapsed("GetRange", e.metrics.AddMethodDuration)() + err = e.execIfNotBlocked(func() error { res, err = e.getRange(ctx, prm) return err @@ -74,16 +83,6 @@ func (e *StorageEngine) GetRange(ctx context.Context, prm RngPrm) (res RngRes, e } func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getRange", - trace.WithAttributes( - attribute.String("address", prm.addr.EncodeToString()), - attribute.String("offset", strconv.FormatUint(prm.off, 10)), - attribute.String("length", strconv.FormatUint(prm.ln, 10)), - )) - defer span.End() - - defer elapsed("GetRange", e.metrics.AddMethodDuration)() - var shPrm shard.RngPrm shPrm.SetAddress(prm.addr) shPrm.SetRange(prm.off, prm.ln) diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index a85891f0c..0f1341f85 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -51,6 +51,7 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe attribute.String("container_id", prm.cnr.EncodeToString()), )) defer span.End() + defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e._select(ctx, prm) @@ -61,8 +62,6 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe } func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { - defer elapsed("Search", e.metrics.AddMethodDuration)() - addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) @@ -99,6 +98,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, // // Returns an error if executions are blocked (see BlockExecution). 
func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { + defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { res, err = e.list(ctx, limit) return err @@ -108,8 +108,6 @@ func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, } func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { - defer elapsed("ListObjects", e.metrics.AddMethodDuration)() - addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) From 2f3bc6eb8421eb742d1304371d077015a74e218c Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 12 Nov 2024 11:07:48 +0300 Subject: [PATCH 154/591] [#1396] cli/playground: Improve terminal control key handling Signed-off-by: Alexander Chuprov --- .../modules/container/policy_playground.go | 41 +++++++++++++++---- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 095ab6438..6885d35f2 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -1,11 +1,10 @@ package container import ( - "bufio" "encoding/hex" "encoding/json" + "errors" "fmt" - "io" "os" "strings" @@ -14,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/chzyer/readline" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -163,6 +163,16 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } +var policyPlaygroundCompleter = readline.NewPrefixCompleter( + readline.PcItem("list"), + readline.PcItem("ls"), + readline.PcItem("add"), + readline.PcItem("load"), + readline.PcItem("remove"), + readline.PcItem("rm"), + readline.PcItem("eval"), +) + func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { key := key.GetOrGenerate(repl.cmd) @@ -189,15 +199,32 @@ func (repl *policyPlaygroundREPL) run() error { "rm": repl.handleRemove, "eval": repl.handleEval, } - for reader := bufio.NewReader(os.Stdin); ; { - fmt.Print("> ") - line, err := reader.ReadString('\n') + + rl, err := readline.NewEx(&readline.Config{ + Prompt: "> ", + InterruptPrompt: "^C", + AutoComplete: policyPlaygroundCompleter, + }) + if err != nil { + return fmt.Errorf("error initializing readline: %w", err) + } + defer rl.Close() + + var exit bool + for { + line, err := rl.Readline() if err != nil { - if err == io.EOF { - return nil + if errors.Is(err, readline.ErrInterrupt) { + if exit { + return nil + } + exit = true + continue } return fmt.Errorf("reading line: %v", err) } + exit = false + parts := strings.Fields(line) if len(parts) == 0 { continue From 80f8a8fd3abd0cf9a605c4002cbaced6f2fd9b8c Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 12 Nov 2024 11:09:27 +0300 Subject: [PATCH 155/591] [#1396] cli/playground: Refactor Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/container/policy_playground.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 6885d35f2..40bd4110b 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ 
b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -221,7 +221,7 @@ func (repl *policyPlaygroundREPL) run() error { exit = true continue } - return fmt.Errorf("reading line: %v", err) + return fmt.Errorf("reading line: %w", err) } exit = false @@ -230,8 +230,7 @@ func (repl *policyPlaygroundREPL) run() error { continue } cmd := parts[0] - handler, exists := cmdHandlers[cmd] - if exists { + if handler, exists := cmdHandlers[cmd]; exists { if err := handler(parts[1:]); err != nil { fmt.Printf("error: %v\n", err) } From b543569c3f0ae2feab378ad3568bab27528d1035 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 11 Nov 2024 18:54:07 +0300 Subject: [PATCH 156/591] [#1486] node: Introduce dual service support * Register GRPC services for both neo.fs.v2 and frost.fs namespaces * Use this temporary solution until all nodes are updated Signed-off-by: Airat Arifullin --- cmd/frostfs-node/accounting.go | 23 +++++++++++++++++++++++ cmd/frostfs-node/apemanager.go | 3 +++ cmd/frostfs-node/container.go | 3 +++ cmd/frostfs-node/netmap.go | 3 +++ cmd/frostfs-node/object.go | 3 +++ cmd/frostfs-node/session.go | 3 +++ 6 files changed, 38 insertions(+) diff --git a/cmd/frostfs-node/accounting.go b/cmd/frostfs-node/accounting.go index 72a6e5331..2d52e0c56 100644 --- a/cmd/frostfs-node/accounting.go +++ b/cmd/frostfs-node/accounting.go @@ -3,6 +3,7 @@ package main import ( "context" "net" + "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" accountingTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/accounting/grpc" @@ -30,5 +31,27 @@ func initAccountingService(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { accountingGRPC.RegisterAccountingServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(accountingGRPC.AccountingService_ServiceDesc), server) }) } + +// frostFSServiceDesc creates a service descriptor with the new namespace for dual service support. +func frostFSServiceDesc(sd grpc.ServiceDesc) *grpc.ServiceDesc { + sdLegacy := new(grpc.ServiceDesc) + *sdLegacy = sd + + const ( + legacyNamespace = "neo.fs.v2" + apemanagerLegacyNamespace = "frostfs.v2" + newNamespace = "frost.fs" + ) + + if strings.HasPrefix(sd.ServiceName, legacyNamespace) { + sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, legacyNamespace, newNamespace) + } else if strings.HasPrefix(sd.ServiceName, apemanagerLegacyNamespace) { + sdLegacy.ServiceName = strings.ReplaceAll(sd.ServiceName, apemanagerLegacyNamespace, newNamespace) + } + return sdLegacy +} diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index c4d7725f5..de3aed660 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -26,5 +26,8 @@ func initAPEManagerService(c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { apemanager_grpc.RegisterAPEManagerServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. 
+ s.RegisterService(frostFSServiceDesc(apemanager_grpc.APEManagerService_ServiceDesc), server) }) } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 9c3505922..de840ec80 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -64,6 +64,9 @@ func initContainerService(_ context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { containerGRPC.RegisterContainerServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(containerGRPC.ContainerService_ServiceDesc), server) }) c.cfgObject.cfgLocalStorage.localStorage.SetContainerSource(cnrRdr) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 58e066fc9..73871bfc9 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -166,6 +166,9 @@ func initNetmapService(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { netmapGRPC.RegisterNetmapServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(netmapGRPC.NetmapService_ServiceDesc), server) }) addNewEpochNotificationHandlers(c) diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 629f79207..ec521ab01 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -218,6 +218,9 @@ func initObjectService(c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { objectGRPC.RegisterObjectServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(objectGRPC.ObjectService_ServiceDesc), server) }) } diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index 20d2d318f..a35d4e470 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -61,5 +61,8 @@ func initSessionService(c *cfg) { c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { sessionGRPC.RegisterSessionServiceServer(s, server) + + // TODO(@aarifullin): #1487 remove the dual service support. + s.RegisterService(frostFSServiceDesc(sessionGRPC.SessionService_ServiceDesc), server) }) } From be2753de0032c2eb4c0abb5b00096c7e6697413f Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 12 Nov 2024 13:31:13 +0300 Subject: [PATCH 157/591] [#1490] docs: Update description for `object.get.priority` Signed-off-by: Anton Nikiforov --- docs/storage-node-configuration.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 363520481..98d72cb69 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -412,12 +412,12 @@ object: - $attribute:ClusterName ``` -| Parameter | Type | Default value | Description | -|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------------| -| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. 
| -| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET and SEARCH requests. | +| Parameter | Type | Default value | Description | +|-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| +| `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | +| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | +| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | +| `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | # `runtime` section Contains runtime parameters. From a69229853366e192296e2a287c49adc66c647bb3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 11 Nov 2024 09:24:09 +0300 Subject: [PATCH 158/591] [#1483] node: Remove eACL cache Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/cache.go | 25 ----------------------- cmd/frostfs-node/config.go | 2 -- cmd/frostfs-node/container.go | 16 --------------- cmd/frostfs-node/object.go | 26 ------------------------ pkg/core/container/storage.go | 13 ------------ pkg/services/container/morph/executor.go | 1 - 6 files changed, 83 deletions(-) diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 06142a46c..b90641799 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -196,31 +196,6 @@ func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error return s.delInfoCache.get(cnr) } -type ttlEACLStorage struct { - *ttlNetCache[cid.ID, *container.EACL] -} - -func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage { - const eaclCacheSize = 100 - - lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) { - return v.GetEACL(id) - }, metrics.NewCacheMetrics("eacl")) - - return ttlEACLStorage{lruCnrCache} -} - -// GetEACL returns eACL value from the cache. If value is missing in the cache -// or expired, then it returns value from side chain and updates cache. -func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) { - return s.get(cnr) -} - -// InvalidateEACL removes cached eACL value. 
-func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) { - s.remove(cnr) -} - type lruNetmapSource struct { netState netmap.State diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 40af23841..902187560 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -642,8 +642,6 @@ type cfgObject struct { cnrSource container.Source - eaclSource container.EACLSource - cfgAccessPolicyEngine cfgAccessPolicyEngine pool cfgObjectRoutines diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index de840ec80..cc38876ee 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -73,10 +73,6 @@ func initContainerService(_ context.Context, c *cfg) { } func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc containerCore.Source) (*morphContainerReader, *morphContainerWriter) { - eACLFetcher := &morphEACLFetcher{ - w: client, - } - cnrRdr := new(morphContainerReader) cnrWrt := &morphContainerWriter{ @@ -84,8 +80,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c } if c.cfgMorph.cacheTTL <= 0 { - c.cfgObject.eaclSource = eACLFetcher - cnrRdr.eacl = eACLFetcher c.cfgObject.cnrSource = cnrSrc cnrRdr.src = cnrSrc cnrRdr.lister = client @@ -129,11 +123,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c c.cfgObject.cnrSource = containerCache } - cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL) - c.cfgObject.eaclSource = cachedEACLStorage - cnrRdr.lister = client - cnrRdr.eacl = c.cfgObject.eaclSource cnrRdr.src = c.cfgObject.cnrSource } @@ -224,8 +214,6 @@ func (c *cfg) ExternalAddresses() []string { // implements interface required by container service provided by morph executor. 
type morphContainerReader struct { - eacl containerCore.EACLSource - src containerCore.Source lister interface { @@ -241,10 +229,6 @@ func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, return x.src.DeletionInfo(id) } -func (x *morphContainerReader) GetEACL(id cid.ID) (*containerCore.EACL, error) { - return x.eacl.GetEACL(id) -} - func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { return x.lister.ContainersOf(id) } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index ec521ab01..aeab1d6cb 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -2,7 +2,6 @@ package main import ( "context" - "errors" "fmt" "net" @@ -14,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" @@ -37,7 +35,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -484,29 +481,6 @@ func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *object ) } -type morphEACLFetcher struct { - w *cntClient.Client -} - -func (s *morphEACLFetcher) GetEACL(cnr cid.ID) (*containercore.EACL, error) { - eaclInfo, err := s.w.GetEACL(cnr) - if err != nil { - return nil, err - } - - binTable, err := eaclInfo.Value.Marshal() - if err != nil { - return nil, fmt.Errorf("marshal eACL table: %w", err) - } - - if !eaclInfo.Signature.Verify(binTable) { - // TODO(@cthulhu-rider): #468 use "const" error - return nil, errors.New("invalid signature of the eACL table") - } - - return eaclInfo, nil -} - type engineWithoutNotifications struct { engine *engine.StorageEngine } diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go index 69854f495..ba4404546 100644 --- a/pkg/core/container/storage.go +++ b/pkg/core/container/storage.go @@ -58,16 +58,3 @@ type EACL struct { // Session within which Value was set. Nil means session absence. Session *session.Container } - -// EACLSource is the interface that wraps -// basic methods of extended ACL table source. -type EACLSource interface { - // GetEACL reads the table from the source by identifier. - // It returns any error encountered. - // - // GetEACL must return exactly one non-nil value. - // - // Must return apistatus.ErrEACLNotFound if requested - // eACL table is not in source. - GetEACL(cid.ID) (*EACL, error) -} diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index adb808af3..eb43eab70 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -25,7 +25,6 @@ type morphExecutor struct { // Reader is an interface of read-only container storage. 
type Reader interface { containercore.Source - containercore.EACLSource // ContainersOf returns a list of container identifiers belonging // to the specified user of FrostFS system. Returns the identifiers From 3324c26fd8f8b68ca7feea07263bfc91b8587817 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 11 Nov 2024 09:26:00 +0300 Subject: [PATCH 159/591] [#1483] morph: Remove container.GetEACL() Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/container/client.go | 1 - pkg/morph/client/container/eacl.go | 95 ---------------------------- 2 files changed, 96 deletions(-) delete mode 100644 pkg/morph/client/container/eacl.go diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index b512a6594..bdbcce917 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -27,7 +27,6 @@ const ( getMethod = "get" listMethod = "list" containersOfMethod = "containersOf" - eaclMethod = "eACL" deletionInfoMethod = "deletionInfo" // putNamedMethod is method name for container put with an alias. It is exported to provide custom fee. diff --git a/pkg/morph/client/container/eacl.go b/pkg/morph/client/container/eacl.go deleted file mode 100644 index 9e604e091..000000000 --- a/pkg/morph/client/container/eacl.go +++ /dev/null @@ -1,95 +0,0 @@ -package container - -import ( - "crypto/sha256" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" -) - -// GetEACL reads the extended ACL table from FrostFS system -// through Container contract call. -// -// Returns apistatus.EACLNotFound if eACL table is missing in the contract. -func (c *Client) GetEACL(cnr cid.ID) (*container.EACL, error) { - binCnr := make([]byte, sha256.Size) - cnr.Encode(binCnr) - - prm := client.TestInvokePrm{} - prm.SetMethod(eaclMethod) - prm.SetArgs(binCnr) - - prms, err := c.client.TestInvoke(prm) - if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", eaclMethod, err) - } else if ln := len(prms); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", eaclMethod, ln) - } - - arr, err := client.ArrayFromStackItem(prms[0]) - if err != nil { - return nil, fmt.Errorf("could not get item array of eACL (%s): %w", eaclMethod, err) - } - - if len(arr) != 4 { - return nil, fmt.Errorf("unexpected eacl stack item count (%s): %d", eaclMethod, len(arr)) - } - - rawEACL, err := client.BytesFromStackItem(arr[0]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL (%s): %w", eaclMethod, err) - } - - sig, err := client.BytesFromStackItem(arr[1]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL signature (%s): %w", eaclMethod, err) - } - - // Client may not return errors if the table is missing, so check this case additionally. - // The absence of a signature in the response can be taken as an eACL absence criterion, - // since unsigned table cannot be approved in the storage by design. 
- if len(sig) == 0 { - return nil, new(apistatus.EACLNotFound) - } - - pub, err := client.BytesFromStackItem(arr[2]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL public key (%s): %w", eaclMethod, err) - } - - binToken, err := client.BytesFromStackItem(arr[3]) - if err != nil { - return nil, fmt.Errorf("could not get byte array of eACL session token (%s): %w", eaclMethod, err) - } - - var res container.EACL - - res.Value = eacl.NewTable() - if err = res.Value.Unmarshal(rawEACL); err != nil { - return nil, err - } - - if len(binToken) > 0 { - res.Session = new(session.Container) - - err = res.Session.Unmarshal(binToken) - if err != nil { - return nil, fmt.Errorf("could not unmarshal session token: %w", err) - } - } - - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var sigV2 refs.Signature - sigV2.SetKey(pub) - sigV2.SetSign(sig) - sigV2.SetScheme(refs.ECDSA_RFC6979_SHA256) - - err = res.Signature.ReadFromV2(sigV2) - return &res, err -} From 1cf51a80799b93cc679be7ea1d1f4022ae019ade Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 11 Nov 2024 09:37:30 +0300 Subject: [PATCH 160/591] [#1483] cli/docs: Remove set-eacl mention Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/docs/sessions.md | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/frostfs-cli/docs/sessions.md b/cmd/frostfs-cli/docs/sessions.md index 04563b7af..52c0e9b9b 100644 --- a/cmd/frostfs-cli/docs/sessions.md +++ b/cmd/frostfs-cli/docs/sessions.md @@ -72,4 +72,3 @@ All other `object` sub-commands support only static sessions (2). List of commands supporting sessions (static only): - `create` - `delete` -- `set-eacl` From 16830033f82cfa2936a721b22ab65469896ea240 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 11 Nov 2024 09:41:55 +0300 Subject: [PATCH 161/591] [#1483] cli: Remove --basic-acl flag Signed-off-by: Evgenii Stratonikov --- README.md | 2 +- cmd/frostfs-cli/modules/container/create.go | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/README.md b/README.md index 6998b6726..0109ed0e5 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ See `frostfs-contract`'s README.md for build instructions. 4. 
To create container and put object into it run (container and object IDs will be different): ``` -./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --basic-acl public-read-write --await +./bin/frostfs-cli container create -r 127.0.0.1:8080 --wallet ./dev/wallet.json --policy "REP 1 IN X CBF 1 SELECT 1 FROM * AS X" --await Enter password > <- press ENTER, the is no password for wallet CID: CfPhEuHQ2PRvM4gfBQDC4dWZY3NccovyfcnEdiq2ixju diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index 017f4b5ce..30f995180 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -15,14 +15,12 @@ import ( containerApi "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" ) var ( - containerACL string containerPolicy string containerAttributes []string containerAwait bool @@ -89,9 +87,6 @@ It will be stored in sidechain when inner ring will accepts it.`, err = parseAttributes(&cnr, containerAttributes) commonCmd.ExitOnErr(cmd, "", err) - var basicACL acl.Basic - commonCmd.ExitOnErr(cmd, "decode basic ACL string: %w", basicACL.DecodeString(containerACL)) - tok := getSession(cmd) if tok != nil { @@ -105,7 +100,6 @@ It will be stored in sidechain when inner ring will accepts it.`, } cnr.SetPlacementPolicy(*placementPolicy) - cnr.SetBasicACL(basicACL) var syncContainerPrm internalclient.SyncContainerPrm syncContainerPrm.SetClient(cli) @@ -163,10 +157,6 @@ func initContainerCreateCmd() { flags.DurationP(commonflags.Timeout, commonflags.TimeoutShorthand, commonflags.TimeoutDefault, commonflags.TimeoutUsage) flags.StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, commonflags.WalletPathDefault, commonflags.WalletPathUsage) flags.StringP(commonflags.Account, commonflags.AccountShorthand, commonflags.AccountDefault, commonflags.AccountUsage) - - flags.StringVar(&containerACL, "basic-acl", acl.NamePrivate, fmt.Sprintf("HEX encoded basic ACL value or keywords like '%s', '%s', '%s'", - acl.NamePublicRW, acl.NamePrivate, acl.NamePublicROExtended, - )) flags.StringVarP(&containerPolicy, "policy", "p", "", "QL-encoded or JSON-encoded placement policy or path to file with it") flags.StringSliceVarP(&containerAttributes, "attributes", "a", nil, "Comma separated pairs of container attributes in form of Key1=Value1,Key2=Value2") flags.BoolVar(&containerAwait, "await", false, "Block execution until container is persisted") From 9bd05e94c8e1dee1a63d343938abf77cc40eb51d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 30 Oct 2024 11:02:52 +0300 Subject: [PATCH 162/591] [#1449] tree: Add ApplyBatch method Concurrent Apply can lead to child node applies before parent, so undo/redo operations will perform. This leads to performance degradation in case of tree with many sublevels. 
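Batching keeps operations in log order within a single apply call instead of racing
them through a worker pool. A simplified, illustrative sketch of the accumulate-and-flush
loop this change introduces in sync.go — `Move` and `applyBatch` below are stand-ins for
the real pilorama types, not the actual API:

```go
package main

import (
	"fmt"
	"math"
)

type Move struct{ Time uint64 }

// applyBatch stands in for forest.TreeApplyBatch: the whole batch is applied
// sequentially in one call, so operations land in log order instead of being
// reordered by concurrent workers.
func applyBatch(batch []*Move) error {
	fmt.Printf("applied %d ops, last height %d\n", len(batch), batch[len(batch)-1].Time)
	return nil
}

// drain flushes a time-ordered operation stream in fixed-size batches and
// returns the height to resume from on failure (math.MaxUint64 means all
// operations were applied).
func drain(stream <-chan *Move, batchSize int) uint64 {
	var batch []*Move
	for m := range stream {
		batch = append(batch, m)
		if len(batch) == batchSize {
			if err := applyBatch(batch); err != nil {
				return batch[0].Time
			}
			batch = batch[:0]
		}
	}
	if len(batch) > 0 {
		if err := applyBatch(batch); err != nil {
			return batch[0].Time
		}
	}
	return math.MaxUint64
}

func main() {
	ops := make(chan *Move, 5)
	for t := uint64(1); t <= 5; t++ {
		ops <- &Move{Time: t}
	}
	close(ops)
	fmt.Println(drain(ops, 2))
}
```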
Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config/tree/config.go | 13 ++++ cmd/frostfs-node/config/tree/config_test.go | 1 + cmd/frostfs-node/tree.go | 1 + config/example/node.env | 1 + config/example/node.json | 1 + config/example/node.yaml | 1 + pkg/local_object_storage/engine/tree.go | 28 +++++++ pkg/local_object_storage/pilorama/boltdb.go | 74 +++++++++++++++++++ pkg/local_object_storage/pilorama/forest.go | 9 +++ .../pilorama/interface.go | 2 + pkg/local_object_storage/shard/tree.go | 27 +++++++ pkg/services/tree/options.go | 7 ++ pkg/services/tree/replicator.go | 1 + pkg/services/tree/service.go | 1 + pkg/services/tree/sync.go | 34 ++++----- 15 files changed, 180 insertions(+), 21 deletions(-) diff --git a/cmd/frostfs-node/config/tree/config.go b/cmd/frostfs-node/config/tree/config.go index 8a8919999..da877791e 100644 --- a/cmd/frostfs-node/config/tree/config.go +++ b/cmd/frostfs-node/config/tree/config.go @@ -10,6 +10,8 @@ import ( const ( subsection = "tree" + + SyncBatchSizeDefault = 1000 ) // TreeConfig is a wrapper over "tree" config section @@ -74,6 +76,17 @@ func (c TreeConfig) SyncInterval() time.Duration { return config.DurationSafe(c.cfg, "sync_interval") } +// SyncBatchSize returns the value of "sync_batch_size" +// config parameter from the "tree" section. +// +// Returns `SyncBatchSizeDefault` if config value is not specified. +func (c TreeConfig) SyncBatchSize() int { + if v := config.IntSafe(c.cfg, "sync_batch_size"); v > 0 { + return int(v) + } + return SyncBatchSizeDefault +} + // AuthorizedKeys parses and returns an array of "authorized_keys" config // parameter from "tree" section. // diff --git a/cmd/frostfs-node/config/tree/config_test.go b/cmd/frostfs-node/config/tree/config_test.go index 285ea0725..6628b8878 100644 --- a/cmd/frostfs-node/config/tree/config_test.go +++ b/cmd/frostfs-node/config/tree/config_test.go @@ -44,6 +44,7 @@ func TestTreeSection(t *testing.T) { require.Equal(t, 32, treeSec.ReplicationWorkerCount()) require.Equal(t, 5*time.Second, treeSec.ReplicationTimeout()) require.Equal(t, time.Hour, treeSec.SyncInterval()) + require.Equal(t, 2000, treeSec.SyncBatchSize()) require.Equal(t, expectedKeys, treeSec.AuthorizedKeys()) } diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index f188e2fbc..a92979daf 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -62,6 +62,7 @@ func initTreeService(c *cfg) { tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()), tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()), + tree.WithSyncBatchSize(treeConfig.SyncBatchSize()), tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()), tree.WithMetrics(c.metricsCollector.TreeService()), tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()), diff --git a/config/example/node.env b/config/example/node.env index 3979eb18f..e21328b60 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -31,6 +31,7 @@ FROSTFS_TREE_REPLICATION_CHANNEL_CAPACITY=32 FROSTFS_TREE_REPLICATION_WORKER_COUNT=32 FROSTFS_TREE_REPLICATION_TIMEOUT=5s FROSTFS_TREE_SYNC_INTERVAL=1h +FROSTFS_TREE_SYNC_BATCH_SIZE=2000 FROSTFS_TREE_AUTHORIZED_KEYS="0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" # gRPC section diff --git a/config/example/node.json b/config/example/node.json index 1ea28de6c..ac7125949 100644 --- 
a/config/example/node.json +++ b/config/example/node.json @@ -69,6 +69,7 @@ "replication_worker_count": 32, "replication_timeout": "5s", "sync_interval": "1h", + "sync_batch_size": 2000, "authorized_keys": [ "0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0", "02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56" diff --git a/config/example/node.yaml b/config/example/node.yaml index 4a418dfcb..d547f5cee 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -59,6 +59,7 @@ tree: replication_channel_capacity: 32 replication_timeout: 5s sync_interval: 1h + sync_batch_size: 2000 authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli - 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0 - 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56 diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 39122628f..6bb5e3a41 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -110,6 +110,34 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str return nil } +// TreeApplyBatch implements the pilorama.Forest interface. +func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeApplyBatch", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + index, lst, err := e.getTreeShard(ctx, cnr, treeID) + if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { + return err + } + + err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m) + if err != nil { + if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { + e.reportShardError(lst[index], "can't perform `TreeApplyBatch`", err, + zap.Stringer("cid", cnr), + zap.String("tree", treeID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + } + return err + } + return nil +} + // TreeGetByPath implements the pilorama.Forest interface. 
func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeGetByPath", diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 7bce1f340..a778434dd 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -558,6 +558,80 @@ func (t *boltForest) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string return metaerr.Wrap(err) } +func (t *boltForest) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error { + var ( + startedAt = time.Now() + success = false + ) + defer func() { + t.metrics.AddMethodDuration("TreeApplyBatch", time.Since(startedAt), success) + }() + + _, span := tracing.StartSpanFromContext(ctx, "boltForest.TreeApplyBatch", + trace.WithAttributes( + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + m, err := t.filterSeen(cnr, treeID, m) + if err != nil { + return err + } + if len(m) == 0 { + success = true + return nil + } + + ch := make(chan error) + b := &batch{ + forest: t, + cid: cnr, + treeID: treeID, + results: []chan<- error{ch}, + operations: m, + } + go func() { + b.run() + }() + err = <-ch + success = err == nil + return metaerr.Wrap(err) +} + +func (t *boltForest) filterSeen(cnr cidSDK.ID, treeID string, m []*Move) ([]*Move, error) { + t.modeMtx.RLock() + defer t.modeMtx.RUnlock() + + if t.mode.NoMetabase() { + return nil, ErrDegradedMode + } + + ops := make([]*Move, 0, len(m)) + err := t.db.View(func(tx *bbolt.Tx) error { + treeRoot := tx.Bucket(bucketName(cnr, treeID)) + if treeRoot == nil { + ops = m + return nil + } + b := treeRoot.Bucket(logBucket) + for _, op := range m { + var logKey [8]byte + binary.BigEndian.PutUint64(logKey[:], op.Time) + seen := b.Get(logKey[:]) != nil + if !seen { + ops = append(ops, op) + } + } + return nil + }) + if err != nil { + return nil, metaerr.Wrap(err) + } + return ops, nil +} + // TreeApplyStream should be used with caution: this method locks other write transactions while `source` is not closed. func (t *boltForest) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID string, source <-chan *Move) error { var ( diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index bb5c22e51..374943745 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -111,6 +111,15 @@ func (f *memoryForest) TreeApply(_ context.Context, cnr cid.ID, treeID string, o return s.Apply(op) } +func (f *memoryForest) TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID string, ops []*Move) error { + for _, op := range ops { + if err := f.TreeApply(ctx, cnr, treeID, op, true); err != nil { + return err + } + } + return nil +} + func (f *memoryForest) Init() error { return nil } diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 61a3849bf..b6ca246f2 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -21,6 +21,8 @@ type Forest interface { // TreeApply applies replicated operation from another node. // If background is true, TreeApply will first check whether an operation exists. 
TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m *Move, backgroundSync bool) error + // TreeApplyBatch applies replicated operations from another node. + TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*Move) error // TreeGetByPath returns all nodes corresponding to the path. // The path is constructed by descending from the root using the values of the // AttributeFilename in meta. diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 26dc8ec1e..01a014cec 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -106,6 +106,33 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } +// TreeApplyBatch implements the pilorama.Forest interface. +func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string, m []*pilorama.Move) error { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeApplyBatch", + trace.WithAttributes( + attribute.String("shard_id", s.ID().String()), + attribute.String("container_id", cnr.EncodeToString()), + attribute.String("tree_id", treeID), + ), + ) + defer span.End() + + if s.pilorama == nil { + return ErrPiloramaDisabled + } + + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.ReadOnly() { + return ErrReadOnlyMode + } + if s.info.Mode.NoMetabase() { + return ErrDegradedMode + } + return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) +} + // TreeGetByPath implements the pilorama.Forest interface. func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, attr string, path []string, latest bool) ([]pilorama.Node, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeGetByPath", diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 1633ae557..a3f488009 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -41,6 +41,7 @@ type cfg struct { replicatorTimeout time.Duration containerCacheSize int authorizedKeys [][]byte + syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage morphChainStorage policyengine.MorphRuleChainStorageReader @@ -113,6 +114,12 @@ func WithReplicationWorkerCount(n int) Option { } } +func WithSyncBatchSize(n int) Option { + return func(c *cfg) { + c.syncBatchSize = n + } +} + func WithContainerCacheSize(n int) Option { return func(c *cfg) { if n > 0 { diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 95c8f8013..84e376cf7 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -40,6 +40,7 @@ const ( defaultReplicatorCapacity = 64 defaultReplicatorWorkerCount = 64 defaultReplicatorSendTimeout = time.Second * 5 + defaultSyncBatchSize = 1000 ) func (s *Service) localReplicationWorker(ctx context.Context) { diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 8097d545c..b63338d25 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -55,6 +55,7 @@ func New(opts ...Option) *Service { s.replicatorChannelCapacity = defaultReplicatorCapacity s.replicatorWorkerCount = defaultReplicatorWorkerCount s.replicatorTimeout = defaultReplicatorSendTimeout + s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} for i := range opts { diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index ce1e72104..b93410616 100644 --- a/pkg/services/tree/sync.go +++ 
b/pkg/services/tree/sync.go @@ -177,37 +177,29 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, operationStream <-chan *pilorama.Move, ) uint64 { - errGroup, _ := errgroup.WithContext(ctx) - const workersCount = 1024 - errGroup.SetLimit(workersCount) - - // We run TreeApply concurrently for the operation batch. Let's consider two operations - // in the batch m1 and m2 such that m1.Time < m2.Time. The engine may apply m2 and fail - // on m1. That means the service must start sync from m1.Time in the next iteration and - // this height is stored in unappliedOperationHeight. - var unappliedOperationHeight uint64 = math.MaxUint64 - var heightMtx sync.Mutex - var prev *pilorama.Move + var batch []*pilorama.Move for m := range operationStream { // skip already applied op if prev != nil && prev.Time == m.Time { continue } prev = m + batch = append(batch, m) - errGroup.Go(func() error { - if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil { - heightMtx.Lock() - unappliedOperationHeight = min(unappliedOperationHeight, m.Time) - heightMtx.Unlock() - return err + if len(batch) == s.syncBatchSize { + if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { + return batch[0].Time } - return nil - }) + batch = batch[:0] + } } - _ = errGroup.Wait() - return unappliedOperationHeight + if len(batch) > 0 { + if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { + return batch[0].Time + } + } + return math.MaxUint64 } func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, From 46fef276b474ba5c5ee66d0dee2a7595d47bc4f0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 11 Nov 2024 12:12:45 +0300 Subject: [PATCH 163/591] [#1449] tree: Log tree sync with Info level Signed-off-by: Dmitrii Stepanov --- pkg/services/tree/sync.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index b93410616..2c6deeb78 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -376,7 +376,7 @@ func (s *Service) syncLoop(ctx context.Context) { return case <-s.syncChan: ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync") - s.log.Debug(logs.TreeSyncingTrees) + s.log.Info(logs.TreeSyncingTrees) start := time.Now() @@ -394,7 +394,7 @@ func (s *Service) syncLoop(ctx context.Context) { s.removeContainers(ctx, newMap) - s.log.Debug(logs.TreeTreesHaveBeenSynchronized) + s.log.Info(logs.TreeTreesHaveBeenSynchronized) s.metrics.AddSyncDuration(time.Since(start), true) span.End() From c00f4bab18854b88f8c87d5b3dfff859d7505a26 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 12 Nov 2024 11:32:33 +0300 Subject: [PATCH 164/591] [#1488] go.mod: Bump observability version Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8a70c3819..84f9f9a66 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 git.frostfs.info/TrueCloudLab/frostfs-sdk-go 
v0.0.0-20241107121119-cb813e27a823 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 9778f91e2..d93e9b74e 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= From e2658c75194fb8b04b0ec763dda92bfa94967e55 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 12 Nov 2024 11:33:23 +0300 Subject: [PATCH 165/591] [#1488] tracing: KV attributes for spans from config Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config/tracing/config.go | 23 +++++++++- .../config/tracing/config_test.go | 46 +++++++++++++++++++ config/example/node.env | 4 ++ config/example/node.json | 14 +++++- config/example/node.yaml | 5 ++ 5 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 cmd/frostfs-node/config/tracing/config_test.go diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go index 8544c672c..91ef669ee 100644 --- a/cmd/frostfs-node/config/tracing/config.go +++ b/cmd/frostfs-node/config/tracing/config.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" @@ -24,6 +25,7 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) { Service: "frostfs-node", InstanceID: getInstanceIDOrDefault(c), Version: misc.Version, + Attributes: make(map[string]string), } if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" { @@ -38,11 +40,30 @@ func ToTracingConfig(c *config.Config) (*tracing.Config, error) { } conf.ServerCaCertPool = certPool } + + i := uint64(0) + for ; ; i++ { + si := strconv.FormatUint(i, 10) + ac := c.Sub(subsection).Sub("attributes").Sub(si) + k := config.StringSafe(ac, "key") + if k == "" { + break + } + v := config.StringSafe(ac, "value") + if v == "" { + return nil, fmt.Errorf("empty tracing attribute value for key %s", k) + } + if _, ok := conf.Attributes[k]; ok { + return nil, fmt.Errorf("tracing attribute key %s defined more than once", k) + } + conf.Attributes[k] = v + } + return conf, nil } func getInstanceIDOrDefault(c 
*config.Config) string { - s := config.StringSlice(c.Sub("node"), "addresses") + s := config.StringSliceSafe(c.Sub("node"), "addresses") if len(s) > 0 { return s[0] } diff --git a/cmd/frostfs-node/config/tracing/config_test.go b/cmd/frostfs-node/config/tracing/config_test.go new file mode 100644 index 000000000..8e485ca6e --- /dev/null +++ b/cmd/frostfs-node/config/tracing/config_test.go @@ -0,0 +1,46 @@ +package tracing + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "github.com/stretchr/testify/require" +) + +func TestTracingSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + tc, err := ToTracingConfig(configtest.EmptyConfig()) + require.NoError(t, err) + require.Equal(t, false, tc.Enabled) + require.Equal(t, tracing.Exporter(""), tc.Exporter) + require.Equal(t, "", tc.Endpoint) + require.Equal(t, "frostfs-node", tc.Service) + require.Equal(t, "", tc.InstanceID) + require.Nil(t, tc.ServerCaCertPool) + require.Empty(t, tc.Attributes) + }) + + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + tc, err := ToTracingConfig(c) + require.NoError(t, err) + require.Equal(t, true, tc.Enabled) + require.Equal(t, tracing.OTLPgRPCExporter, tc.Exporter) + require.Equal(t, "localhost", tc.Endpoint) + require.Equal(t, "frostfs-node", tc.Service) + require.Nil(t, tc.ServerCaCertPool) + require.EqualValues(t, map[string]string{ + "key0": "value", + "key1": "value", + }, tc.Attributes) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/config/example/node.env b/config/example/node.env index e21328b60..f470acf3e 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -203,6 +203,10 @@ FROSTFS_TRACING_ENABLED=true FROSTFS_TRACING_ENDPOINT="localhost" FROSTFS_TRACING_EXPORTER="otlp_grpc" FROSTFS_TRACING_TRUSTED_CA="" +FROSTFS_TRACING_ATTRIBUTES_0_KEY=key0 +FROSTFS_TRACING_ATTRIBUTES_0_VALUE=value +FROSTFS_TRACING_ATTRIBUTES_1_KEY=key1 +FROSTFS_TRACING_ATTRIBUTES_1_VALUE=value FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824 diff --git a/config/example/node.json b/config/example/node.json index ac7125949..dba3bad8b 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -259,9 +259,19 @@ }, "tracing": { "enabled": true, - "endpoint": "localhost:9090", + "endpoint": "localhost", "exporter": "otlp_grpc", - "trusted_ca": "/etc/ssl/tracing.pem" + "trusted_ca": "", + "attributes":[ + { + "key": "key0", + "value": "value" + }, + { + "key": "key1", + "value": "value" + } + ] }, "runtime": { "soft_memory_limit": 1073741824 diff --git a/config/example/node.yaml b/config/example/node.yaml index d547f5cee..8f9300b4a 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -239,6 +239,11 @@ tracing: exporter: "otlp_grpc" endpoint: "localhost" trusted_ca: "" + attributes: + - key: key0 + value: value + - key: key1 + value: value runtime: soft_memory_limit: 1gb From e122ff6013f63898471ecb95c06073d925f50698 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 12 Nov 2024 11:34:58 +0300 Subject: [PATCH 166/591] [#1488] dev: Add Jaeger image and enable tracing on debug Signed-off-by: Dmitrii Stepanov --- dev/.vscode-example/launch.json | 28 ++++++++++++++++++++++++---- dev/docker-compose.yml | 12 
++++++++++++ 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json index 990fd42a8..6abf5ecdc 100644 --- a/dev/.vscode-example/launch.json +++ b/dev/.vscode-example/launch.json @@ -78,7 +78,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s1/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9090", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8080" }, "postDebugTask": "env-down" }, @@ -129,7 +134,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s2/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9091", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8082" }, "postDebugTask": "env-down" }, @@ -180,7 +190,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s3/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9092", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8084" }, "postDebugTask": "env-down" }, @@ -231,7 +246,12 @@ "FROSTFS_STORAGE_SHARD_1_PILORAMA_PATH":"${workspaceFolder}/.cache/storage/s4/pilorama1", "FROSTFS_PROMETHEUS_ENABLED":"true", "FROSTFS_PROMETHEUS_ADDRESS":"127.0.0.1:9093", - "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s" + "FROSTFS_PROMETHEUS_SHUTDOWN_TIMEOUT":"15s", + "FROSTFS_TRACING_ENABLED":"true", + "FROSTFS_TRACING_EXPORTER":"otlp_grpc", + "FROSTFS_TRACING_ENDPOINT":"127.0.0.1:4317", + "FROSTFS_TRACING_ATTRIBUTES_0_KEY":"host.ip", + "FROSTFS_TRACING_ATTRIBUTES_0_VALUE":"127.0.0.1:8086" }, "postDebugTask": "env-down" } diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml index be1956e65..40ed35aeb 100644 --- a/dev/docker-compose.yml +++ b/dev/docker-compose.yml @@ -14,3 +14,15 @@ services: - ./neo-go/node-wallet.json:/wallets/node-wallet.json - ./neo-go/config.yml:/wallets/config.yml - ./neo-go/wallet.json:/wallets/wallet.json + jaeger: + image: jaegertracing/all-in-one:latest + container_name: jaeger + ports: + - '4317:4317' #OTLP over gRPC + - '4318:4318' #OTLP over HTTP + - '16686:16686' #frontend + stop_signal: SIGKILL + environment: + - COLLECTOR_OTLP_ENABLED=true + - SPAN_STORAGE_TYPE=badger + - BADGER_EPHEMERAL=true From f1556e3c42764499c2a7f4f9c75f8b13a7d643d7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 12 Nov 2024 11:35:20 +0300 Subject: [PATCH 167/591] [#1488] Makefile: Drop all containers created on env-up Signed-off-by: Dmitrii Stepanov --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 68a31febe..c54601c73 100755 --- a/Makefile +++ b/Makefile @@ -282,7 +282,6 @@ env-up: all # Shutdown dev environment 
env-down: - docker compose -f dev/docker-compose.yml down - docker volume rm -f frostfs-node_neo-go + docker compose -f dev/docker-compose.yml down -v rm -rf ./$(TMP_DIR)/state rm -rf ./$(TMP_DIR)/storage From b451de94c81a7a40f2afa712dd56f9c84f5be1c1 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 13 Nov 2024 10:02:38 +0300 Subject: [PATCH 168/591] [#1492] metabase: Fix typo in objData Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/get.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 776f5d27c..6a35d47f3 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -216,10 +216,10 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { ecInfo := objectSDK.NewECInfo() for _, key := range keys { // check in primary index - ojbData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) - if len(ojbData) != 0 { + objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) + if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(ojbData); err != nil { + if err := obj.Unmarshal(objData); err != nil { return err } chunk := objectSDK.ECChunk{} From 8ed7a676d50e24489e2abeb5269d6eb3332df1f8 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 13 Nov 2024 10:07:32 +0300 Subject: [PATCH 169/591] [#1492] metabase: Ensure Unmarshal() is called on a cloned slice The slice returned from bucket.Get() is only valid during the tx lifetime. Cloning it is not necessary everywhere, but better safe than sorry. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/exists.go | 3 ++- pkg/local_object_storage/metabase/get.go | 4 ++-- pkg/local_object_storage/metabase/iterators.go | 3 ++- pkg/local_object_storage/metabase/list.go | 4 ++-- pkg/local_object_storage/metabase/put.go | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 2e1b1dce8..0294dd3ba 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "fmt" "time" @@ -215,7 +216,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e splitInfo := objectSDK.NewSplitInfo() - err := splitInfo.Unmarshal(rawSplitInfo) + err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo)) if err != nil { return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err) } diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 6a35d47f3..1cbf78ab2 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -185,7 +185,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD child := objectSDK.New() - err = child.Unmarshal(data) + err = child.Unmarshal(bytes.Clone(data)) if err != nil { return nil, fmt.Errorf("can't unmarshal child with parent: %w", err) } @@ -219,7 +219,7 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(objData); err != nil { + if err := obj.Unmarshal(bytes.Clone(objData)); err != nil { return err } chunk := 
objectSDK.ECChunk{} diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index d44c51fb2..1809bfd2d 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "errors" "fmt" @@ -195,7 +196,7 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) e } return b.ForEach(func(k, v []byte) error { - if oid.Decode(k) == nil && obj.Unmarshal(v) == nil { + if oid.Decode(k) == nil && obj.Unmarshal(bytes.Clone(v)) == nil { return f(cid, oid, obj) } diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index b007ef0da..a7ff2222f 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -247,7 +247,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket var ecInfo *objectcore.ECInfo if objType == objectSDK.TypeRegular { var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { + if err := o.Unmarshal(bytes.Clone(v)); err != nil { return nil, nil, nil, err } isLinkingObj = isLinkObject(&o) @@ -413,7 +413,7 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, p var ecInfo *objectcore.ECInfo if prm.ObjectType == objectSDK.TypeRegular { var o objectSDK.Object - if err := o.Unmarshal(v); err != nil { + if err := o.Unmarshal(bytes.Clone(v)); err != nil { return err } isLinkingObj = isLinkObject(&o) diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 09c5e04ad..2d94e7ae1 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -1,6 +1,7 @@ package meta import ( + "bytes" "context" "encoding/binary" "errors" @@ -313,7 +314,7 @@ func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName [] return si.Marshal() default: oldSI := objectSDK.NewSplitInfo() - if err := oldSI.Unmarshal(old); err != nil { + if err := oldSI.Unmarshal(bytes.Clone(old)); err != nil { return nil, err } si = util.MergeSplitInfo(si, oldSI) From fd004add00fd35924e70dd450221dc74bc285cb9 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 13 Nov 2024 10:08:17 +0300 Subject: [PATCH 170/591] [#1492] metabase: Fix import formatting Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/iterators.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 1809bfd2d..5d42e4125 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" "go.opentelemetry.io/otel/attribute" From c16dae8b4d76c55037a56d9382d359fa91dc27f6 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 18 Oct 2024 13:31:53 +0300 Subject: [PATCH 171/591] [#1437] logger: Use context to log trace id Signed-off-by: Dmitrii Stepanov --- pkg/util/logger/log.go | 40 ++++++++++++++++++++++++++++++++++ pkg/util/logger/logger.go | 16 +++++++++++--- pkg/util/logger/test/logger.go | 11 +++++----- 3 files changed, 59 insertions(+), 8 deletions(-) create mode 100644 pkg/util/logger/log.go 
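Context for the logger wrappers introduced below: they attach a trace_id field only when the context carries one, via tracing.GetTraceID(ctx). A minimal sketch, assuming such a helper is backed by the standard OpenTelemetry span context (the actual pkg/tracing implementation in this repository may differ):

package tracing

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// GetTraceID returns the hex-encoded trace ID carried by ctx,
// or an empty string when ctx has no valid span context.
// Sketch only: assumes the helper is built on OpenTelemetry's trace API.
func GetTraceID(ctx context.Context) string {
	sc := trace.SpanContextFromContext(ctx)
	if !sc.HasTraceID() {
		return ""
	}
	return sc.TraceID().String()
}

With a helper of this shape, the Debug/Info/Warn/Error wrappers can append zap.String("trace_id", ...) only for requests that actually run inside a traced span, keeping untraced log lines unchanged.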
diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go new file mode 100644 index 000000000..269e07d90 --- /dev/null +++ b/pkg/util/logger/log.go @@ -0,0 +1,40 @@ +package logger + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + "go.uber.org/zap" +) + +func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Debug(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Debug(msg, fields...) +} + +func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Info(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Info(msg, fields...) +} + +func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Warn(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Warn(msg, fields...) +} + +func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + l.z.Error(msg, append(fields, zap.String("trace_id", traceID))...) + return + } + l.z.Error(msg, fields...) +} diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 4b60f02de..d4ac2ab02 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -12,7 +12,7 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - *zap.Logger + z *zap.Logger lvl zap.AtomicLevel } @@ -136,7 +136,7 @@ func newConsoleLogger(prm *Prm) (*Logger, error) { return nil, err } - l := &Logger{Logger: lZap, lvl: lvl} + l := &Logger{z: lZap, lvl: lvl} prm._log = l return l, nil @@ -169,7 +169,7 @@ func newJournaldLogger(prm *Prm) (*Logger, error) { lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))) - l := &Logger{Logger: lZap, lvl: lvl} + l := &Logger{z: lZap, lvl: lvl} prm._log = l return l, nil @@ -179,3 +179,13 @@ func (l *Logger) reload(prm Prm) error { l.lvl.SetLevel(prm.level) return nil } + +func (l *Logger) WithOptions(options ...zap.Option) { + l.z = l.z.WithOptions(options...) +} + +func NewLoggerWrapper(z *zap.Logger) *Logger { + return &Logger{ + z: z, + } +} diff --git a/pkg/util/logger/test/logger.go b/pkg/util/logger/test/logger.go index f93756d17..b5b0a31eb 100644 --- a/pkg/util/logger/test/logger.go +++ b/pkg/util/logger/test/logger.go @@ -11,9 +11,10 @@ import ( // NewLogger creates a new logger. 
func NewLogger(t testing.TB) *logger.Logger { - var l logger.Logger - l.Logger = zaptest.NewLogger(t, - zaptest.Level(zapcore.DebugLevel), - zaptest.WrapOptions(zap.Development(), zap.AddCaller())) - return &l + return logger.NewLoggerWrapper( + zaptest.NewLogger(t, + zaptest.Level(zapcore.DebugLevel), + zaptest.WrapOptions(zap.Development(), zap.AddCaller()), + ), + ) } From 6db46257c0d30ddba71138de248aa86c18611730 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 10:22:54 +0300 Subject: [PATCH 172/591] [#1437] node: Use ctx for logging Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-ir/config.go | 25 +++---- cmd/frostfs-ir/httpcomponent.go | 15 ++-- cmd/frostfs-ir/main.go | 12 ++-- cmd/frostfs-ir/pprof.go | 7 +- cmd/frostfs-node/config.go | 68 +++++++++--------- cmd/frostfs-node/container.go | 6 +- cmd/frostfs-node/control.go | 6 +- cmd/frostfs-node/grpc.go | 29 ++++---- cmd/frostfs-node/httpcomponent.go | 4 +- cmd/frostfs-node/main.go | 20 +++--- cmd/frostfs-node/morph.go | 22 +++--- cmd/frostfs-node/netmap.go | 10 +-- cmd/frostfs-node/object.go | 6 +- cmd/frostfs-node/runtime.go | 5 +- cmd/frostfs-node/tracing.go | 6 +- cmd/frostfs-node/tree.go | 10 +-- internal/audit/request.go | 4 +- pkg/core/object/fmt_test.go | 12 ++-- pkg/core/object/sender_classifier.go | 5 +- pkg/innerring/initialization.go | 12 ++-- pkg/innerring/innerring.go | 14 ++-- pkg/innerring/notary.go | 8 +-- pkg/innerring/processors/alphabet/handlers.go | 6 +- .../processors/alphabet/process_emit.go | 19 ++--- .../processors/alphabet/processor.go | 3 +- pkg/innerring/processors/balance/handlers.go | 5 +- .../processors/balance/process_assets.go | 6 +- pkg/innerring/processors/balance/processor.go | 3 +- .../processors/container/handlers.go | 9 +-- .../processors/container/process_container.go | 13 ++-- .../processors/container/processor.go | 3 +- pkg/innerring/processors/frostfs/handlers.go | 17 ++--- .../processors/frostfs/process_assets.go | 24 ++++--- .../processors/frostfs/process_config.go | 6 +- pkg/innerring/processors/frostfs/processor.go | 3 +- .../processors/governance/handlers.go | 6 +- .../processors/governance/process_update.go | 29 ++++---- pkg/innerring/processors/netmap/handlers.go | 23 +++--- .../processors/netmap/process_cleanup.go | 12 ++-- .../processors/netmap/process_epoch.go | 16 +++-- .../processors/netmap/process_peers.go | 19 ++--- pkg/innerring/processors/netmap/processor.go | 3 +- pkg/innerring/state.go | 15 ++-- .../blobovnicza/blobovnicza.go | 4 +- .../blobovnicza/control.go | 19 ++--- .../blobovnicza/delete.go | 2 +- .../blobstor/blobovniczatree/control.go | 6 +- .../blobstor/blobovniczatree/delete.go | 2 +- .../blobstor/blobovniczatree/exists.go | 2 +- .../blobstor/blobovniczatree/get.go | 2 +- .../blobstor/blobovniczatree/get_range.go | 2 +- .../blobstor/blobovniczatree/iterate.go | 4 +- .../blobstor/blobovniczatree/manager.go | 7 +- .../blobstor/blobovniczatree/option.go | 2 +- .../blobstor/blobovniczatree/put.go | 6 +- .../blobstor/blobovniczatree/rebuild.go | 50 ++++++------- pkg/local_object_storage/blobstor/blobstor.go | 4 +- pkg/local_object_storage/blobstor/control.go | 8 +-- pkg/local_object_storage/blobstor/delete.go | 4 +- pkg/local_object_storage/blobstor/exists.go | 2 +- .../blobstor/fstree/fstree.go | 6 +- .../blobstor/fstree/option.go | 2 +- pkg/local_object_storage/blobstor/iterate.go | 2 +- pkg/local_object_storage/blobstor/logger.go | 6 +- pkg/local_object_storage/blobstor/put.go | 2 +- pkg/local_object_storage/blobstor/rebuild.go | 4 +- 
pkg/local_object_storage/engine/control.go | 14 ++-- pkg/local_object_storage/engine/delete.go | 6 +- pkg/local_object_storage/engine/engine.go | 20 +++--- pkg/local_object_storage/engine/evacuate.go | 28 ++++---- pkg/local_object_storage/engine/get.go | 2 +- pkg/local_object_storage/engine/inhume.go | 24 +++---- pkg/local_object_storage/engine/put.go | 8 +-- pkg/local_object_storage/engine/range.go | 2 +- .../engine/remove_copies.go | 8 +-- pkg/local_object_storage/engine/shards.go | 16 ++--- pkg/local_object_storage/internal/log/log.go | 6 +- pkg/local_object_storage/metabase/control.go | 6 +- pkg/local_object_storage/metabase/db.go | 2 +- pkg/local_object_storage/metabase/delete.go | 2 +- pkg/local_object_storage/metabase/inhume.go | 2 +- pkg/local_object_storage/metabase/put.go | 2 +- .../metabase/upgrade_test.go | 10 +-- pkg/local_object_storage/shard/control.go | 16 ++--- pkg/local_object_storage/shard/delete.go | 6 +- pkg/local_object_storage/shard/gc.go | 70 +++++++++---------- pkg/local_object_storage/shard/get.go | 6 +- pkg/local_object_storage/shard/id.go | 3 +- pkg/local_object_storage/shard/inhume.go | 2 +- pkg/local_object_storage/shard/list.go | 2 +- pkg/local_object_storage/shard/lock_test.go | 2 +- pkg/local_object_storage/shard/mode.go | 6 +- pkg/local_object_storage/shard/put.go | 2 +- pkg/local_object_storage/shard/rebuild.go | 8 +-- pkg/local_object_storage/shard/shard.go | 10 +-- pkg/local_object_storage/shard/writecache.go | 6 +- pkg/local_object_storage/writecache/cache.go | 2 +- pkg/local_object_storage/writecache/delete.go | 2 +- pkg/local_object_storage/writecache/flush.go | 4 +- .../writecache/flush_test.go | 2 +- pkg/local_object_storage/writecache/mode.go | 4 +- .../writecache/options.go | 2 +- pkg/local_object_storage/writecache/put.go | 2 +- .../writecache/storage.go | 4 +- pkg/morph/client/client.go | 10 +-- pkg/morph/client/constructor.go | 6 +- pkg/morph/client/multi.go | 8 +-- pkg/morph/client/notary.go | 9 +-- pkg/morph/event/listener.go | 70 +++++++++---------- pkg/morph/event/utils.go | 3 +- pkg/morph/subscriber/subscriber.go | 10 +-- pkg/services/apemanager/executor.go | 2 +- pkg/services/object/acl/v2/service.go | 2 +- pkg/services/object/common/writer/common.go | 2 +- pkg/services/object/common/writer/ec.go | 8 +-- pkg/services/object/delete/delete.go | 6 +- pkg/services/object/delete/exec.go | 18 ++--- pkg/services/object/delete/local.go | 6 +- pkg/services/object/delete/service.go | 4 +- pkg/services/object/get/assemble.go | 10 +-- pkg/services/object/get/assembleec.go | 10 +-- pkg/services/object/get/assemblerec.go | 18 ++--- pkg/services/object/get/container.go | 12 ++-- pkg/services/object/get/get.go | 14 ++-- pkg/services/object/get/local.go | 2 +- pkg/services/object/get/remote.go | 4 +- pkg/services/object/get/request.go | 14 ++-- pkg/services/object/get/service.go | 4 +- pkg/services/object/get/v2/get_range_hash.go | 8 +-- pkg/services/object/get/v2/service.go | 4 +- pkg/services/object/put/service.go | 2 +- pkg/services/object/put/single.go | 2 +- pkg/services/object/search/container.go | 16 ++--- pkg/services/object/search/exec.go | 4 +- pkg/services/object/search/local.go | 2 +- pkg/services/object/search/search.go | 8 +-- pkg/services/object/search/service.go | 4 +- pkg/services/object/util/log.go | 6 +- .../object_manager/tombstone/checker.go | 4 +- .../object_manager/tombstone/constructor.go | 2 +- pkg/services/policer/check.go | 10 +-- pkg/services/policer/ec.go | 40 +++++------ pkg/services/policer/option.go | 2 +- 
pkg/services/policer/policer.go | 3 +- pkg/services/policer/process.go | 8 +-- pkg/services/replicator/process.go | 8 +-- pkg/services/replicator/pull.go | 8 +-- pkg/services/replicator/put.go | 6 +- pkg/services/replicator/replicator.go | 2 +- pkg/services/session/executor.go | 2 +- .../session/storage/persistent/options.go | 2 +- .../session/storage/persistent/storage.go | 7 +- pkg/services/tree/redirect.go | 2 +- pkg/services/tree/replicator.go | 8 +-- pkg/services/tree/service.go | 2 +- pkg/services/tree/sync.go | 36 +++++----- pkg/util/logger/logger.go | 4 ++ 157 files changed, 764 insertions(+), 713 deletions(-) diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 137e764ed..34d9d5595 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "os/signal" "syscall" @@ -58,13 +59,13 @@ func watchForSignal(cancel func()) { // signals causing application to shut down should have priority over // reconfiguration signal case <-ch: - log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() shutdown() - log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() shutdown() return @@ -72,35 +73,35 @@ func watchForSignal(cancel func()) { // block until any signal is receieved select { case <-ch: - log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() shutdown() - log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() shutdown() return case <-sighupCh: - log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + log.Info(context.Background(), logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { - log.Info(logs.FrostFSNodeSIGHUPSkip) + log.Info(context.Background(), logs.FrostFSNodeSIGHUPSkip) break } err := reloadConfig() if err != nil { - log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err)) } pprofCmp.reload() metricsCmp.reload() - log.Info(logs.FrostFSIRReloadExtraWallets) + log.Info(context.Background(), logs.FrostFSIRReloadExtraWallets) err = innerRing.SetExtraWallets(cfg) if err != nil { - log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err)) } innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) - log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + log.Info(context.Background(), logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } } } diff --git 
a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go index 2792c3548..685ef61ad 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -1,6 +1,7 @@ package main import ( + "context" "net/http" "time" @@ -25,7 +26,7 @@ const ( ) func (c *httpComponent) init() { - log.Info("init " + c.name) + log.Info(context.Background(), "init "+c.name) c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) c.address = cfg.GetString(c.name + addressKeyPostfix) c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) @@ -39,14 +40,14 @@ func (c *httpComponent) init() { httputil.WithShutdownTimeout(c.shutdownDur), ) } else { - log.Info(c.name + " is disabled, skip") + log.Info(context.Background(), c.name+" is disabled, skip") c.srv = nil } } func (c *httpComponent) start() { if c.srv != nil { - log.Info("start " + c.name) + log.Info(context.Background(), "start "+c.name) wg.Add(1) go func() { defer wg.Done() @@ -57,7 +58,7 @@ func (c *httpComponent) start() { func (c *httpComponent) shutdown() error { if c.srv != nil { - log.Info("shutdown " + c.name) + log.Info(context.Background(), "shutdown "+c.name) return c.srv.Shutdown() } return nil @@ -71,11 +72,11 @@ func (c *httpComponent) needReload() bool { } func (c *httpComponent) reload() { - log.Info("reload " + c.name) + log.Info(context.Background(), "reload "+c.name) if c.needReload() { - log.Info(c.name + " config updated") + log.Info(context.Background(), c.name+" config updated") if err := c.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } else { diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 4bc5923a0..55a8ce00d 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -103,32 +103,32 @@ func main() { err = innerRing.Start(ctx, intErr) exitErr(err) - log.Info(logs.CommonApplicationStarted, + log.Info(ctx, logs.CommonApplicationStarted, zap.String("version", misc.Version)) watchForSignal(cancel) <-ctx.Done() // graceful shutdown - log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) + log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop) wg.Wait() - log.Info(logs.FrostFSIRApplicationStopped) + log.Info(ctx, logs.FrostFSIRApplicationStopped) } func shutdown() { innerRing.Stop() if err := metricsCmp.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } if err := pprofCmp.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } if err := sdnotify.ClearStatus(); err != nil { - log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index ff5642008..e95fd117f 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -1,6 +1,7 @@ package main import ( + "context" "runtime" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -52,11 +53,11 @@ func (c *pprofComponent) needReload() bool { } func (c *pprofComponent) reload() { - log.Info("reload " + c.name) + log.Info(context.Background(), "reload "+c.name) if c.needReload() { - log.Info(c.name + " config updated") + 
log.Info(context.Background(), c.name+" config updated") if err := c.shutdown(); err != nil { - log.Debug(logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error())) return } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 902187560..bd1b99095 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -400,13 +400,13 @@ type internals struct { func (c *cfg) startMaintenance() { c.isMaintenance.Store(true) c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info(logs.FrostFSNodeStartedLocalNodesMaintenance) + c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. func (c *internals) stopMaintenance() { if c.isMaintenance.CompareAndSwap(true, false) { - c.log.Info(logs.FrostFSNodeStoppedLocalNodesMaintenance) + c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance) } } @@ -705,7 +705,7 @@ func initCfg(appCfg *config.Config) *cfg { log, err := logger.NewLogger(logPrm) fatalOnErr(err) if loggerconfig.ToLokiConfig(appCfg).Enabled { - log.Logger = log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) return lokiCore })) @@ -1103,10 +1103,10 @@ func initLocalStorage(ctx context.Context, c *cfg) { shard.WithTombstoneSource(c.createTombstoneSource()), shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...) if err != nil { - c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err)) } else { shardsAttached++ - c.log.Info(logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) + c.log.Info(ctx, logs.FrostFSNodeShardAttachedToEngine, zap.Stringer("id", id)) } } if shardsAttached == 0 { @@ -1116,15 +1116,15 @@ func initLocalStorage(ctx context.Context, c *cfg) { c.cfgObject.cfgLocalStorage.localStorage = ls c.onShutdown(func() { - c.log.Info(logs.FrostFSNodeClosingComponentsOfTheStorageEngine) + c.log.Info(ctx, logs.FrostFSNodeClosingComponentsOfTheStorageEngine) err := ls.Close(context.WithoutCancel(ctx)) if err != nil { - c.log.Info(logs.FrostFSNodeStorageEngineClosingFailure, + c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, zap.String("error", err.Error()), ) } else { - c.log.Info(logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) + c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) } }) } @@ -1132,7 +1132,7 @@ func initLocalStorage(ctx context.Context, c *cfg) { func initAccessPolicyEngine(_ context.Context, c *cfg) { var localOverrideDB chainbase.LocalOverrideDatabase if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" { - c.log.Warn(logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) + c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() } else { localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( @@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { c.onShutdown(func() { if err := ape.LocalOverrideDatabaseCore().Close(); err != nil { - c.log.Warn(logs.FrostFSNodeAccessPolicyEngineClosingFailure, + c.log.Warn(context.Background(), 
logs.FrostFSNodeAccessPolicyEngineClosingFailure, zap.Error(err), ) } @@ -1209,7 +1209,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { func (c *cfg) updateContractNodeInfo(epoch uint64) { ni, err := c.netmapLocalNodeState(epoch) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), zap.String("error", err.Error())) return @@ -1245,13 +1245,13 @@ func (c *cfg) bootstrap() error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState) + c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState) return c.bootstrapWithState(func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }) } - c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState, + c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) @@ -1280,19 +1280,19 @@ func (c *cfg) signalWatcher(ctx context.Context) { // signals causing application to shut down should have priority over // reconfiguration signal case <-ch: - c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) c.shutdown() - c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-c.internalErr: // internal application error - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) c.shutdown() - c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return default: // block until any signal is receieved @@ -1300,19 +1300,19 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-sighupCh: c.reloadConfig(ctx) case <-ch: - c.log.Info(logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) c.shutdown() - c.log.Info(logs.FrostFSNodeTerminationSignalProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-c.internalErr: // internal application error - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) c.shutdown() - c.log.Info(logs.FrostFSNodeInternalErrorProcessingIsComplete) + c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return } } @@ -1320,17 +1320,17 @@ func (c *cfg) signalWatcher(ctx context.Context) { } func (c *cfg) reloadConfig(ctx context.Context) { - c.log.Info(logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { - c.log.Info(logs.FrostFSNodeSIGHUPSkip) + c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) return } defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) err := c.reloadAppConfig() if err != nil { - c.log.Error(logs.FrostFSNodeConfigurationReading, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeConfigurationReading, 
zap.Error(err)) return } @@ -1341,7 +1341,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { logPrm, err := c.loggerPrm() if err != nil { - c.log.Error(logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) return } @@ -1362,25 +1362,25 @@ func (c *cfg) reloadConfig(ctx context.Context) { err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg) if err != nil { - c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err)) return } for _, component := range components { err = component.reloadFunc() if err != nil { - c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying, + c.log.Error(ctx, logs.FrostFSNodeUpdatedConfigurationApplying, zap.String("component", component.name), zap.Error(err)) } } if err := c.dialerSource.Update(internalNetConfig(c.appCfg, c.metricsCollector.MultinetMetrics())); err != nil { - c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err)) + c.log.Error(ctx, logs.FailedToUpdateMultinetConfiguration, zap.Error(err)) return } - c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { @@ -1403,7 +1403,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { } updated, err := tracing.Setup(ctx, *traceConfig) if updated { - c.log.Info(logs.FrostFSNodeTracingConfigationUpdated) + c.log.Info(ctx, logs.FrostFSNodeTracingConfigationUpdated) } return err }}) @@ -1438,7 +1438,7 @@ func (c *cfg) reloadPools() error { func (c *cfg) reloadPool(p *ants.Pool, newSize int, name string) { oldSize := p.Cap() if oldSize != newSize { - c.log.Info(logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name), + c.log.Info(context.Background(), logs.FrostFSNodePoolConfigurationUpdate, zap.String("field", name), zap.Int("old", oldSize), zap.Int("new", newSize)) p.Tune(newSize) } @@ -1477,11 +1477,11 @@ func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoPro func (c *cfg) shutdown() { old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN) if old == control.HealthStatus_SHUTTING_DOWN { - c.log.Info(logs.FrostFSNodeShutdownSkip) + c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip) return } if old == control.HealthStatus_STARTING { - c.log.Warn(logs.FrostFSNodeShutdownWhenNotReady) + c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady) } c.ctxCancel() @@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() { } if err := sdnotify.ClearStatus(); err != nil { - c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index cc38876ee..1a54f9ffc 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -102,13 +102,13 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c } else { // unlike removal, we expect successful receive of the container // after successful creation, so logging can be useful - c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + c.log.Error(context.Background(), logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, zap.Stringer("id", ev.ID), zap.Error(err), 
) } - c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt, + c.log.Debug(context.Background(), logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), ) }) @@ -116,7 +116,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c subscribeToContainerRemoval(c, func(e event.Event) { ev := e.(containerEvent.DeleteSuccess) containerCache.handleRemoval(ev.ID) - c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt, + c.log.Debug(context.Background(), logs.FrostFSNodeContainerRemovalEventsReceipt, zap.Stringer("id", ev.ID), ) }) diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index e1e6e3ac9..ffac23eec 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -46,7 +46,7 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } @@ -60,7 +60,7 @@ func initControlService(c *cfg) { c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { runAndLog(ctx, c, serviceNameControl, false, func(context.Context, *cfg) { - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", serviceNameControl), zap.String("endpoint", endpoint)) fatalOnErr(c.cfgControlService.server.Serve(lis)) @@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 3a38b2cca..271810ee6 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -1,6 +1,7 @@ package main import ( + "context" "crypto/tls" "errors" "net" @@ -30,7 +31,7 @@ func initGRPC(c *cfg) { lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint()) - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint()) return } @@ -76,19 +77,19 @@ func scheduleReconnect(endpoint string, c *cfg) { } func tryReconnect(endpoint string, c *cfg) bool { - c.log.Info(logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) + c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) serverOpts, found := getGRPCEndpointOpts(endpoint, c) if !found { - c.log.Warn(logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) + c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) return true } lis, err := net.Listen("tcp", endpoint) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint) - c.log.Error(logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - c.log.Warn(logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) + c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) return false 
} c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint) @@ -101,7 +102,7 @@ func tryReconnect(endpoint string, c *cfg) bool { c.cfgGRPC.appendAndHandle(endpoint, lis, srv) - c.log.Info(logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) + c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) return true } @@ -143,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return nil, false } @@ -180,21 +181,21 @@ func serveGRPC(c *cfg) { go func() { defer func() { - c.log.Info(logs.FrostFSNodeStopListeningGRPCEndpoint, + c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint, zap.Stringer("endpoint", l.Addr()), ) c.wg.Done() }() - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, zap.String("service", "gRPC"), zap.Stringer("endpoint", l.Addr()), ) if err := s.Serve(l); err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e) - c.log.Error(logs.FrostFSNodeGRPCServerError, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err)) c.cfgGRPC.dropConnection(e) scheduleReconnect(e, c) } @@ -203,9 +204,9 @@ func serveGRPC(c *cfg) { } func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { - l = &logger.Logger{Logger: l.With(zap.String("name", name))} + l = l.With(zap.String("name", name)) - l.Info(logs.FrostFSNodeStoppingGRPCServer) + l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer) // GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -217,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info(logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) + l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info(logs.FrostFSNodeGRPCServerStoppedSuccessfully) + l.Info(context.Background(), logs.FrostFSNodeGRPCServerStoppedSuccessfully) } diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go index 2ec20d848..a699a72a1 100644 --- a/cmd/frostfs-node/httpcomponent.go +++ b/cmd/frostfs-node/httpcomponent.go @@ -22,7 +22,7 @@ type httpComponent struct { func (cmp *httpComponent) init(c *cfg) { if !cmp.enabled { - c.log.Info(cmp.name + " is disabled") + c.log.Info(context.Background(), cmp.name+" is disabled") return } // Init server with parameters @@ -39,7 +39,7 @@ func (cmp *httpComponent) init(c *cfg) { go func() { defer c.wg.Done() - c.log.Info(logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, zap.String("service", cmp.name), zap.String("endpoint", cmp.address)) fatalOnErr(srv.Serve()) diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index e4f0a434c..cd42d5f1d 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -73,9 +73,9 @@ func main() { } func initAndLog(c *cfg, name string, initializer func(*cfg)) { - c.log.Info(fmt.Sprintf("initializing %s service...", name)) + c.log.Info(context.Background(), fmt.Sprintf("initializing %s 
service...", name)) initializer(c) - c.log.Info(name + " service has been successfully initialized") + c.log.Info(context.Background(), name+" service has been successfully initialized") } func initApp(ctx context.Context, c *cfg) { @@ -120,25 +120,25 @@ func initApp(ctx context.Context, c *cfg) { } func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) { - c.log.Info(fmt.Sprintf("starting %s service...", name)) + c.log.Info(ctx, fmt.Sprintf("starting %s service...", name)) starter(ctx, c) if logSuccess { - c.log.Info(name + " service started successfully") + c.log.Info(ctx, name+" service started successfully") } } func stopAndLog(c *cfg, name string, stopper func() error) { - c.log.Debug(fmt.Sprintf("shutting down %s service", name)) + c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name)) err := stopper() if err != nil { - c.log.Debug(fmt.Sprintf("could not shutdown %s server", name), + c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name), zap.String("error", err.Error()), ) } - c.log.Debug(name + " service has been stopped") + c.log.Debug(context.Background(), name+" service has been stopped") } func bootUp(ctx context.Context, c *cfg) { @@ -150,7 +150,7 @@ func bootUp(ctx context.Context, c *cfg) { } func wait(c *cfg) { - c.log.Info(logs.CommonApplicationStarted, + c.log.Info(context.Background(), logs.CommonApplicationStarted, zap.String("version", misc.Version)) <-c.done // graceful shutdown @@ -160,12 +160,12 @@ func wait(c *cfg) { go func() { defer drain.Done() for err := range c.internalErr { - c.log.Warn(logs.FrostFSNodeInternalApplicationError, + c.log.Warn(context.Background(), logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) } }() - c.log.Debug(logs.FrostFSNodeWaitingForAllProcessesToStop) + c.log.Debug(context.Background(), logs.FrostFSNodeWaitingForAllProcessesToStop) c.wg.Wait() diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 0969f5579..3e010b181 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -48,7 +48,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) { fatalOnErr(err) } - c.log.Info(logs.FrostFSNodeNotarySupport, + c.log.Info(ctx, logs.FrostFSNodeNotarySupport, zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), ) @@ -64,7 +64,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) { msPerBlock, err := c.cfgMorph.client.MsPerBlock() fatalOnErr(err) c.cfgMorph.cacheTTL = time.Duration(msPerBlock) * time.Millisecond - c.log.Debug(logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) + c.log.Debug(ctx, logs.FrostFSNodeMorphcacheTTLFetchedFromNetwork, zap.Duration("value", c.cfgMorph.cacheTTL)) } if c.cfgMorph.cacheTTL < 0 { @@ -102,7 +102,7 @@ func initMorphClient(ctx context.Context, c *cfg) { client.WithDialerSource(c.dialerSource), ) if err != nil { - c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient, + c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), zap.String("error", err.Error()), ) @@ -111,12 +111,12 @@ func initMorphClient(ctx context.Context, c *cfg) { } c.onShutdown(func() { - c.log.Info(logs.FrostFSNodeClosingMorphComponents) + c.log.Info(ctx, logs.FrostFSNodeClosingMorphComponents) cli.Close() }) if err := cli.SetGroupSignerScope(); err != nil { - c.log.Info(logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) + c.log.Info(ctx, 
logs.FrostFSNodeFailedToSetGroupSignerScopeContinueWithGlobal, zap.Error(err)) } c.cfgMorph.client = cli @@ -136,7 +136,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. - c.log.Info(logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) + c.log.Info(ctx, logs.FrostFSNodeNotaryDepositHasAlreadyBeenMade) return } @@ -202,7 +202,7 @@ func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err) } if res.Execution.VMState.HasFlag(vmstate.Halt) { - c.log.Info(logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) + c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) return nil } return errNotaryDepositFail @@ -217,7 +217,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn(logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } subs, err = subscriber.New(ctx, &subscriber.Params{ @@ -246,7 +246,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err := netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info(logs.FrostFSNodeNewEpochEventFromSidechain, + c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -257,11 +257,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(block *block.Block) { - c.log.Debug(logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn(logs.FrostFSNodeCantUpdatePersistentState, + c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 73871bfc9..18667e636 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -189,7 +189,7 @@ func addNewEpochNotificationHandlers(c *cfg) { } if err := c.bootstrap(); err != nil { - c.log.Warn(logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) + c.log.Warn(context.Background(), logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } }) @@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) { addNewEpochAsyncNotificationHandler(c, func(_ event.Event) { _, _, err := makeNotaryDeposit(c) if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotMakeNotaryDeposit, + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), ) } @@ -210,7 +210,7 @@ func addNewEpochNotificationHandlers(c *cfg) { func bootstrapNode(c *cfg) { if c.needBootstrap() { if c.IsMaintenance() { - c.log.Info(logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + c.log.Info(context.Background(), 
logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) return } err := c.bootstrap() @@ -250,7 +250,7 @@ func initNetmapState(c *cfg) { stateWord := nodeState(ni) - c.log.Info(logs.FrostFSNodeInitialNetworkState, + c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) @@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error if nmState != candidateState { // This happens when the node was switched to maintenance without epoch tick. // We expect it to continue staying in maintenance. - c.log.Info(logs.CandidateStatusPriority, + c.log.Info(context.Background(), logs.CandidateStatusPriority, zap.String("netmap", nmState), zap.String("candidate", candidateState)) } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index aeab1d6cb..cad6d5ee3 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -58,7 +58,7 @@ type objectSvc struct { func (c *cfg) MaxObjectSize() uint64 { sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, zap.String("error", err.Error()), ) } @@ -223,7 +223,7 @@ func initObjectService(c *cfg) { func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.ClientCache) { if policerconfig.UnsafeDisable(c.appCfg) { - c.log.Warn(logs.FrostFSNodePolicerIsDisabled) + c.log.Warn(context.Background(), logs.FrostFSNodePolicerIsDisabled) return } @@ -287,7 +287,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl _, err := ls.Inhume(ctx, inhumePrm) if err != nil { - c.log.Warn(logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, + c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, zap.String("error", err.Error()), ) } diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go index d858ba490..0b1000e70 100644 --- a/cmd/frostfs-node/runtime.go +++ b/cmd/frostfs-node/runtime.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "runtime/debug" @@ -12,14 +13,14 @@ import ( func setRuntimeParameters(c *cfg) { if len(os.Getenv("GOMEMLIMIT")) != 0 { // default limit < yaml limit < app env limit < GOMEMLIMIT - c.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) + c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) return } memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg) previous := debug.SetMemoryLimit(memLimitBytes) if memLimitBytes != previous { - c.log.Info(logs.RuntimeSoftMemoryLimitUpdated, + c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated, zap.Int64("new_value", memLimitBytes), zap.Int64("old_value", previous)) } diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go index f550dd882..65f5aec15 100644 --- a/cmd/frostfs-node/tracing.go +++ b/cmd/frostfs-node/tracing.go @@ -13,12 +13,12 @@ import ( func initTracing(ctx context.Context, c *cfg) { conf, err := tracingconfig.ToTracingConfig(c.appCfg) if err != nil { - c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) return } _, err = tracing.Setup(ctx, *conf) if err != nil { - c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedInitTracing, zap.Error(err)) return } @@ -29,7 +29,7 @@ func initTracing(ctx context.Context, c *cfg) { 
defer cancel() err := tracing.Shutdown(ctx) // cfg context cancels before close if err != nil { - c.log.Error(logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeFailedShutdownTracing, zap.Error(err)) } }, }) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index a92979daf..59923ee2f 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -44,7 +44,7 @@ func (c cnrSource) List() ([]cid.ID, error) { func initTreeService(c *cfg) { treeConfig := treeconfig.Tree(c.appCfg) if !treeConfig.Enabled() { - c.log.Info(logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) + c.log.Info(context.Background(), logs.FrostFSNodeTreeServiceIsNotEnabledSkipInitialization) return } @@ -83,7 +83,7 @@ func initTreeService(c *cfg) { addNewEpochNotificationHandler(c, func(_ event.Event) { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) } }) } else { @@ -94,7 +94,7 @@ func initTreeService(c *cfg) { for range tick.C { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error(logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) + c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) if errors.Is(err, tree.ErrShuttingDown) { return } @@ -107,11 +107,11 @@ func initTreeService(c *cfg) { ev := e.(containerEvent.DeleteSuccess) // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug(logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) + c.log.Debug(context.Background(), logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) err := c.treeService.DropTree(context.Background(), ev.ID, "") if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. 
- c.log.Error(logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, + c.log.Error(context.Background(), logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), zap.String("error", err.Error())) } diff --git a/internal/audit/request.go b/internal/audit/request.go index 3355087f1..15a4a7960 100644 --- a/internal/audit/request.go +++ b/internal/audit/request.go @@ -1,6 +1,8 @@ package audit import ( + "context" + crypto "git.frostfs.info/TrueCloudLab/frostfs-crypto" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -37,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target object = target.String() } - log.Info(logs.AuditEventLogRecord, + log.Info(context.Background(), logs.AuditEventLogRecord, zap.String("operation", operation), zap.String("object", object), zap.String("subject", subject), diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index b428b56da..20560cf3a 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -65,7 +65,7 @@ func TestFormatValidator_Validate(t *testing.T) { epoch: curEpoch, }), WithLockSource(ls), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) ownerKey, err := keys.NewPrivateKey() @@ -290,7 +290,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }), WithLockSource(ls), WithVerifySessionTokenIssuer(false), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) tok := sessiontest.Object() @@ -339,7 +339,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) tok := sessiontest.Object() @@ -417,7 +417,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { currentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.NoError(t, v.Validate(context.Background(), obj, false)) @@ -491,7 +491,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { currentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.NoError(t, v.Validate(context.Background(), obj, false)) @@ -567,7 +567,7 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { currentEpoch: curEpoch, }, ), - WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}), + WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), ) require.Error(t, v.Validate(context.Background(), obj, false)) diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index 13d0ebfb1..ed438c0b9 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -2,6 +2,7 @@ package object import ( "bytes" + "context" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -64,7 +65,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug(logs.V2CantCheckIfRequestFromInnerRing, + c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing, zap.String("error", 
err.Error())) } else if isInnerRingNode { return &ClassifyResult{ @@ -81,7 +82,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug(logs.V2CantCheckIfRequestFromContainerNode, + c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode, zap.String("error", err.Error())) } else if isContainerNode { return &ClassifyResult{ diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index d6b474c32..b8812819e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -97,7 +97,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn(logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) } mainnetChain.from = fromMainChainBlock @@ -142,7 +142,7 @@ func (s *Server) initNotaryConfig() { !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too ) - s.log.Info(logs.InnerringNotarySupport, + s.log.Info(context.Background(), logs.InnerringNotarySupport, zap.Bool("sidechain_enabled", true), zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), ) @@ -153,7 +153,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli if s.withoutMainNet || cfg.GetBool("governance.disable") { alphaSync = func(event.Event) { - s.log.Debug(logs.InnerringAlphabetKeysSyncIsDisabled) + s.log.Debug(context.Background(), logs.InnerringAlphabetKeysSyncIsDisabled) } } else { // create governance processor @@ -307,7 +307,7 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info(logs.InnerringNoControlServerEndpointSpecified) + s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -446,7 +446,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn(logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) } morphChain := &chainParams{ @@ -471,7 +471,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- return nil, err } if err := s.morphClient.SetGroupSignerScope(); err != nil { - morphChain.log.Info(logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) + morphChain.log.Info(ctx, logs.InnerringFailedToSetGroupSignerScope, zap.Error(err)) } return morphChain, nil diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 5fae302c4..63a4cb1cb 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -176,7 +176,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { err = s.voteForSidechainValidator(prm) if err != nil { // we 
don't stop inner ring execution on this error - s.log.Warn(logs.InnerringCantVoteForPreparedValidators, + s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, zap.String("error", err.Error())) } @@ -218,13 +218,13 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { func (s *Server) registerMorphNewBlockEventHandler() { s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug(logs.InnerringNewBlock, + s.log.Debug(context.Background(), logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(logs.InnerringCantUpdatePersistentState, + s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -238,7 +238,7 @@ func (s *Server) registerMainnetNewBlockEventHandler() { s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(logs.InnerringCantUpdatePersistentState, + s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -307,7 +307,7 @@ func (s *Server) Stop() { for _, c := range s.closers { if err := c(); err != nil { - s.log.Warn(logs.InnerringCloserError, + s.log.Warn(context.Background(), logs.InnerringCloserError, zap.String("error", err.Error()), ) } @@ -438,7 +438,7 @@ func createListener(ctx context.Context, cli *client.Client, p *chainParams) (ev } listener, err := event.NewListener(event.ListenerParams{ - Logger: &logger.Logger{Logger: p.log.With(zap.String("chain", p.name))}, + Logger: p.log.With(zap.String("chain", p.name)), Subscriber: sub, }) if err != nil { @@ -602,7 +602,7 @@ func (s *Server) initConfigFromBlockchain() error { return err } - s.log.Debug(logs.InnerringReadConfigFromBlockchain, + s.log.Debug(context.Background(), logs.InnerringReadConfigFromBlockchain, zap.Bool("active", s.IsActive()), zap.Bool("alphabet", s.IsAlphabet()), zap.Uint64("epoch", epoch), diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index 5cdbb971c..902a4c30a 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -54,12 +54,12 @@ func (s *Server) notaryHandler(_ event.Event) { if !s.mainNotaryConfig.disabled { _, err := s.depositMainNotary() if err != nil { - s.log.Error(logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) + s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } if _, err := s.depositSideNotary(); err != nil { - s.log.Error(logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) + s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } @@ -81,11 +81,11 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite // non-error deposit with an empty TX hash means // that the deposit has already been made; no // need to wait it. 
- s.log.Info(logs.InnerringNotaryDepositHasAlreadyBeenMade) + s.log.Info(ctx, logs.InnerringNotaryDepositHasAlreadyBeenMade) return nil } - s.log.Info(msg) + s.log.Info(ctx, msg) return await(ctx, tx) } diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 9de075f17..0cc2a5f39 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -1,6 +1,8 @@ package alphabet import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" @@ -10,14 +12,14 @@ import ( func (ap *Processor) HandleGasEmission(ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info(logs.AlphabetTick, zap.String("type", "alphabet gas emit")) + ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn(logs.AlphabetAlphabetProcessorWorkerPoolDrained, + ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 2317f3e98..142409631 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -1,6 +1,7 @@ package alphabet import ( + "context" "crypto/elliptic" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -16,14 +17,14 @@ const emitMethod = "emit" func (ap *Processor) processEmit() bool { index := ap.irList.AlphabetIndex() if index < 0 { - ap.log.Info(logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) + ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return true } contract, ok := ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug(logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, + ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return false @@ -32,20 +33,20 @@ func (ap *Processor) processEmit() bool { // there is no signature collecting, so we don't need extra fee _, err := ap.morphClient.Invoke(contract, 0, emitMethod) if err != nil { - ap.log.Warn(logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) return false } if ap.storageEmission == 0 { - ap.log.Info(logs.AlphabetStorageNodeEmissionIsOff) + ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff) return true } networkMap, err := ap.netmapClient.NetMap() if err != nil { - ap.log.Warn(logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, + ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.String("error", err.Error())) return false @@ -58,7 +59,7 @@ func (ap *Processor) processEmit() bool { ap.pwLock.RUnlock() extraLen := len(pw) - ap.log.Debug(logs.AlphabetGasEmission, + ap.log.Debug(context.Background(), logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -81,7 +82,7 @@ func (ap *Processor) 
transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn(logs.AlphabetCantParseNodePublicKey, + ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey, zap.String("error", err.Error())) continue @@ -89,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn(logs.AlphabetCantTransferGas, + ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), @@ -106,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed for i, addr := range pw { receiversLog[i] = addr.StringLE() } - ap.log.Warn(logs.AlphabetCantTransferGasToWallet, + ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index ce6679969..8dbef1e20 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -1,6 +1,7 @@ package alphabet import ( + "context" "errors" "fmt" "sync" @@ -85,7 +86,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug(logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) + p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index e39f3abbd..3792fc2af 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -1,6 +1,7 @@ package balance import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -12,7 +13,7 @@ import ( func (bp *Processor) handleLock(ev event.Event) { lock := ev.(balanceEvent.Lock) - bp.log.Info(logs.Notification, + bp.log.Info(context.Background(), logs.Notification, zap.String("type", "lock"), zap.String("value", hex.EncodeToString(lock.ID()))) @@ -23,7 +24,7 @@ func (bp *Processor) handleLock(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - bp.log.Warn(logs.BalanceBalanceWorkerPoolDrained, + bp.log.Warn(context.Background(), logs.BalanceBalanceWorkerPoolDrained, zap.Int("capacity", bp.pool.Cap())) } } diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index 1d94fa454..ac6a1e493 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" frostfsContract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" balanceEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/balance" @@ -11,7 +13,7 @@ import ( // back to the withdraw issuer. 
func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { if !bp.alphabetState.IsAlphabet() { - bp.log.Info(logs.BalanceNonAlphabetModeIgnoreBalanceLock) + bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock) return true } @@ -25,7 +27,7 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { err := bp.frostfsClient.Cheque(prm) if err != nil { - bp.log.Error(logs.BalanceCantSendLockAssetTx, zap.Error(err)) + bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index 5cc849b5c..c4078461e 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -1,6 +1,7 @@ package balance import ( + "context" "errors" "fmt" @@ -68,7 +69,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug(logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) + p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index a54f3c772..b3d50d9d0 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -15,7 +16,7 @@ func (cp *Processor) handlePut(ev event.Event) { put := ev.(putEvent) id := sha256.Sum256(put.Container()) - cp.log.Info(logs.Notification, + cp.log.Info(context.Background(), logs.Notification, zap.String("type", "container put"), zap.String("id", base58.Encode(id[:]))) @@ -26,14 +27,14 @@ func (cp *Processor) handlePut(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } func (cp *Processor) handleDelete(ev event.Event) { del := ev.(containerEvent.Delete) - cp.log.Info(logs.Notification, + cp.log.Info(context.Background(), logs.Notification, zap.String("type", "container delete"), zap.String("id", base58.Encode(del.ContainerID()))) @@ -44,7 +45,7 @@ func (cp *Processor) handleDelete(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index d89b63e82..2b9c5995c 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" "fmt" "strings" @@ -38,7 +39,7 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam // and sending approve tx back to the morph. 
func (cp *Processor) processContainerPut(put putEvent) bool { if !cp.alphabetState.IsAlphabet() { - cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerPut) + cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut) return true } @@ -48,7 +49,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool { err := cp.checkPutContainer(ctx) if err != nil { - cp.log.Error(logs.ContainerPutContainerCheckFailed, + cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed, zap.String("error", err.Error()), ) @@ -56,7 +57,7 @@ func (cp *Processor) processContainerPut(put putEvent) bool { } if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(logs.ContainerCouldNotApprovePutContainer, + cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer, zap.String("error", err.Error()), ) return false @@ -105,13 +106,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { // and sending approve tx back to morph. func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { if !cp.alphabetState.IsAlphabet() { - cp.log.Info(logs.ContainerNonAlphabetModeIgnoreContainerDelete) + cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete) return true } err := cp.checkDeleteContainer(e) if err != nil { - cp.log.Error(logs.ContainerDeleteContainerCheckFailed, + cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed, zap.String("error", err.Error()), ) @@ -119,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { } if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(logs.ContainerCouldNotApproveDeleteContainer, + cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index a6fbdc707..7a50ca773 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" "fmt" @@ -97,7 +98,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: FrostFSID client is not set") } - p.Log.Debug(logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) + p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index c80f9fdc5..02dfbaf60 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -2,6 +2,7 @@ package frostfs import ( "bytes" + "context" "encoding/hex" "slices" @@ -16,7 +17,7 @@ func (np *Processor) handleDeposit(ev event.Event) { deposit := ev.(frostfsEvent.Deposit) depositIDBin := bytes.Clone(deposit.ID()) slices.Reverse(depositIDBin) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "deposit"), zap.String("id", hex.EncodeToString(depositIDBin))) @@ -27,7 +28,7 @@ func (np *Processor) handleDeposit(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + 
np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -36,7 +37,7 @@ func (np *Processor) handleWithdraw(ev event.Event) { withdraw := ev.(frostfsEvent.Withdraw) withdrawBin := bytes.Clone(withdraw.ID()) slices.Reverse(withdrawBin) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "withdraw"), zap.String("id", hex.EncodeToString(withdrawBin))) @@ -47,14 +48,14 @@ func (np *Processor) handleWithdraw(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleCheque(ev event.Event) { cheque := ev.(frostfsEvent.Cheque) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "cheque"), zap.String("id", hex.EncodeToString(cheque.ID()))) @@ -65,14 +66,14 @@ func (np *Processor) handleCheque(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleConfig(ev event.Event) { cfg := ev.(frostfsEvent.Config) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "set config"), zap.String("key", hex.EncodeToString(cfg.Key())), zap.String("value", hex.EncodeToString(cfg.Value()))) @@ -84,7 +85,7 @@ func (np *Processor) handleConfig(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index c72aeceee..3bee6ed96 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -1,6 +1,8 @@ package frostfs import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" @@ -17,7 +19,7 @@ const ( // gas in the sidechain. 
func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreDeposit) + np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit) return true } @@ -30,7 +32,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // send transferX to a balance contract err := np.balanceClient.Mint(prm) if err != nil { - np.log.Error(logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -44,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn(logs.FrostFSDoubleMintEmissionDeclined, + np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined, zap.Stringer("receiver", receiver), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -56,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error(logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return false } if balance < np.gasBalanceThreshold { - np.log.Warn(logs.FrostFSGasBalanceThresholdHasBeenReached, + np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -70,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error(logs.FrostFSCantTransferNativeGasToReceiver, + np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver, zap.String("error", err.Error())) return false @@ -84,14 +86,14 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // Process withdraw event by locking assets in the balance account. func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreWithdraw) + np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw) return true } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error(logs.FrostFSCantCreateLockAccount, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err)) return false } @@ -107,7 +109,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { err = np.balanceClient.Lock(prm) if err != nil { - np.log.Error(logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) return false } @@ -118,7 +120,7 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { // the reserve account. 
func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreCheque) + np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque) return true } @@ -130,7 +132,7 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { err := np.balanceClient.Burn(prm) if err != nil { - np.log.Error(logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index 2ae3e6ced..814dd40b4 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -1,6 +1,8 @@ package frostfs import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" frostfsEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/frostfs" @@ -11,7 +13,7 @@ import ( // the sidechain. func (np *Processor) processConfig(config frostfsEvent.Config) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.FrostFSNonAlphabetModeIgnoreConfig) + np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig) return true } @@ -24,7 +26,7 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool { err := np.netmapClient.SetConfig(prm) if err != nil { - np.log.Error(logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) + np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index 2019857ac..fdc31d82e 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -1,6 +1,7 @@ package frostfs import ( + "context" "errors" "fmt" "sync" @@ -110,7 +111,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug(logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) + p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go index fd7f539c3..dee8c13e2 100644 --- a/pkg/innerring/processors/governance/handlers.go +++ b/pkg/innerring/processors/governance/handlers.go @@ -1,6 +1,8 @@ package governance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -32,7 +34,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { return } - gp.log.Info(logs.GovernanceNewEvent, zap.String("type", typ)) + gp.log.Info(context.Background(), logs.GovernanceNewEvent, zap.String("type", typ)) // send event to the worker pool @@ -41,7 +43,7 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - gp.log.Warn(logs.GovernanceGovernanceWorkerPoolDrained, + gp.log.Warn(context.Background(), logs.GovernanceGovernanceWorkerPoolDrained, zap.Int("capacity", gp.pool.Cap())) } } diff 
--git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 50ba58e77..faca22f67 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -1,6 +1,7 @@ package governance import ( + "context" "encoding/binary" "encoding/hex" "sort" @@ -20,37 +21,37 @@ const ( func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { if !gp.alphabetState.IsAlphabet() { - gp.log.Info(logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) + gp.log.Info(context.Background(), logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return true } mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { - gp.log.Error(logs.GovernanceCantFetchAlphabetListFromMainNet, + gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromMainNet, zap.String("error", err.Error())) return false } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { - gp.log.Error(logs.GovernanceCantFetchAlphabetListFromSideChain, + gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromSideChain, zap.String("error", err.Error())) return false } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { - gp.log.Error(logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, + gp.log.Error(context.Background(), logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, zap.String("error", err.Error())) return false } if newAlphabet == nil { - gp.log.Info(logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) + gp.log.Info(context.Background(), logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) return true } - gp.log.Info(logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, + gp.log.Info(context.Background(), logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), zap.String("new_alphabet", prettyKeys(newAlphabet)), ) @@ -63,7 +64,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { // 1. Vote to sidechain committee via alphabet contracts. err = gp.voter.VoteForSidechainValidator(votePrm) if err != nil { - gp.log.Error(logs.GovernanceCantVoteForSideChainCommittee, + gp.log.Error(context.Background(), logs.GovernanceCantVoteForSideChainCommittee, zap.String("error", err.Error())) } @@ -76,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { // 4. Update FrostFS contract in the mainnet. 
gp.updateFrostFSContractInMainnet(newAlphabet) - gp.log.Info(logs.GovernanceFinishedAlphabetListUpdate) + gp.log.Info(context.Background(), logs.GovernanceFinishedAlphabetListUpdate) return true } @@ -96,21 +97,21 @@ func prettyKeys(keys keys.PublicKeys) string { func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { - gp.log.Error(logs.GovernanceCantFetchInnerRingListFromSideChain, + gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain, zap.String("error", err.Error())) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error(logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, + gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) return } sort.Sort(newInnerRing) - gp.log.Info(logs.GovernanceUpdateOfTheInnerRingList, + gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -120,7 +121,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl updPrm.SetHash(txHash) if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil { - gp.log.Error(logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, + gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) } } @@ -133,7 +134,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, tx err := gp.morphClient.UpdateNotaryList(updPrm) if err != nil { - gp.log.Error(logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, + gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, zap.String("error", err.Error())) } } @@ -153,7 +154,7 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) err := gp.frostfsClient.AlphabetUpdate(prm) if err != nil { - gp.log.Error(logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, + gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index c6053e281..478ab5eab 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -13,21 +14,21 @@ import ( func (np *Processor) HandleNewEpochTick(ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info(logs.NetmapTick, zap.String("type", "epoch")) + np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleNewEpoch(ev event.Event) { epochEvent := ev.(netmapEvent.NewEpoch) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, 
zap.String("type", "new epoch"), zap.Uint64("value", epochEvent.EpochNumber())) @@ -38,7 +39,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -46,7 +47,7 @@ func (np *Processor) handleNewEpoch(ev event.Event) { func (np *Processor) handleAddPeer(ev event.Event) { newPeer := ev.(netmapEvent.AddPeer) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "add peer"), ) @@ -57,14 +58,14 @@ func (np *Processor) handleAddPeer(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleUpdateState(ev event.Event) { updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info(logs.Notification, + np.log.Info(context.Background(), logs.Notification, zap.String("type", "update peer state"), zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) @@ -75,21 +76,21 @@ func (np *Processor) handleUpdateState(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } func (np *Processor) handleCleanupTick(ev event.Event) { if !np.netmapSnapshot.enabled { - np.log.Debug(logs.NetmapNetmapCleanUpRoutineIsDisabled518) + np.log.Debug(context.Background(), logs.NetmapNetmapCleanUpRoutineIsDisabled518) return } cleanup := ev.(netmapCleanupTick) - np.log.Info(logs.NetmapTick, zap.String("type", "netmap cleaner")) + np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "netmap cleaner")) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { @@ -97,7 +98,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 269e79c5e..9529d3a0c 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" v2netmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -9,7 +11,7 @@ import ( func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) + np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return true } @@ -17,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - 
np.log.Warn(logs.NetmapCantDecodePublicKeyOfNetmapNode, + np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info(logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) + np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. @@ -39,13 +41,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { int64(v2netmap.Offline), key.Bytes(), ) if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) + np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn(logs.NetmapCantIterateOnNetmapCleanerCache, + np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache, zap.String("error", err.Error())) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 9522df26c..8ad295a74 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/governance" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" @@ -14,7 +16,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { epochDuration, err := np.netmapClient.EpochDuration() if err != nil { - np.log.Warn(logs.NetmapCantGetEpochDuration, + np.log.Warn(context.Background(), logs.NetmapCantGetEpochDuration, zap.String("error", err.Error())) } else { np.epochState.SetEpochDuration(epochDuration) @@ -24,20 +26,20 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { h, err := np.netmapClient.MorphTxHeight(ev.TxHash()) if err != nil { - np.log.Warn(logs.NetmapCantGetTransactionHeight, + np.log.Warn(context.Background(), logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), zap.String("error", err.Error())) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { - np.log.Warn(logs.NetmapCantResetEpochTimer, + np.log.Warn(context.Background(), logs.NetmapCantResetEpochTimer, zap.String("error", err.Error())) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { - np.log.Warn(logs.NetmapCantGetNetmapSnapshotToPerformCleanup, + np.log.Warn(context.Background(), logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.String("error", err.Error())) return false @@ -54,16 +56,16 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { // Process new epoch tick by invoking new epoch method in network map contract. 
func (np *Processor) processNewEpochTick() bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewEpochTick) + np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return true } nextEpoch := np.epochState.EpochCounter() + 1 - np.log.Debug(logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) + np.log.Debug(context.Background(), logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) err := np.netmapClient.NewEpoch(nextEpoch) if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) + np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index c8c7928a3..42d1b5ec6 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -14,7 +15,7 @@ import ( // local epoch timer. func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) + np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return true } @@ -22,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { tx := ev.NotaryRequest().MainTransaction ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn(logs.NetmapNonhaltNotaryTransaction, + np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -33,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn(logs.NetmapCantParseNetworkMapCandidate) + np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate) return false } // validate and update node info err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { - np.log.Warn(logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, + np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.String("error", err.Error()), ) @@ -63,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // That is why we need to perform `addPeerIR` only in case when node is online, // because in scope of this method, contract set state `ONLINE` for the node. if updated && nodeInfo.Status().IsOnline() { - np.log.Info(logs.NetmapApprovingNetworkMapCandidate, + np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -84,7 +85,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { nodeInfoBinary, ) if err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) + np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) return false } } @@ -95,7 +96,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // Process update peer notification by sending approval tx to the smart contract. 
func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) + np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return true } @@ -108,7 +109,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { if ev.Maintenance() { err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { - np.log.Info(logs.NetmapPreventSwitchingNodeToMaintenanceState, + np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -117,7 +118,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { } if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil { - np.log.Error(logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) + np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index f5a91dee2..bbd60c1e1 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "errors" "fmt" @@ -132,7 +133,7 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug(logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) + p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index d3071faad..250f41e5f 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sort" @@ -61,7 +62,7 @@ func (s *Server) IsAlphabet() bool { func (s *Server) InnerRingIndex() int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error(logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) + s.log.Error(context.Background(), logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) return -1 } @@ -73,7 +74,7 @@ func (s *Server) InnerRingIndex() int { func (s *Server) InnerRingSize() int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error(logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) + s.log.Error(context.Background(), logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) return 0 } @@ -85,7 +86,7 @@ func (s *Server) InnerRingSize() int { func (s *Server) AlphabetIndex() int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error(logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) return -1 } @@ -97,13 +98,13 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro index := s.InnerRingIndex() if s.contracts.alphabet.indexOutOfRange(index) { - s.log.Info(logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) + s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) return nil } if len(validators) == 0 { - s.log.Info(logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) + s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) return nil } @@ -128,7 
+129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { - s.log.Warn(logs.InnerringCantInvokeVoteMethodInAlphabetContract, + s.log.Warn(context.Background(), logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), zap.String("error", err.Error())) @@ -202,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - s.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err)) + s.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index c787f9d5e..08ef8b86c 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -58,7 +58,7 @@ func defaultCfg(c *cfg) { }, fullSizeLimit: 1 << 30, // 1GB objSizeLimit: 1 << 20, // 1MB - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), metrics: &NoopMetrics{}, } } @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Blobovnicza"))} + c.log = l.With(zap.String("component", "Blobovnicza")) } } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index aeaa4e1d5..5d7135741 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -1,6 +1,7 @@ package blobovnicza import ( + "context" "errors" "fmt" "path/filepath" @@ -23,7 +24,7 @@ func (b *Blobovnicza) Open() error { return nil } - b.log.Debug(logs.BlobovniczaCreatingDirectoryForBoltDB, + b.log.Debug(context.Background(), logs.BlobovniczaCreatingDirectoryForBoltDB, zap.String("path", b.path), zap.Bool("ro", b.boltOptions.ReadOnly), ) @@ -37,7 +38,7 @@ func (b *Blobovnicza) Open() error { } } - b.log.Debug(logs.BlobovniczaOpeningBoltDB, + b.log.Debug(context.Background(), logs.BlobovniczaOpeningBoltDB, zap.String("path", b.path), zap.Stringer("permissions", b.perm), ) @@ -63,7 +64,7 @@ func (b *Blobovnicza) Init() error { return errors.New("blobovnicza is not open") } - b.log.Debug(logs.BlobovniczaInitializing, + b.log.Debug(context.Background(), logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) @@ -71,7 +72,7 @@ func (b *Blobovnicza) Init() error { size := b.dataSize.Load() items := b.itemsCount.Load() if size != 0 || items != 0 { - b.log.Debug(logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) return nil } @@ -81,7 +82,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug(logs.BlobovniczaCreatingBucketForSizeRange, + b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := 
tx.CreateBucketIfNotExists(key) @@ -131,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error { return fmt.Errorf("can't determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { - b.log.Debug(logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) if err := b.boltDB.Update(func(tx *bbolt.Tx) error { if err := saveDataSize(tx, size); err != nil { return err } return saveItemsCount(tx, items) }); err != nil { - b.log.Debug(logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) return fmt.Errorf("can't save blobovnicza's size and items count: %w", err) } - b.log.Debug(logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } b.dataSize.Store(size) @@ -162,7 +163,7 @@ func (b *Blobovnicza) Close() error { return nil } - b.log.Debug(logs.BlobovniczaClosingBoltDB, + b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index 5d6787897..d821b2991 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -91,7 +91,7 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err } if err == nil && found { - b.log.Debug(logs.BlobovniczaObjectWasRemovedFromBucket, + b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(dataSize)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index 681cf876c..55c9d6630 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -24,10 +24,10 @@ func (b *Blobovniczas) Open(mode mode.ComponentMode) error { // // Should be called exactly once. 
func (b *Blobovniczas) Init() error { - b.log.Debug(logs.BlobovniczatreeInitializingBlobovniczas) + b.log.Debug(context.Background(), logs.BlobovniczatreeInitializingBlobovniczas) if b.readOnly { - b.log.Debug(logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) + b.log.Debug(context.Background(), logs.BlobovniczatreeReadonlyModeSkipBlobovniczasInitialization) return nil } @@ -60,7 +60,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { b.deleteProtectedObjects.Add(move.Address) } - b.log.Debug(logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) + b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) return nil }) return false, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 298de3ad6..dd5258042 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -80,7 +80,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co res, err = b.deleteObjectFromLevel(ctx, bPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index a64b2bbb1..2149b17c0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -55,7 +55,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common _, err := b.getObjectFromLevel(ctx, gPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index 08cacda8a..e79480095 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -67,7 +67,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G res, err = b.getObjectFromLevel(ctx, bPrm, p) if err != nil { if !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index d237ae439..20f2be2ba 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -69,7 +69,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if err != nil { outOfBounds := isErrOutOfRange(err) if !outOfBounds && !client.IsErrObjectNotFound(err) { - b.log.Debug(logs.BlobovniczatreeCouldNotGetObjectFromLevel, + 
b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index f6acb46aa..7f0453410 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -42,7 +42,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm data, err := b.compression.Decompress(elem.ObjectData()) if err != nil { if prm.IgnoreErrors { - b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", elem.Address()), zap.String("err", err.Error()), zap.String("storage_id", p), @@ -76,7 +76,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo blz, err := shBlz.Open() if err != nil { if ignoreErrors { - b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("err", err.Error()), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 4fdde15a9..235c9f65d 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "errors" "fmt" "os" @@ -86,7 +87,7 @@ func (b *sharedDB) Close() { defer b.cond.L.Unlock() if b.refCount == 0 { - b.log.Error(logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) + b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) b.cond.Broadcast() return } @@ -94,7 +95,7 @@ func (b *sharedDB) Close() { if b.refCount == 1 { b.refCount = 0 if err := b.blcza.Close(); err != nil { - b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, + b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) @@ -122,7 +123,7 @@ func (b *sharedDB) CloseAndRemoveFile() error { } if err := b.blcza.Close(); err != nil { - b.log.Error(logs.BlobovniczatreeCouldNotCloseBlobovnicza, + b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 008be9543..b56251772 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -47,7 +47,7 @@ const ( func initConfig(c *cfg) { *c = cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), perm: defaultPerm, openedCacheSize: defaultOpenedCacheSize, openedCacheTTL: defaultOpenedCacheTTL, diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 76c4953e4..844b43151 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -82,7 +82,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) if 
!isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Debug(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, + i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } @@ -91,7 +91,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) } if active == nil { - i.B.log.Debug(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath), + i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false, nil } @@ -104,7 +104,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - i.B.log.Debug(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, + i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index 202d38cd7..fee67a0a8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -49,25 +49,25 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm var res common.RebuildRes - b.log.Debug(logs.BlobovniczaTreeCompletingPreviousRebuild) + b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage) res.ObjectsMoved += completedPreviosMoves if err != nil { - b.log.Warn(logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) success = false return res, err } - b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess) + b.log.Debug(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildSuccess) - b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild) + b.log.Debug(ctx, logs.BlobovniczaTreeCollectingDBToRebuild) dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent) if err != nil { - b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err)) success = false return res, err } - b.log.Info(logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate))) + b.log.Info(ctx, logs.BlobovniczaTreeCollectingDBToRebuildSuccess, zap.Int("blobovniczas_to_rebuild", len(dbsToMigrate))) res, err = b.migrateDBs(ctx, dbsToMigrate, prm, res) if err != nil { success = false @@ -78,14 +78,14 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.RebuildPrm, res common.RebuildRes) (common.RebuildRes, error) { var completedDBCount uint32 for _, db := range dbs { - b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) + b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter) res.ObjectsMoved += movedObjects if err != nil { - 
b.log.Warn(logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) return res, err } - b.log.Debug(logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects)) + b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaSuccess, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects)) res.FilesRemoved++ completedDBCount++ b.metrics.SetRebuildPercent((100 * completedDBCount) / uint32(len(dbs))) @@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) { } return func() { if err := os.Remove(sysPath); err != nil { - b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } }, nil } @@ -389,7 +389,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co }) for _, tmp := range rebuildTempFilesToRemove { if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil { - b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } } return count, err @@ -413,14 +413,14 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob if client.IsErrObjectNotFound(err) { existsInSource = false } else { - b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) return err } } if !existsInSource { // object was deleted by Rebuild, need to delete move info if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) return err } b.deleteProtectedObjects.Delete(move.Address) @@ -429,7 +429,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob existsInTarget, err := target.Exists(ctx, move.Address) if err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotCheckExistenceInTargetDB, zap.Error(err)) return err } @@ -439,25 +439,25 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob putPrm.SetMarshaledObject(gRes.Object()) _, err = target.Put(ctx, putPrm) if err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToTargetDB, zap.String("path", targetDB.SystemPath()), zap.Error(err)) return err } } if err = metaStore.UpdateStorageID(ctx, move.Address, move.TargetStorageID); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", move.Address)) return err } var deletePrm blobovnicza.DeletePrm deletePrm.SetAddress(move.Address) if _, err = source.Delete(ctx, deletePrm); err != nil { - 
b.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", sourcePath), zap.Error(err)) return err } if err = source.DropMoveInfo(ctx, move.Address); err != nil { - b.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", sourcePath), zap.Error(err)) return err } @@ -482,13 +482,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) } return false, nil } if target == nil { - i.B.log.Warn(logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) + i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } defer target.Close() @@ -505,7 +505,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } @@ -521,13 +521,13 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) } return true, nil } if err = i.MetaStore.UpdateStorageID(ctx, i.Address, targetStorageID.Bytes()); err != nil { - i.B.log.Warn(logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotUpdateStorageID, zap.Error(err), zap.Stringer("address", i.Address)) return true, nil } @@ -537,7 +537,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } @@ -546,7 +546,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err) } else { - i.B.log.Warn(logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) + i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) } return true, nil } diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index 6f579a8ca..41c6cf161 100644 --- 
a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -47,7 +47,7 @@ type cfg struct { } func initConfig(c *cfg) { - c.log = &logger.Logger{Logger: zap.L()} + c.log = logger.NewLoggerWrapper(zap.L()) c.metrics = &noopMetrics{} } @@ -90,7 +90,7 @@ func WithStorages(st []SubStorage) Option { // WithLogger returns option to specify BlobStor's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "BlobStor"))} + c.log = l.With(zap.String("component", "BlobStor")) } } diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 9b414a9be..43436b4eb 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -12,7 +12,7 @@ import ( // Open opens BlobStor. func (b *BlobStor) Open(ctx context.Context, mode mode.Mode) error { - b.log.Debug(logs.BlobstorOpening) + b.log.Debug(ctx, logs.BlobstorOpening) b.modeMtx.Lock() defer b.modeMtx.Unlock() @@ -51,7 +51,7 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure. func (b *BlobStor) Init() error { - b.log.Debug(logs.BlobstorInitializing) + b.log.Debug(context.Background(), logs.BlobstorInitializing) if err := b.compression.Init(); err != nil { return err @@ -68,13 +68,13 @@ func (b *BlobStor) Init() error { // Close releases all internal resources of BlobStor. func (b *BlobStor) Close() error { - b.log.Debug(logs.BlobstorClosing) + b.log.Debug(context.Background(), logs.BlobstorClosing) var firstErr error for i := range b.storage { err := b.storage[i].Storage.Close() if err != nil { - b.log.Info(logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) + b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/delete.go b/pkg/local_object_storage/blobstor/delete.go index c91508e6d..86d8f15e3 100644 --- a/pkg/local_object_storage/blobstor/delete.go +++ b/pkg/local_object_storage/blobstor/delete.go @@ -39,7 +39,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del if err == nil || !client.IsErrObjectNotFound(err) { if err == nil { success = true - logOp(b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) + logOp(ctx, b.log, deleteOp, prm.Address, b.storage[i].Storage.Type(), prm.StorageID) } return res, err } @@ -58,7 +58,7 @@ func (b *BlobStor) Delete(ctx context.Context, prm common.DeletePrm) (common.Del res, err := st.Delete(ctx, prm) if err == nil { success = true - logOp(b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) + logOp(ctx, b.log, deleteOp, prm.Address, st.Type(), prm.StorageID) } return res, err diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 43feec7c9..556f53e12 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -73,7 +73,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi } for _, err := range errors[:len(errors)-1] { - b.log.Warn(logs.BlobstorErrorOccurredDuringObjectExistenceChecking, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), zap.String("error", err.Error()), 
zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 057796db2..7f52762a7 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -87,7 +87,7 @@ func New(opts ...Option) *FSTree { DirNameLen: DirNameLen, metrics: &noopMetrics{}, fileCounter: &noopCounter{}, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), } for i := range opts { opts[i](f) @@ -152,7 +152,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr des, err := os.ReadDir(dirPath) if err != nil { if prm.IgnoreErrors { - t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("err", err.Error()), zap.String("directory_path", dirPath)) return nil @@ -200,7 +200,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr } if err != nil { if prm.IgnoreErrors { - t.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", addr), zap.String("err", err.Error()), zap.String("path", path)) diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go index 4d1f8fc22..7155ddcbb 100644 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ b/pkg/local_object_storage/blobstor/fstree/option.go @@ -53,6 +53,6 @@ func WithFileCounter(c FileCounter) Option { func WithLogger(l *logger.Logger) Option { return func(f *FSTree) { - f.log = &logger.Logger{Logger: l.With(zap.String("component", "FSTree"))} + f.log = l.With(zap.String("component", "FSTree")) } } diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index f213d7547..1ba835a95 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -42,7 +42,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I _, err := b.storage[i].Storage.Iterate(ctx, prm) if err != nil { if prm.IgnoreErrors { - b.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, + b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("storage_path", b.storage[i].Storage.Path()), zap.String("storage_type", b.storage[i].Storage.Type()), zap.String("err", err.Error())) diff --git a/pkg/local_object_storage/blobstor/logger.go b/pkg/local_object_storage/blobstor/logger.go index 7e057a0e3..070b1eac9 100644 --- a/pkg/local_object_storage/blobstor/logger.go +++ b/pkg/local_object_storage/blobstor/logger.go @@ -1,6 +1,8 @@ package blobstor import ( + "context" + storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -11,8 +13,8 @@ const ( putOp = "PUT" ) -func logOp(l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) { - storagelog.Write(l, +func logOp(ctx context.Context, l *logger.Logger, op string, addr oid.Address, typ string, sID []byte) { + storagelog.Write(ctx, l, storagelog.AddressField(addr), storagelog.OpField(op), storagelog.StorageTypeField(typ), diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go index 1adae303d..342da28bf 100644 --- 
a/pkg/local_object_storage/blobstor/put.go +++ b/pkg/local_object_storage/blobstor/put.go @@ -63,7 +63,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e res, err := b.storage[i].Storage.Put(ctx, prm) if err == nil { success = true - logOp(b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) + logOp(ctx, b.log, putOp, prm.Address, b.storage[i].Storage.Type(), res.StorageID) } return res, err } diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go index 7b2786ba2..2a6b94789 100644 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ b/pkg/local_object_storage/blobstor/rebuild.go @@ -30,7 +30,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con summary.FilesRemoved += res.FilesRemoved summary.ObjectsMoved += res.ObjectsMoved if err != nil { - b.log.Error(logs.BlobstorRebuildFailedToRebuildStorages, + b.log.Error(ctx, logs.BlobstorRebuildFailedToRebuildStorages, zap.String("failed_storage_path", storage.Storage.Path()), zap.String("failed_storage_type", storage.Storage.Type()), zap.Error(err)) @@ -38,7 +38,7 @@ func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter Con break } } - b.log.Info(logs.BlobstorRebuildRebuildStoragesCompleted, + b.log.Info(ctx, logs.BlobstorRebuildRebuildStoragesCompleted, zap.Bool("success", rErr == nil), zap.Uint64("total_files_removed", summary.FilesRemoved), zap.Uint64("total_objects_moved", summary.ObjectsMoved)) diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 80fb3f9ed..98ec73ae9 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -49,7 +49,7 @@ func (e *StorageEngine) open(ctx context.Context) error { for res := range errCh { if res.err != nil { - e.log.Error(logs.EngineCouldNotOpenShardClosingAndSkipping, + e.log.Error(ctx, logs.EngineCouldNotOpenShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) @@ -58,7 +58,7 @@ func (e *StorageEngine) open(ctx context.Context) error { err := sh.Close() if err != nil { - e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, + e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -101,7 +101,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { for res := range errCh { if res.err != nil { if errors.Is(res.err, blobstor.ErrInitBlobovniczas) { - e.log.Error(logs.EngineCouldNotInitializeShardClosingAndSkipping, + e.log.Error(ctx, logs.EngineCouldNotInitializeShardClosingAndSkipping, zap.String("id", res.id), zap.Error(res.err)) @@ -110,7 +110,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := sh.Close() if err != nil { - e.log.Error(logs.EngineCouldNotClosePartiallyInitializedShard, + e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), zap.Error(res.err)) } @@ -165,7 +165,7 @@ func (e *StorageEngine) close(releasePools bool) error { for id, sh := range e.shards { if err := sh.Close(); err != nil { - e.log.Debug(logs.EngineCouldNotCloseShard, + e.log.Debug(context.Background(), logs.EngineCouldNotCloseShard, zap.String("id", id), zap.String("error", err.Error()), ) @@ -311,7 +311,7 @@ loop: for _, p := range shardsToReload { err := p.sh.Reload(ctx, p.opts...) 
if err != nil { - e.log.Error(logs.EngineCouldNotReloadAShard, + e.log.Error(ctx, logs.EngineCouldNotReloadAShard, zap.Stringer("shard id", p.sh.ID()), zap.Error(err)) } @@ -340,7 +340,7 @@ loop: return fmt.Errorf("could not add %s shard: %w", idStr, err) } - e.log.Info(logs.EngineAddedNewShard, zap.String("id", idStr)) + e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr)) } return nil diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 9ca3a7cee..2e957eb04 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -152,7 +152,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { - e.log.Warn(logs.EngineErrorDuringSearchingForObjectChildren, + e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -164,7 +164,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, + e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), zap.String("err", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -194,7 +194,7 @@ func (e *StorageEngine) deleteChunks( inhumePrm.MarkAsGarbage(addr) _, err = sh.Inhume(ctx, inhumePrm) if err != nil { - e.log.Debug(logs.EngineCouldNotInhumeObjectInShard, + e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), zap.String("err", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 3183d6137..8963ec099 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -99,20 +99,20 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta if isMeta { err := sh.SetMode(mode.DegradedReadOnly) if err == nil { - log.Info(logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) + log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) return } - log.Error(logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, + log.Error(context.Background(), logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Error(err)) } err := sh.SetMode(mode.ReadOnly) if err != nil { - log.Error(logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) + log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) return } - log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) + log.Info(context.Background(), logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) } // reportShardErrorByID increases shard error counter and logs an error. 
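Note on the pattern in these hunks: every logger call now receives a context.Context as its first argument, and call sites with no request-scoped context (mode switches, Close paths, shard removal) pass context.Background() explicitly. The constructor NewLoggerWrapper and the Debug(ctx, msg, fields...) shape mirror what the hunks show, but the sketch below is only a minimal, hypothetical wrapper written to illustrate why the context must be threaded through; its implementation, the WithTraceID helper, and the trace-ID field are assumptions for illustration and not the actual frostfs-node logger internals.

// logctx_sketch.go: hypothetical context-aware logger wrapper (illustration only).
package main

import (
	"context"

	"go.uber.org/zap"
)

// ctxKey and WithTraceID are illustrative stand-ins for however request
// metadata might be attached to the context in real code.
type ctxKey struct{}

func WithTraceID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, ctxKey{}, id)
}

// Logger is a minimal analogue of a wrapper whose methods take a context.
type Logger struct{ z *zap.Logger }

func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{z: z} }

// Debug enriches the entry with a field derived from ctx (here a trace ID),
// which is one reason a context has to reach every logging call site.
func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) {
	if id, ok := ctx.Value(ctxKey{}).(string); ok {
		fields = append(fields, zap.String("trace_id", id))
	}
	l.z.Debug(msg, fields...)
}

func main() {
	log := NewLoggerWrapper(zap.NewExample())
	// Request-scoped path: the context carries per-request metadata.
	log.Debug(WithTraceID(context.Background(), "abc123"), "object removed")
	// Background path, as in the Close()/SetMode hunks above: plain context.Background().
	log.Debug(context.Background(), "closing boltDB")
}

Usage-wise, the split visible throughout the diff follows directly from this shape: wherever a ctx is already flowing (Get, Put, Delete, evacuation), it is forwarded to the logger; wherever there is none (initialization, shutdown, background counters), context.Background() is the explicit fallback.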
@@ -137,7 +137,7 @@ func (e *StorageEngine) reportShardError( fields ...zap.Field, ) { if isLogical(err) { - e.log.Warn(msg, + e.log.Warn(context.Background(), msg, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error())) return @@ -147,7 +147,7 @@ func (e *StorageEngine) reportShardError( e.metrics.IncErrorCounter(sh.ID().String()) sid := sh.ID() - e.log.Warn(msg, append([]zap.Field{ + e.log.Warn(context.Background(), msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.String("error", err.Error()), @@ -168,7 +168,7 @@ func (e *StorageEngine) reportShardError( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. - e.log.Debug(logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, + e.log.Debug(context.Background(), logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } @@ -197,7 +197,7 @@ type cfg struct { func defaultCfg() *cfg { res := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), shardPoolSize: 20, metrics: noopMetrics{}, } @@ -269,8 +269,8 @@ type containerSource struct { func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) { select { - case <-ctx.Done(): - return false, ctx.Err() + case <-context.Background().Done(): + return false, context.Background().Err() default: } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 940e30323..b88c249b1 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -297,12 +297,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p e.evacuateLimiter.Complete(err) }() - e.log.Info(logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, + e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) err = e.getTotals(ctx, prm, shardsToEvacuate, res) if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) return err } @@ -336,12 +336,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p err = errors.Join(err, fmt.Errorf("object error: %w", errObject)) } if err != nil { - e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, + e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) return err } - e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation, + e.log.Info(ctx, logs.EngineFinishedSuccessfullyShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, zap.Uint64("total_objects", res.ObjectsTotal()), @@ -494,7 +494,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context err := 
sh.IterateOverContainers(ctx, cntPrm) if err != nil { cancel(err) - e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } return err @@ -551,7 +551,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree return err } if success { - e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedLocal, + e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -561,26 +561,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree moved, nodePK, err := e.evacuateTreeToOtherNode(ctx, sh, contTree, prm) if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } if moved { - e.log.Debug(logs.EngineShardsEvacuationTreeEvacuatedRemote, + e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) res.trEvacuated.Add(1) } else if prm.IgnoreErrors { res.trFailed.Add(1) - e.log.Warn(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveTree, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -770,7 +770,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI res.objFailed.Add(1) return nil } - e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } @@ -792,7 +792,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) if err != nil { - e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + e.log.Error(ctx, 
logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } @@ -800,7 +800,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI res.objEvacuated.Add(1) } else if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { return fmt.Errorf("object %s was not replicated", addr) @@ -835,7 +835,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) - e.log.Debug(logs.EngineObjectIsMovedToAnotherShard, + e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr), diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index a1fe8a010..d6827e6c3 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -104,7 +104,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound, + e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), zap.String("error", it.MetaError.Error()), zap.Stringer("address", prm.addr), diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index b8959b534..1dc64c174 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -90,7 +90,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e if !prm.forceRemoval { locked, err := e.IsLocked(ctx, prm.addrs[i]) if err != nil { - e.log.Warn(logs.EngineRemovingAnObjectWithoutFullLockingCheck, + e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, zap.Error(err), zap.Stringer("addr", prm.addrs[i]), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -264,7 +264,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err())) return true default: return false @@ -278,7 +278,7 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err())) return true default: return false @@ -305,7 +305,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, 
logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -316,7 +316,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid prm.SetContainerID(id) s, err := sh.ContainerSize(prm) if err != nil { - e.log.Warn(logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -338,7 +338,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -346,7 +346,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid for id := range idMap { if err := sh.DeleteContainerSize(ctx, id); err != nil { - e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -383,7 +383,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -394,7 +394,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci prm.ContainerID = id s, err := sh.ContainerCount(ctx, prm) if err != nil { - e.log.Warn(logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -416,7 +416,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci e.iterateOverUnsortedShards(func(sh hashedShard) bool { select { case <-ctx.Done(): - e.log.Info(logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) failed = true return true default: @@ -424,7 +424,7 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci for id := range idMap { if err := sh.DeleteContainerCount(ctx, id); err != nil { - e.log.Warn(logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true return true } @@ -449,7 +449,7 @@ func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) for _, id := range ids { isAvailable, err := cs.IsContainerAvailable(ctx, id) if err != nil { - e.log.Warn(logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err)) + e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err)) return nil, err } if isAvailable { diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 56d3ef490..635f0e302 100644 --- 
a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -141,7 +141,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti // expired => do nothing with it res.status = putToShardExists } else { - e.log.Warn(logs.EngineCouldNotCheckObjectExistence, + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -163,14 +163,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti if err != nil { if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return } if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -185,7 +185,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti res.status = putToShardSuccess }); err != nil { - e.log.Warn(logs.EngineCouldNotPutObjectToShard, zap.Error(err)) + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err)) close(exitCh) } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index fde6052ae..c5c94eef7 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -116,7 +116,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } if it.ShardWithMeta.Shard != nil && it.MetaError != nil { - e.log.Warn(logs.ShardMetaInfoPresentButObjectNotFound, + e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), zap.String("error", it.MetaError.Error()), zap.Stringer("address", prm.addr), diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index 5e1ced56a..8ab3c5217 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -43,7 +43,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat prm.Concurrency = defaultRemoveDuplicatesConcurrency } - e.log.Info(logs.EngineStartingRemovalOfLocallyredundantCopies, + e.log.Info(ctx, logs.EngineStartingRemovalOfLocallyredundantCopies, zap.Int("concurrency", prm.Concurrency)) // The mutext must be taken for the whole duration to avoid target shard being removed @@ -55,7 +55,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat // This is not currently the case, because `FreeSpace` metric used by weight sorting is always 0. // However we could change weights in future and easily forget this function. 
for _, sh := range e.shards { - e.log.Debug(logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID())) + e.log.Debug(ctx, logs.EngineStartedDuplicatesRemovalRoutine, zap.Stringer("shard_id", sh.ID())) ch := make(chan oid.Address) errG, ctx := errgroup.WithContext(ctx) @@ -93,12 +93,12 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat }) } if err := errG.Wait(); err != nil { - e.log.Error(logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) + e.log.Error(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies, zap.Error(err)) return err } } - e.log.Info(logs.EngineFinishedRemovalOfLocallyredundantCopies) + e.log.Info(ctx, logs.EngineFinishedRemovalOfLocallyredundantCopies) return nil } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index aab2c423c..e172706e3 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -140,7 +140,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh )...) if err := sh.UpdateID(); err != nil { - e.log.Warn(logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) + e.log.Warn(context.Background(), logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) } return sh, nil @@ -228,7 +228,7 @@ func (e *StorageEngine) removeShards(ids ...string) { delete(e.shardPools, id) } - e.log.Info(logs.EngineShardHasBeenRemoved, + e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved, zap.String("id", id)) } e.mtx.Unlock() @@ -236,14 +236,14 @@ func (e *StorageEngine) removeShards(ids ...string) { for _, sh := range ss { err := sh.SetMode(mode.Disabled) if err != nil { - e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) } err = sh.Close() if err != nil { - e.log.Error(logs.EngineCouldNotCloseRemovedShard, + e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -340,7 +340,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { return case sh.NotificationChannel() <- ev: default: - e.log.Debug(logs.ShardEventProcessingInProgress, + e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) } } @@ -369,7 +369,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { eg.Go(func() error { err := sh.SetMode(mode.Disabled) if err != nil { - e.log.Error(logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -380,7 +380,7 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { err = sh.Close() if err != nil { - e.log.Error(logs.EngineCouldNotCloseRemovedShard, + e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -432,7 +432,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { delete(e.shardPools, idStr) } - e.log.Info(logs.EngineShardHasBeenRemoved, + e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } diff --git 
a/pkg/local_object_storage/internal/log/log.go b/pkg/local_object_storage/internal/log/log.go index 23740868d..6b101fa60 100644 --- a/pkg/local_object_storage/internal/log/log.go +++ b/pkg/local_object_storage/internal/log/log.go @@ -1,14 +1,16 @@ package storagelog import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) // Write writes message about storage engine's operation to logger. -func Write(logger *logger.Logger, fields ...zap.Field) { - logger.Debug(logs.StorageOperation, fields...) +func Write(ctx context.Context, logger *logger.Logger, fields ...zap.Field) { + logger.Debug(ctx, logs.StorageOperation, fields...) } // AddressField returns logger's field for object address. diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index d6546d922..68e065a0a 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -57,7 +57,7 @@ func (db *DB) openDB(mode mode.Mode) error { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug(logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) + db.log.Debug(context.Background(), logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -78,9 +78,9 @@ func (db *DB) openBolt() error { db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug(logs.MetabaseOpenedBoltDBInstanceForMetabase) + db.log.Debug(context.Background(), logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug(logs.MetabaseCheckingMetabaseVersion) + db.log.Debug(context.Background(), logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. 
So here we check that the number of buckets is diff --git a/pkg/local_object_storage/metabase/db.go b/pkg/local_object_storage/metabase/db.go index 2cd990814..4474aa229 100644 --- a/pkg/local_object_storage/metabase/db.go +++ b/pkg/local_object_storage/metabase/db.go @@ -70,7 +70,7 @@ func defaultCfg() *cfg { }, boltBatchDelay: bbolt.DefaultMaxBatchDelay, boltBatchSize: bbolt.DefaultMaxBatchSize, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), metrics: &noopMetrics{}, } } diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index b5ac22017..62ab1056d 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -117,7 +117,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { if err == nil { deleted = true for i := range prm.addrs { - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(prm.addrs[i]), storagelog.OpField("metabase DELETE")) } diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 12f27d330..8d1e18729 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -205,7 +205,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { success = err == nil if success { for _, addr := range prm.target { - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(addr), storagelog.OpField("metabase INHUME")) } diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 2d94e7ae1..d7675869f 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -101,7 +101,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) { }) if err == nil { success = true - storagelog.Write(db.log, + storagelog.Write(ctx, db.log, storagelog.AddressField(objectCore.AddressOf(prm.obj)), storagelog.OpField("metabase PUT")) } diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index aeb14aeb6..e2eee86b0 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -113,7 +113,7 @@ func TestGenerateMetabaseFile(t *testing.T) { }) } require.NoError(t, eg.Wait()) - db.log.Info("simple objects generated") + db.log.Info(ctx, "simple objects generated") eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // complex objects @@ -137,7 +137,7 @@ func TestGenerateMetabaseFile(t *testing.T) { }) } require.NoError(t, eg.Wait()) - db.log.Info("complex objects generated") + db.log.Info(ctx, "complex objects generated") eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects deleted by gc marks @@ -159,7 +159,7 @@ func TestGenerateMetabaseFile(t *testing.T) { }) } require.NoError(t, eg.Wait()) - db.log.Info("simple objects deleted by gc marks generated") + db.log.Info(ctx, "simple objects deleted by gc marks generated") eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(10000) // simple objects deleted by tombstones @@ -189,7 +189,7 @@ func TestGenerateMetabaseFile(t *testing.T) { }) } require.NoError(t, eg.Wait()) - db.log.Info("simple objects deleted by tombstones generated") + db.log.Info(ctx, "simple objects deleted by tombstones generated") eg, ctx = 
errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects locked by locks @@ -216,7 +216,7 @@ func TestGenerateMetabaseFile(t *testing.T) { }) } require.NoError(t, eg.Wait()) - db.log.Info("simple objects locked by locks generated") + db.log.Info(ctx, "simple objects locked by locks generated") require.NoError(t, db.boltDB.Sync()) require.NoError(t, db.Close()) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 62800dbd0..eb3aa61c0 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -21,7 +21,7 @@ import ( ) func (s *Shard) handleMetabaseFailure(stage string, err error) error { - s.log.Error(logs.ShardMetabaseFailureSwitchingMode, + s.log.Error(context.Background(), logs.ShardMetabaseFailureSwitchingMode, zap.String("stage", stage), zap.Stringer("mode", mode.ReadOnly), zap.Error(err)) @@ -31,7 +31,7 @@ func (s *Shard) handleMetabaseFailure(stage string, err error) error { return nil } - s.log.Error(logs.ShardCantMoveShardToReadonlySwitchMode, + s.log.Error(context.Background(), logs.ShardCantMoveShardToReadonlySwitchMode, zap.String("stage", stage), zap.Stringer("mode", mode.DegradedReadOnly), zap.Error(err)) @@ -211,7 +211,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error { withCount := true totalObjects, err := s.blobStor.ObjectsCount(ctx) if err != nil { - s.log.Warn(logs.EngineRefillFailedToGetObjectsCount, zap.Error(err)) + s.log.Warn(ctx, logs.EngineRefillFailedToGetObjectsCount, zap.Error(err)) withCount = false } @@ -270,7 +270,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error { func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, descriptor []byte) error { obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - s.log.Warn(logs.ShardCouldNotUnmarshalObject, + s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), zap.String("err", err.Error())) return nil @@ -285,7 +285,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, return err } if info.Removed { - s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr)) + s.log.Debug(ctx, logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr)) return nil } isIndexedContainer = info.Indexed @@ -386,7 +386,7 @@ func (s *Shard) Close() error { for _, component := range components { if err := component.Close(); err != nil { lastErr = err - s.log.Error(logs.ShardCouldNotCloseShardComponent, zap.Error(err)) + s.log.Error(context.Background(), logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } } @@ -424,7 +424,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { ok, err := s.metaBase.Reload(c.metaOpts...) 
if err != nil { if errors.Is(err, meta.ErrDegradedMode) { - s.log.Error(logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) + s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) } return err @@ -440,7 +440,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { err = s.metaBase.Init() } if err != nil { - s.log.Error(logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) + s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) _ = s.setMode(mode.DegradedReadOnly) return err } diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index c898fdf41..f62cecd56 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -95,7 +95,7 @@ func (s *Shard) validateWritecacheDoesntContainObject(ctx context.Context, addr } _, err := s.writeCache.Head(ctx, addr) if err == nil { - s.log.Warn(logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr)) + s.log.Warn(ctx, logs.ObjectRemovalFailureExistsInWritecache, zap.Stringer("object_address", addr)) return fmt.Errorf("object %s must be flushed from writecache", addr) } if client.IsErrObjectNotFound(err) { @@ -110,7 +110,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error res, err := s.metaBase.StorageID(ctx, sPrm) if err != nil { - s.log.Debug(logs.StorageIDRetrievalFailure, + s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -130,7 +130,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error _, err = s.blobStor.Delete(ctx, delPrm) if err != nil && !client.IsErrObjectNotFound(err) { - s.log.Debug(logs.ObjectRemovalFailureBlobStor, + s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, zap.Stringer("object_address", addr), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index d605746e8..6fabf7103 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -131,7 +131,7 @@ type gcCfg struct { func defaultGCCfg() gcCfg { return gcCfg{ removerInterval: 10 * time.Second, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), workerPoolInit: func(int) util.WorkerPool { return nil }, @@ -161,14 +161,14 @@ func (gc *gc) listenEvents(ctx context.Context) { for { select { case <-gc.stopChannel: - gc.log.Warn(logs.ShardStopEventListenerByClosedStopChannel) + gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedStopChannel) return case <-ctx.Done(): - gc.log.Warn(logs.ShardStopEventListenerByContext) + gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return case event, ok := <-gc.eventChan: if !ok { - gc.log.Warn(logs.ShardStopEventListenerByClosedEventChannel) + gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return } @@ -204,7 +204,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) { h(runCtx, event) }) if err != nil { - gc.log.Warn(logs.ShardCouldNotSubmitGCJobToWorkerPool, + gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.String("error", err.Error()), ) @@ -222,7 +222,7 @@ func (gc *gc) releaseResources() { // because it is possible that we are close it earlier than stop writing. 
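For reference, the logger API that every hunk in this patch converges on: loggers are built with logger.NewLoggerWrapper and each level method takes a context.Context as its first argument. A minimal sketch of that wrapper, with signatures inferred only from the calls visible in this diff (the real pkg/util/logger implementation may do more, e.g. attach trace IDs taken from ctx):

package logger

import (
	"context"

	"go.uber.org/zap"
)

// Logger is a thin wrapper over zap that accepts a context on every call.
type Logger struct {
	z *zap.Logger
}

// NewLoggerWrapper wraps an existing zap logger (replaces &logger.Logger{Logger: ...}).
func NewLoggerWrapper(z *zap.Logger) *Logger {
	return &Logger{z: z}
}

// With attaches fields and keeps the wrapper type, so callers no longer re-wrap the result.
func (l *Logger) With(fields ...zap.Field) *Logger {
	return &Logger{z: l.z.With(fields...)}
}

// Info logs at info level; ctx is accepted so request-scoped data can be added later.
func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) {
	l.z.Info(msg, fields...)
}

// Debug, Warn and Error follow the same (ctx, msg, fields...) shape.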
// It is ok to keep it opened. - gc.log.Debug(logs.ShardGCIsStopped) + gc.log.Debug(context.Background(), logs.ShardGCIsStopped) } func (gc *gc) tickRemover(ctx context.Context) { @@ -263,7 +263,7 @@ func (gc *gc) stop() { close(gc.stopChannel) }) - gc.log.Info(logs.ShardWaitingForGCWorkersToStop) + gc.log.Info(context.Background(), logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() } @@ -286,8 +286,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } - s.log.Debug(logs.ShardGCRemoveGarbageStarted) - defer s.log.Debug(logs.ShardGCRemoveGarbageCompleted) + s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) + defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) buf := make([]oid.Address, 0, s.rmBatchSize) @@ -312,7 +312,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { // (no more than s.rmBatchSize objects) err := s.metaBase.IterateOverGarbage(ctx, iterPrm) if err != nil { - s.log.Warn(logs.ShardIteratorOverMetabaseGraveyardFailed, + s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, zap.String("error", err.Error()), ) @@ -333,7 +333,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { result.success = true if err != nil { - s.log.Warn(logs.ShardCouldNotDeleteTheObjects, + s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects, zap.String("error", err.Error()), ) result.success = false @@ -356,8 +356,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) } } @@ -416,7 +416,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { - s.log.Warn(logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) return } @@ -428,7 +428,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) // inhume the collected objects res, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - s.log.Warn(logs.ShardCouldNotInhumeTheObjects, + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.String("error", err.Error()), ) @@ -473,8 +473,8 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) - log.Debug(logs.ShardStartedExpiredTombstonesHandling) - defer log.Debug(logs.ShardFinishedExpiredTombstonesHandling) + log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) + defer log.Debug(ctx, logs.ShardFinishedExpiredTombstonesHandling) const tssDeleteBatch = 50 tss := make([]meta.TombstonedObject, 
0, tssDeleteBatch) @@ -492,12 +492,12 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { }) for { - log.Debug(logs.ShardIteratingTombstones) + log.Debug(ctx, logs.ShardIteratingTombstones) s.m.RLock() if s.info.Mode.NoMetabase() { - s.log.Debug(logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) + s.log.Debug(ctx, logs.ShardShardIsInADegradedModeSkipCollectingExpiredTombstones) s.m.RUnlock() return @@ -505,7 +505,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) if err != nil { - log.Error(logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() return @@ -524,7 +524,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } - log.Debug(logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) + log.Debug(ctx, logs.ShardHandlingExpiredTombstonesBatch, zap.Int("number", len(tssExp))) if len(tssExp) > 0 { s.expiredTombstonesCallback(ctx, tssExp) } @@ -543,8 +543,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) } } @@ -645,7 +645,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // inhume tombstones res, err := s.metaBase.Inhume(ctx, pInhume) if err != nil { - s.log.Warn(logs.ShardCouldNotMarkTombstonesAsGarbage, + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.String("error", err.Error()), ) @@ -668,7 +668,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // from graveyard err = s.metaBase.DropGraves(ctx, tss) if err != nil { - s.log.Warn(logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) + s.log.Warn(ctx, logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) } } @@ -680,7 +680,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] } unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn(logs.ShardFailureToUnlockObjects, + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) @@ -693,7 +693,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] res, err := s.metaBase.Inhume(ctx, pInhume) if err != nil { - s.log.Warn(logs.ShardFailureToMarkLockersAsGarbage, + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.String("error", err.Error()), ) @@ -718,7 +718,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unlocked 
[]oid.Address) { expiredUnlocked, err := s.selectExpired(ctx, epoch, unlocked) if err != nil { - s.log.Warn(logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) + s.log.Warn(ctx, logs.ShardFailureToGetExpiredUnlockedObjects, zap.Error(err)) return } @@ -737,7 +737,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn(logs.ShardFailureToUnlockObjects, + s.log.Warn(context.Background(), logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) @@ -756,8 +756,8 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { epoch := e.(newEpoch).epoch - s.log.Debug(logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) - defer s.log.Debug(logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) s.collectExpiredContainerSizeMetrics(ctx, epoch) s.collectExpiredContainerCountMetrics(ctx, epoch) @@ -766,7 +766,7 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { ids, err := s.metaBase.ZeroSizeContainers(ctx) if err != nil { - s.log.Warn(logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return } if len(ids) == 0 { @@ -778,7 +778,7 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { ids, err := s.metaBase.ZeroCountContainers(ctx) if err != nil { - s.log.Warn(logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return } if len(ids) == 0 { diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index d1c393613..7a31a705e 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -144,7 +144,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta return nil, false, logicerr.Wrap(new(apistatus.ObjectNotFound)) } } else { - s.log.Warn(logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) + s.log.Warn(ctx, logs.ShardFetchingObjectWithoutMeta, zap.Stringer("addr", addr)) } if s.hasWriteCache() { @@ -153,12 +153,12 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta return res, false, err } if client.IsErrObjectNotFound(err) { - s.log.Debug(logs.ShardObjectIsMissingInWritecache, + s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { - s.log.Error(logs.ShardFailedToFetchObjectFromWritecache, + s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), zap.Bool("skip_meta", skipMeta), diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index a72313498..e27dc0733 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -5,7 +5,6 @@ import ( "fmt" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/mr-tron/base58" "go.uber.org/zap" ) @@ -50,7 +49,7 @@ func (s *Shard) UpdateID() (err error) { s.writeCache.GetMetrics().SetShardID(shardID) } - s.log = &logger.Logger{Logger: s.log.With(zap.Stringer("shard_id", s.info.ID))} + s.log = s.log.With(zap.Stringer("shard_id", s.info.ID)) s.metaBase.SetLogger(s.log) s.blobStor.SetLogger(s.log) if s.hasWriteCache() { diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 746177c3a..984c54fbc 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -109,7 +109,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrLockObjectRemoval } - s.log.Debug(logs.ShardCouldNotMarkObjectToDeleteInMetabase, + s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 8d09974b8..7b267d2e4 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -122,7 +122,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { sRes, err := s.metaBase.Select(ctx, sPrm) // consider making List in metabase if err != nil { - s.log.Debug(logs.ShardCantSelectAllObjects, + s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), zap.String("error", err.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 9ce95feb1..595afb60e 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -30,7 +30,7 @@ func TestShard_Lock(t *testing.T) { rootPath := t.TempDir() opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(&logger.Logger{Logger: zap.NewNop()}), + WithLogger(logger.NewLoggerWrapper(zap.NewNop())), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index d90a5f4b6..98b4c37b2 100644 --- a/pkg/local_object_storage/shard/mode.go +++ b/pkg/local_object_storage/shard/mode.go @@ -1,6 +1,8 @@ package shard import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" @@ -26,7 +28,7 @@ func (s *Shard) SetMode(m mode.Mode) error { } func (s *Shard) setMode(m mode.Mode) error { - s.log.Info(logs.ShardSettingShardMode, + s.log.Info(context.Background(), logs.ShardSettingShardMode, zap.Stringer("old_mode", s.info.Mode), zap.Stringer("new_mode", m)) @@ -67,7 +69,7 @@ func (s *Shard) setMode(m mode.Mode) error { s.info.Mode = m s.metricsWriter.SetMode(s.info.Mode) - s.log.Info(logs.ShardShardModeSetSuccessfully, + s.log.Info(context.Background(), logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) return nil } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 24cc75154..50125a88d 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -75,7 +75,7 @@ func 
(s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { } if err != nil || !tryCache { if err != nil { - s.log.Debug(logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, + s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, zap.String("err", err.Error())) } diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 0d83caa0c..124b72a5c 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -102,11 +102,11 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo return default: } - log.Info(logs.BlobstoreRebuildStarted) + log.Info(ctx, logs.BlobstoreRebuildStarted) if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { - log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err)) + log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { - log.Info(logs.BlobstoreRebuildCompletedSuccessfully) + log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) } } @@ -138,7 +138,7 @@ func (r *rebuilder) Stop(log *logger.Logger) { r.wg.Wait() r.cancel = nil r.done = nil - log.Info(logs.BlobstoreRebuildStopped) + log.Info(context.Background(), logs.BlobstoreRebuildStopped) } var errMBIsNotAvailable = errors.New("metabase is not available") diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 413bfd2f7..3a06fe8a7 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -103,7 +103,7 @@ type cfg struct { func defaultCfg() *cfg { return &cfg{ rmBatchSize: 100, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), gcCfg: defaultGCCfg(), reportErrorFunc: func(string, string, error) {}, zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, @@ -401,7 +401,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { cc, err := s.metaBase.ObjectCounters() if err != nil { - s.log.Warn(logs.ShardMetaObjectCounterRead, + s.log.Warn(ctx, logs.ShardMetaObjectCounterRead, zap.Error(err), ) @@ -414,7 +414,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { cnrList, err := s.metaBase.Containers(ctx) if err != nil { - s.log.Warn(logs.ShardMetaCantReadContainerList, zap.Error(err)) + s.log.Warn(ctx, logs.ShardMetaCantReadContainerList, zap.Error(err)) return } @@ -423,7 +423,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { for i := range cnrList { size, err := s.metaBase.ContainerSize(cnrList[i]) if err != nil { - s.log.Warn(logs.ShardMetaCantReadContainerSize, + s.log.Warn(ctx, logs.ShardMetaCantReadContainerSize, zap.String("cid", cnrList[i].EncodeToString()), zap.Error(err)) continue @@ -436,7 +436,7 @@ func (s *Shard) updateMetrics(ctx context.Context) { contCount, err := s.metaBase.ContainerCounters(ctx) if err != nil { - s.log.Warn(logs.FailedToGetContainerCounters, zap.Error(err)) + s.log.Warn(ctx, logs.FailedToGetContainerCounters, zap.Error(err)) return } for contID, count := range contCount.Counts { diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index a6de07f03..f655e477a 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -124,12 +124,12 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { close(started) defer cleanup() - s.log.Info(logs.StartedWritecacheSealAsync) + s.log.Info(ctx, logs.StartedWritecacheSealAsync) if err := 
s.writeCache.Seal(ctx, prm); err != nil { - s.log.Warn(logs.FailedToSealWritecacheAsync, zap.Error(err)) + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) return } - s.log.Info(logs.WritecacheSealCompletedAsync) + s.log.Info(ctx, logs.WritecacheSealCompletedAsync) }() select { case <-ctx.Done(): diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index b97fc5856..098872e08 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -55,7 +55,7 @@ func New(opts ...Option) Cache { counter: fstree.NewSimpleCounter(), options: options{ - log: &logger.Logger{Logger: zap.NewNop()}, + log: logger.NewLoggerWrapper(zap.NewNop()), maxObjectSize: defaultMaxObjectSize, workersCount: defaultFlushWorkersCount, maxCacheSize: defaultMaxCacheSize, diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go index dda284439..94a0a40db 100644 --- a/pkg/local_object_storage/writecache/delete.go +++ b/pkg/local_object_storage/writecache/delete.go @@ -46,7 +46,7 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error { storageType = StorageTypeFSTree _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr}) if err == nil { - storagelog.Write(c.log, + storagelog.Write(ctx, c.log, storagelog.AddressField(addr.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree DELETE"), diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index bfa6aacb0..123eb4abc 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -80,7 +80,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { } }) if err != nil { - c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) + c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) } c.modeMtx.RUnlock() @@ -130,7 +130,7 @@ func (c *cache) reportFlushError(msg string, addr string, err error) { if c.reportError != nil { c.reportError(msg, err) } else { - c.log.Error(msg, + c.log.Error(context.Background(), msg, zap.String("address", addr), zap.Error(err)) } diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 59a4e4895..26f47e82e 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -40,7 +40,7 @@ func TestFlush(t *testing.T) { cnt := &atomic.Uint32{} return WithReportErrorFunc(func(msg string, err error) { cnt.Add(1) - testlogger.Warn(msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) + testlogger.Warn(context.Background(), msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) }), cnt } diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index d12dd603b..26658e9b8 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -60,7 +60,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error // flushCh is populated by `flush` with `modeMtx` taken, thus waiting until it is empty // guarantees that there are no in-fly operations. 
for len(c.flushCh) != 0 { - c.log.Info(logs.WritecacheWaitingForChannelsToFlush) + c.log.Info(ctx, logs.WritecacheWaitingForChannelsToFlush) time.Sleep(time.Second) } @@ -110,7 +110,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { return fmt.Errorf("failed to remove write-cache files: %w", err) } } else { - c.log.Info(logs.WritecacheShrinkSkippedNotEmpty) + c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty) } return nil } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index 66ac7805c..25c1694a8 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -41,7 +41,7 @@ type options struct { // WithLogger sets logger. func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = &logger.Logger{Logger: log.With(zap.String("component", "WriteCache"))} + o.log = log.With(zap.String("component", "WriteCache")) } } diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index c53067bea..7da5c4d3a 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -68,7 +68,7 @@ func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { return err } - storagelog.Write(c.log, + storagelog.Write(ctx, c.log, storagelog.AddressField(prm.Address.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree PUT"), diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index 2e52e5b20..a0e236cb7 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -43,9 +43,9 @@ func (c *cache) openStore(mod mode.ComponentMode) error { func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) { _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size}) if err != nil && !client.IsErrObjectNotFound(err) { - c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) + c.log.Error(ctx, logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err)) } else if err == nil { - storagelog.Write(c.log, + storagelog.Write(ctx, c.log, storagelog.AddressField(addr.EncodeToString()), storagelog.StorageTypeField(wcStorageType), storagelog.OpField("fstree DELETE"), diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 933f1039f..12c0e0842 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -199,7 +199,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, return 0, fmt.Errorf("could not invoke %s: %w", method, err) } - c.logger.Debug(logs.ClientNeoClientInvoke, + c.logger.Debug(context.Background(), logs.ClientNeoClientInvoke, zap.String("method", method), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) @@ -328,7 +328,7 @@ func (c *Client) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error return err } - c.logger.Debug(logs.ClientNativeGasTransferInvoke, + c.logger.Debug(context.Background(), logs.ClientNativeGasTransferInvoke, zap.String("to", receiver.StringLE()), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -362,7 +362,7 @@ func (c *Client) BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8 return err } - c.logger.Debug(logs.ClientBatchGasTransferInvoke, + c.logger.Debug(context.Background(), logs.ClientBatchGasTransferInvoke, 
zap.Strings("to", receiversLog), zap.Stringer("tx_hash", txHash.Reverse()), zap.Uint32("vub", vub)) @@ -389,7 +389,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(logs.ClientCantGetBlockchainHeight, + c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight, zap.String("error", err.Error())) return nil } @@ -403,7 +403,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(logs.ClientCantGetBlockchainHeight243, + c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight243, zap.String("error", err.Error())) return nil } diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index 08d16deb4..d061747bb 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -61,7 +61,7 @@ var ErrNoHealthyEndpoint = errors.New("no healthy endpoint") func defaultConfig() *cfg { return &cfg{ dialTimeout: defaultDialTimeout, - logger: &logger.Logger{Logger: zap.L()}, + logger: logger.NewLoggerWrapper(zap.L()), metrics: morphmetrics.NoopRegister{}, waitInterval: defaultWaitInterval, signer: &transaction.Signer{ @@ -130,10 +130,10 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er for cli.endpoints.curr, endpoint = range cli.endpoints.list { cli.client, act, err = cli.newCli(ctx, endpoint) if err != nil { - cli.logger.Warn(logs.FrostFSIRCouldntCreateRPCClientForEndpoint, + cli.logger.Warn(ctx, logs.FrostFSIRCouldntCreateRPCClientForEndpoint, zap.Error(err), zap.String("endpoint", endpoint.Address)) } else { - cli.logger.Info(logs.FrostFSIRCreatedRPCClientForEndpoint, + cli.logger.Info(ctx, logs.FrostFSIRCreatedRPCClientForEndpoint, zap.String("endpoint", endpoint.Address)) if cli.endpoints.curr > 0 && cli.cfg.switchInterval != 0 { cli.switchIsActive.Store(true) diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 10ed21582..708d3b39f 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -42,7 +42,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool { newEndpoint := c.endpoints.list[c.endpoints.curr] cli, act, err := c.newCli(ctx, newEndpoint) if err != nil { - c.logger.Warn(logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, + c.logger.Warn(ctx, logs.ClientCouldNotEstablishConnectionToTheSwitchedRPCNode, zap.String("endpoint", newEndpoint.Address), zap.Error(err), ) @@ -52,7 +52,7 @@ func (c *Client) SwitchRPC(ctx context.Context) bool { c.cache.invalidate() - c.logger.Info(logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, + c.logger.Info(ctx, logs.ClientConnectionToTheNewRPCNodeHasBeenEstablished, zap.String("endpoint", newEndpoint.Address)) c.client = cli @@ -122,7 +122,7 @@ mainLoop: cli, act, err := c.newCli(ctx, e) if err != nil { - c.logger.Warn(logs.ClientCouldNotCreateClientToTheHigherPriorityNode, + c.logger.Warn(ctx, logs.ClientCouldNotCreateClientToTheHigherPriorityNode, zap.String("endpoint", tryE), zap.Error(err), ) @@ -147,7 +147,7 @@ mainLoop: c.switchLock.Unlock() - c.logger.Info(logs.ClientSwitchedToTheHigherPriorityRPC, + c.logger.Info(ctx, logs.ClientSwitchedToTheHigherPriorityRPC, zap.String("endpoint", tryE)) return diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 2a500b31b..58c417fb1 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -1,6 +1,7 @@ package client import ( + "context" 
"crypto/elliptic" "encoding/binary" "errors" @@ -201,7 +202,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, // Transaction is already in mempool waiting to be processed. // This is an expected situation if we restart the service. - c.logger.Info(logs.ClientNotaryDepositHasAlreadyBeenMade, + c.logger.Info(context.Background(), logs.ClientNotaryDepositHasAlreadyBeenMade, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -209,7 +210,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, return util.Uint256{}, 0, nil } - c.logger.Info(logs.ClientNotaryDepositInvoke, + c.logger.Info(context.Background(), logs.ClientNotaryDepositInvoke, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -429,7 +430,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return err } - c.logger.Debug(logs.ClientNotaryRequestWithPreparedMainTXInvoked, + c.logger.Debug(context.Background(), logs.ClientNotaryRequestWithPreparedMainTXInvoked, zap.String("tx_hash", mainH.StringLE()), zap.Uint32("valid_until_block", untilActual), zap.String("fallback_hash", fbH.StringLE())) @@ -485,7 +486,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint return 0, err } - c.logger.Debug(logs.ClientNotaryRequestInvoked, + c.logger.Debug(context.Background(), logs.ClientNotaryRequestInvoked, zap.String("method", method), zap.Uint32("valid_until_block", untilActual), zap.String("tx_hash", mainH.StringLE()), diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index dd3c7d216..03bba8ab9 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -144,7 +144,7 @@ func (l *listener) Listen(ctx context.Context) { l.wg.Add(1) defer l.wg.Done() if err := l.listen(ctx, nil); err != nil { - l.log.Error(logs.EventCouldNotStartListenToEvents, + l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) } @@ -162,7 +162,7 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.wg.Add(1) defer l.wg.Done() if err := l.listen(ctx, intError); err != nil { - l.log.Error(logs.EventCouldNotStartListenToEvents, + l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, zap.String("error", err.Error()), ) l.sendError(ctx, intError, err) @@ -234,7 +234,7 @@ func (l *listener) sendError(ctx context.Context, intErr chan<- error, err error // in the same routine when shutting down node. 
select { case <-ctx.Done(): - l.log.Info(logs.EventStopEventListenerByContext, + l.log.Info(ctx, logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) return false @@ -251,43 +251,43 @@ loop: select { case err := <-subErrCh: if !l.sendError(ctx, intErr, err) { - l.log.Error(logs.EventStopEventListenerByError, zap.Error(err)) + l.log.Error(ctx, logs.EventStopEventListenerByError, zap.Error(err)) } break loop case <-ctx.Done(): - l.log.Info(logs.EventStopEventListenerByContext, + l.log.Info(ctx, logs.EventStopEventListenerByContext, zap.String("reason", ctx.Err().Error()), ) break loop case notifyEvent, ok := <-chs.NotificationsCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByNotificationChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByNotificationChannel) l.sendError(ctx, intErr, errNotificationSubscrConnectionTerminated) break loop } else if notifyEvent == nil { - l.log.Warn(logs.EventNilNotificationEventWasCaught) + l.log.Warn(ctx, logs.EventNilNotificationEventWasCaught) continue loop } l.handleNotifyEvent(notifyEvent) case notaryEvent, ok := <-chs.NotaryRequestsCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByNotaryChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel) l.sendError(ctx, intErr, errNotarySubscrConnectionTerminated) break loop } else if notaryEvent == nil { - l.log.Warn(logs.EventNilNotaryEventWasCaught) + l.log.Warn(ctx, logs.EventNilNotaryEventWasCaught) continue loop } l.handleNotaryEvent(notaryEvent) case b, ok := <-chs.BlockCh: if !ok { - l.log.Warn(logs.EventStopEventListenerByBlockChannel) + l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel) l.sendError(ctx, intErr, errBlockNotificationChannelClosed) break loop } else if b == nil { - l.log.Warn(logs.EventNilBlockWasCaught) + l.log.Warn(ctx, logs.EventNilBlockWasCaught) continue loop } @@ -302,7 +302,7 @@ func (l *listener) handleBlockEvent(b *block.Block) { l.blockHandlers[i](b) } }); err != nil { - l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -311,7 +311,7 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { if err := l.pool.Submit(func() { l.parseAndHandleNotary(notaryEvent) }); err != nil { - l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -320,7 +320,7 @@ func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEve if err := l.pool.Submit(func() { l.parseAndHandleNotification(notifyEvent) }); err != nil { - l.log.Warn(logs.EventListenerWorkerPoolDrained, + l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -347,7 +347,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if !ok { - log.Debug(logs.EventEventParserNotSet) + log.Debug(context.Background(), logs.EventEventParserNotSet) return } @@ -355,7 +355,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi // parse the notification event event, err := parser(notifyEvent) if err != nil { - log.Warn(logs.EventCouldNotParseNotificationEvent, + log.Warn(context.Background(), logs.EventCouldNotParseNotificationEvent, zap.String("error", err.Error()), ) @@ -368,7 +368,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if len(handlers) 
== 0 { - log.Info(logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, + log.Info(context.Background(), logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -388,13 +388,13 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { switch { case errors.Is(err, ErrTXAlreadyHandled): case errors.As(err, &expErr): - l.log.Warn(logs.EventSkipExpiredMainTXNotaryEvent, + l.log.Warn(context.Background(), logs.EventSkipExpiredMainTXNotaryEvent, zap.String("error", err.Error()), zap.Uint32("current_block_height", expErr.CurrentBlockHeight), zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight), ) default: - l.log.Warn(logs.EventCouldNotPrepareAndValidateNotaryEvent, + l.log.Warn(context.Background(), logs.EventCouldNotPrepareAndValidateNotaryEvent, zap.String("error", err.Error()), ) } @@ -418,7 +418,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Debug(logs.EventNotaryParserNotSet) + log.Debug(context.Background(), logs.EventNotaryParserNotSet) return } @@ -426,7 +426,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { // parse the notary event event, err := parser(notaryEvent) if err != nil { - log.Warn(logs.EventCouldNotParseNotaryEvent, + log.Warn(context.Background(), logs.EventCouldNotParseNotaryEvent, zap.String("error", err.Error()), ) @@ -439,7 +439,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Info(logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, + log.Info(context.Background(), logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) @@ -461,7 +461,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { parser := pi.parser() if parser == nil { - log.Info(logs.EventIgnoreNilEventParser) + log.Info(context.Background(), logs.EventIgnoreNilEventParser) return } @@ -470,7 +470,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { // check if the listener was started if l.started { - log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreParser) + log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreParser) return } @@ -479,7 +479,7 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { l.notificationParsers[pi.scriptHashWithType] = pi.parser() } - log.Debug(logs.EventRegisteredNewEventParser) + log.Debug(context.Background(), logs.EventRegisteredNewEventParser) } // RegisterNotificationHandler registers the handler for particular notification event of contract. @@ -494,7 +494,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn(logs.EventIgnoreNilEventHandler) + log.Warn(context.Background(), logs.EventIgnoreNilEventHandler) return } @@ -504,7 +504,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn(logs.EventIgnoreHandlerOfEventWoParser) + log.Warn(context.Background(), logs.EventIgnoreHandlerOfEventWoParser) return } @@ -516,7 +516,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { ) l.mtx.Unlock() - log.Debug(logs.EventRegisteredNewEventHandler) + log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) } // EnableNotarySupport enables notary request listening. 
Passed hash is @@ -557,7 +557,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { parser := pi.parser() if parser == nil { - log.Info(logs.EventIgnoreNilNotaryEventParser) + log.Info(context.Background(), logs.EventIgnoreNilNotaryEventParser) return } @@ -566,7 +566,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { // check if the listener was started if l.started { - log.Warn(logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) + log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) return } @@ -575,7 +575,7 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() } - log.Info(logs.EventRegisteredNewEventParser) + log.Info(context.Background(), logs.EventRegisteredNewEventParser) } // RegisterNotaryHandler registers the handler for particular notification notary request event. @@ -595,7 +595,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { handler := hi.Handler() if handler == nil { - log.Warn(logs.EventIgnoreNilNotaryEventHandler) + log.Warn(context.Background(), logs.EventIgnoreNilNotaryEventHandler) return } @@ -605,7 +605,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.mtx.RUnlock() if !ok { - log.Warn(logs.EventIgnoreHandlerOfNotaryEventWoParser) + log.Warn(context.Background(), logs.EventIgnoreHandlerOfNotaryEventWoParser) return } @@ -614,7 +614,7 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { l.notaryHandlers[hi.notaryRequestTypes] = hi.Handler() l.mtx.Unlock() - log.Info(logs.EventRegisteredNewEventHandler) + log.Info(context.Background(), logs.EventRegisteredNewEventHandler) } // Stop closes subscription channel with remote neo node. @@ -628,7 +628,7 @@ func (l *listener) Stop() { func (l *listener) RegisterBlockHandler(handler BlockHandler) { if handler == nil { - l.log.Warn(logs.EventIgnoreNilBlockHandler) + l.log.Warn(context.Background(), logs.EventIgnoreNilBlockHandler) return } diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index f3b6443fb..31bbf4432 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -1,6 +1,7 @@ package event import ( + "context" "errors" "fmt" @@ -89,7 +90,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle h(e) }) if err != nil { - log.Warn(logs.EventCouldNotSubmitHandlerToWorkerPool, + log.Warn(context.Background(), logs.EventCouldNotSubmitHandlerToWorkerPool, zap.String("error", err.Error()), ) } diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go index ee5466a7d..3a2da6757 100644 --- a/pkg/morph/subscriber/subscriber.go +++ b/pkg/morph/subscriber/subscriber.go @@ -245,9 +245,9 @@ routeloop: } func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) bool { - s.log.Info(logs.RPConnectionLost) + s.log.Info(ctx, logs.RPConnectionLost) if !s.client.SwitchRPC(ctx) { - s.log.Error(logs.RPCNodeSwitchFailure) + s.log.Error(ctx, logs.RPCNodeSwitchFailure) return false } @@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific if s.subscribedToNewBlocks { _, err = s.client.ReceiveBlocks(blCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(context.Background(), logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- 
*state.ContainedNotific for contract := range s.subscribedEvents { _, err = s.client.ReceiveExecutionNotifications(contract, notifCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific for signer := range s.subscribedNotaryEvents { _, err = s.client.ReceiveNotaryRequests(signer, notaryCh) if err != nil { - s.log.Error(logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 86f9cb893..cc792e23d 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -53,7 +53,7 @@ func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedC } if s.log == nil { - s.log = &logger.Logger{Logger: zap.NewNop()} + s.log = logger.NewLoggerWrapper(zap.NewNop()) } return s diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index 56748b08c..93ad3dc46 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -64,7 +64,7 @@ func New(next object.ServiceServer, opts ...Option, ) Service { cfg := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), next: next, nm: nm, irFetcher: irf, diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 6689557ee..8b92d34ed 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -59,7 +59,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, if n.Traversal.submitPrimaryPlacementFinish() { err := n.ForEachNode(ctx, f) if err != nil { - n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) + n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) // we don't fail primary operation because of broadcast failure } } diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 571bae7bb..64115b86b 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -162,7 +162,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index if err == nil { return nil } - e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup())) + e.Config.Logger.Warn(ctx, logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup())) lastErr = err } } @@ -275,7 +275,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx if err == nil { return nil } - e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) @@ -299,7 +299,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj 
*objectSDK.Object, partIdx if err == nil { return nil } - e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) @@ -323,7 +323,7 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx if err == nil { return nil } - e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), + e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index 88454625d..8aaff670c 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -33,13 +33,13 @@ func (s *Service) Delete(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(logs.ServingRequest) + exec.log.Debug(ctx, logs.ServingRequest) if err := exec.executeLocal(ctx); err != nil { - exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) return err } - exec.log.Debug(logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) return nil } diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index ec771320e..36a17bde2 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -34,13 +34,13 @@ type execCtx struct { } func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = &logger.Logger{Logger: l.With( + exec.log = l.With( zap.String("request", "DELETE"), zap.Stringer("address", exec.address()), zap.Bool("local", exec.isLocal()), zap.Bool("with session", exec.prm.common.SessionToken() != nil), zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - )} + ) } func (exec *execCtx) isLocal() bool { @@ -83,16 +83,16 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error { exec.splitInfo = errSplitInfo.SplitInfo() exec.tombstone.SetSplitID(exec.splitInfo.SplitID()) - exec.log.Debug(logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) + exec.log.Debug(ctx, logs.DeleteSplitInfoSuccessfullyFormedCollectingMembers) if err := exec.collectMembers(ctx); err != nil { return err } - exec.log.Debug(logs.DeleteMembersSuccessfullyCollected) + exec.log.Debug(ctx, logs.DeleteMembersSuccessfullyCollected) return nil case errors.As(err, &errECInfo): - exec.log.Debug(logs.DeleteECObjectReceived) + exec.log.Debug(ctx, logs.DeleteECObjectReceived) return nil } @@ -108,7 +108,7 @@ func (exec *execCtx) formExtendedInfo(ctx context.Context) error { func (exec *execCtx) collectMembers(ctx context.Context) error { if exec.splitInfo == nil { - exec.log.Debug(logs.DeleteNoSplitInfoObjectIsPHY) + exec.log.Debug(ctx, logs.DeleteNoSplitInfoObjectIsPHY) return nil } @@ -131,7 +131,7 @@ func (exec *execCtx) collectMembers(ctx context.Context) error { func (exec *execCtx) collectChain(ctx context.Context) error { var chain []oid.ID - exec.log.Debug(logs.DeleteAssemblingChain) 
+ exec.log.Debug(ctx, logs.DeleteAssemblingChain) for prev, withPrev := exec.splitInfo.LastPart(); withPrev; { chain = append(chain, prev) @@ -152,7 +152,7 @@ func (exec *execCtx) collectChain(ctx context.Context) error { } func (exec *execCtx) collectChildren(ctx context.Context) error { - exec.log.Debug(logs.DeleteCollectingChildren) + exec.log.Debug(ctx, logs.DeleteCollectingChildren) children, err := exec.svc.header.children(ctx, exec) if err != nil { @@ -165,7 +165,7 @@ func (exec *execCtx) collectChildren(ctx context.Context) error { } func (exec *execCtx) supplementBySplitID(ctx context.Context) error { - exec.log.Debug(logs.DeleteSupplementBySplitID) + exec.log.Debug(ctx, logs.DeleteSupplementBySplitID) chain, err := exec.svc.searcher.splitMembers(ctx, exec) if err != nil { diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go index 2c3c47f49..01b2d9b3f 100644 --- a/pkg/services/object/delete/local.go +++ b/pkg/services/object/delete/local.go @@ -10,13 +10,13 @@ import ( ) func (exec *execCtx) executeLocal(ctx context.Context) error { - exec.log.Debug(logs.DeleteFormingTombstoneStructure) + exec.log.Debug(ctx, logs.DeleteFormingTombstoneStructure) if err := exec.formTombstone(ctx); err != nil { return err } - exec.log.Debug(logs.DeleteTombstoneStructureSuccessfullyFormedSaving) + exec.log.Debug(ctx, logs.DeleteTombstoneStructureSuccessfullyFormedSaving) return exec.saveTombstone(ctx) } @@ -33,7 +33,7 @@ func (exec *execCtx) formTombstone(ctx context.Context) error { ) exec.addMembers([]oid.ID{exec.address().Object()}) - exec.log.Debug(logs.DeleteFormingSplitInfo) + exec.log.Debug(ctx, logs.DeleteFormingSplitInfo) if err := exec.formExtendedInfo(ctx); err != nil { return fmt.Errorf("form extended info: %w", err) diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index e4f7a8c50..867d3f4ef 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -72,7 +72,7 @@ func New(gs *getsvc.Service, opts ...Option, ) *Service { c := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), header: &headSvcWrapper{s: gs}, searcher: &searchSvcWrapper{s: ss}, placer: &putSvcWrapper{s: ps}, @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "objectSDK.Delete service"))} + c.log = l.With(zap.String("component", "objectSDK.Delete service")) } } diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index 9f17f1e4c..e164627d2 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -13,7 +13,7 @@ import ( func (r *request) assemble(ctx context.Context) { if !r.canAssembleComplexObject() { - r.log.Debug(logs.GetCanNotAssembleTheObject) + r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) return } @@ -35,23 +35,23 @@ func (r *request) assemble(ctx context.Context) { // `execCtx` so it should be disabled there. 
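Since With now returns *logger.Logger directly, request-scoped loggers like the one built in setLogger above can be derived without re-wrapping. A small sketch of that pattern under the same assumption (the field values here are illustrative, not taken from the patch):

package example

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"go.uber.org/zap"
)

// requestLogger derives a per-request logger; the result of With is already
// a *logger.Logger, so no &logger.Logger{Logger: ...} wrapping remains.
func requestLogger(base *logger.Logger, addr fmt.Stringer, local bool) *logger.Logger {
	return base.With(
		zap.String("request", "GET"),
		zap.Stringer("address", addr),
		zap.Bool("local", local),
	)
}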
r.disableForwarding() - r.log.Debug(logs.GetTryingToAssembleTheObject) + r.log.Debug(ctx, logs.GetTryingToAssembleTheObject) r.prm.common = r.prm.common.WithLocalOnly(false) assembler := newAssembler(r.address(), r.splitInfo(), r.ctxRange(), r, r.headOnly()) - r.log.Debug(logs.GetAssemblingSplittedObject, + r.log.Debug(ctx, logs.GetAssemblingSplittedObject, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) - defer r.log.Debug(logs.GetAssemblingSplittedObjectCompleted, + defer r.log.Debug(ctx, logs.GetAssemblingSplittedObjectCompleted, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) obj, err := assembler.Assemble(ctx, r.prm.objWriter) if err != nil { - r.log.Warn(logs.GetFailedToAssembleSplittedObject, + r.log.Warn(ctx, logs.GetFailedToAssembleSplittedObject, zap.Error(err), zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go index 03f913bbf..8ab423c87 100644 --- a/pkg/services/object/get/assembleec.go +++ b/pkg/services/object/get/assembleec.go @@ -12,7 +12,7 @@ import ( func (r *request) assembleEC(ctx context.Context) { if r.isRaw() { - r.log.Debug(logs.GetCanNotAssembleTheObject) + r.log.Debug(ctx, logs.GetCanNotAssembleTheObject) return } @@ -34,7 +34,7 @@ func (r *request) assembleEC(ctx context.Context) { // `execCtx` so it should be disabled there. r.disableForwarding() - r.log.Debug(logs.GetTryingToAssembleTheECObject) + r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject) // initialize epoch number ok := r.initEpoch() @@ -45,18 +45,18 @@ func (r *request) assembleEC(ctx context.Context) { r.prm.common = r.prm.common.WithLocalOnly(false) assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch) - r.log.Debug(logs.GetAssemblingECObject, + r.log.Debug(ctx, logs.GetAssemblingECObject, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) - defer r.log.Debug(logs.GetAssemblingECObjectCompleted, + defer r.log.Debug(ctx, logs.GetAssemblingECObjectCompleted, zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), ) obj, err := assembler.Assemble(ctx, r.prm.objWriter) if err != nil && !errors.As(err, new(*objectSDK.ECInfoError)) { - r.log.Warn(logs.GetFailedToAssembleECObject, + r.log.Warn(ctx, logs.GetFailedToAssembleECObject, zap.Error(err), zap.Uint64("range_offset", r.ctxRange().GetOffset()), zap.Uint64("range_length", r.ctxRange().GetLength()), diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index 44d9af3a2..b0895e13e 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -155,7 +155,7 @@ func (a *assemblerec) retrieveParts(ctx context.Context, trav *placement.Travers parts, err := a.processECNodesRequests(ctx, remoteNodes, dataCount, parityCount) if err != nil { - a.log.Debug(logs.GetUnableToGetAllPartsECObject, zap.Error(err)) + a.log.Debug(ctx, logs.GetUnableToGetAllPartsECObject, zap.Error(err)) } return parts } @@ -229,7 +229,7 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object var objID oid.ID err := objID.ReadFromV2(ch.ID) if err != nil { - a.log.Error(logs.GetUnableToHeadPartECObject, 
zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) + a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) return nil } var addr oid.Address @@ -239,13 +239,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object if a.head { object, err = a.localStorage.Head(ctx, addr, false) if err != nil { - a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } else { object, err = a.localStorage.Get(ctx, addr) if err != nil { - a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } @@ -259,11 +259,11 @@ func (a *assemblerec) tryGetChunkListFromNode(ctx context.Context, node client.N var errECInfo *objectSDK.ECInfoError _, err := a.remoteStorage.headObjectFromNode(ctx, a.addr, node, true) if err == nil { - a.log.Error(logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey()))) + a.log.Error(ctx, logs.GetUnexpectedECObject, zap.String("node", hex.EncodeToString(node.PublicKey()))) return nil } if !errors.As(err, &errECInfo) { - a.log.Warn(logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToHeadPartsECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err)) return nil } result := make([]objectSDK.ECChunk, 0, len(errECInfo.ECInfo().Chunks)) @@ -277,7 +277,7 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli var objID oid.ID err := objID.ReadFromV2(ch.ID) if err != nil { - a.log.Error(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) + a.log.Error(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Uint32("part_index", ch.Index), zap.Error(fmt.Errorf("invalid object ID: %w", err))) return nil } var addr oid.Address @@ -287,13 +287,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli if a.head { object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) if err != nil { - a.log.Warn(logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } else { object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) if err != nil { - a.log.Warn(logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) + a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index 
034768c81..2b84c5b32 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -10,13 +10,13 @@ import ( func (r *request) executeOnContainer(ctx context.Context) { if r.isLocal() { - r.log.Debug(logs.GetReturnResultDirectly) + r.log.Debug(ctx, logs.GetReturnResultDirectly) return } lookupDepth := r.netmapLookupDepth() - r.log.Debug(logs.TryingToExecuteInContainer, + r.log.Debug(ctx, logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) @@ -46,7 +46,7 @@ func (r *request) executeOnContainer(ctx context.Context) { } func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool { - r.log.Debug(logs.ProcessEpoch, + r.log.Debug(ctx, logs.ProcessEpoch, zap.Uint64("number", r.curProcEpoch), ) @@ -67,7 +67,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool for { addrs := traverser.Next() if len(addrs) == 0 { - r.log.Debug(logs.NoMoreNodesAbortPlacementIteration) + r.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) return false } @@ -75,7 +75,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool for i := range addrs { select { case <-ctx.Done(): - r.log.Debug(logs.InterruptPlacementIterationByContext, + r.log.Debug(ctx, logs.InterruptPlacementIterationByContext, zap.Error(ctx.Err()), ) @@ -91,7 +91,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool client.NodeInfoFromNetmapElement(&info, addrs[i]) if r.processNode(ctx, info) { - r.log.Debug(logs.GetCompletingTheOperation) + r.log.Debug(ctx, logs.GetCompletingTheOperation) return true } } diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 03b7f8bf2..557e9a028 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -91,7 +91,7 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { } func (exec *request) execute(ctx context.Context) { - exec.log.Debug(logs.ServingRequest) + exec.log.Debug(ctx, logs.ServingRequest) // perform local operation exec.executeLocal(ctx) @@ -103,23 +103,23 @@ func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result switch exec.status { case statusOK: - exec.log.Debug(logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug(logs.GetRequestedObjectWasMarkedAsRemoved) + exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug(logs.GetRequestedObjectIsVirtual) + exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) exec.assemble(ctx) case statusOutOfRange: - exec.log.Debug(logs.GetRequestedRangeIsOutOfObjectBounds) + exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - exec.log.Debug(logs.GetRequestedObjectIsEC) + exec.log.Debug(ctx, logs.GetRequestedObjectIsEC) if exec.isRaw() && execCnr { exec.executeOnContainer(ctx) exec.analyzeStatus(ctx, false) } exec.assembleEC(ctx) default: - exec.log.Debug(logs.OperationFinishedWithError, + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(exec.err), ) var errAccessDenied *apistatus.ObjectAccessDenied diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go index 1cd5e549c..cfabb082f 100644 --- a/pkg/services/object/get/local.go +++ b/pkg/services/object/get/local.go @@ -31,7 +31,7 @@ func (r *request) executeLocal(ctx context.Context) { r.status = statusUndefined r.err = err - 
r.log.Debug(logs.GetLocalGetFailed, zap.Error(err)) + r.log.Debug(ctx, logs.GetLocalGetFailed, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index f2639f8e6..b6a83fd0c 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -18,7 +18,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { ctx, span := tracing.StartSpanFromContext(ctx, "getService.processNode") defer span.End() - r.log.Debug(logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) + r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) rs, ok := r.getRemoteStorage(info) if !ok { @@ -35,7 +35,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { switch { default: - r.log.Debug(logs.GetRemoteCallFailed, zap.Error(err)) + r.log.Debug(ctx, logs.GetRemoteCallFailed, zap.Error(err)) if r.status != statusEC { // for raw requests, continue to collect other parts r.status = statusUndefined diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index 1a7a43a35..bba767d2d 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -47,14 +47,14 @@ func (r *request) setLogger(l *logger.Logger) { req = "GET_RANGE" } - r.log = &logger.Logger{Logger: l.With( + r.log = l.With( zap.String("request", req), zap.Stringer("address", r.address()), zap.Bool("raw", r.isRaw()), zap.Bool("local", r.isLocal()), zap.Bool("with session", r.prm.common.SessionToken() != nil), zap.Bool("with bearer", r.prm.common.BearerToken() != nil), - )} + ) } func (r *request) isLocal() bool { @@ -129,7 +129,7 @@ func (r *request) initEpoch() bool { r.status = statusUndefined r.err = err - r.log.Debug(logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) + r.log.Debug(context.Background(), logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) return false case err == nil: @@ -148,7 +148,7 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) + r.log.Debug(context.Background(), logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) return nil, false case err == nil: @@ -162,7 +162,7 @@ func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, boo r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotConstructRemoteNodeClient) + r.log.Debug(context.Background(), logs.GetCouldNotConstructRemoteNodeClient) return nil, false } @@ -185,7 +185,7 @@ func (r *request) writeCollectedHeader(ctx context.Context) bool { r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotWriteHeader, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotWriteHeader, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil @@ -206,7 +206,7 @@ func (r *request) writeObjectPayload(ctx context.Context, obj *objectSDK.Object) r.status = statusUndefined r.err = err - r.log.Debug(logs.GetCouldNotWritePayloadChunk, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotWritePayloadChunk, zap.Error(err)) case err == nil: r.status = statusOK r.err = nil diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index 3413abeb7..9ec10b5f2 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -34,7 +34,7 @@ func New( result := 
&Service{ keyStore: ks, epochSource: es, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), localStorage: &engineLocalStorage{ engine: e, }, @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get service"))} + s.log = l.With(zap.String("component", "Object.Get service")) } } diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index e8e82ddd9..7d26a38c3 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -125,14 +125,14 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2. var addrGr network.AddressGroup if err := addrGr.FromIterator(network.NodeEndpointsIterator(node)); err != nil { - s.log.Warn(logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) + s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeEndpoints, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) continue } var extAddr network.AddressGroup if len(node.ExternalAddresses()) > 0 { if err := extAddr.FromStringSlice(node.ExternalAddresses()); err != nil { - s.log.Warn(logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) + s.log.Warn(ctx, logs.GetSvcV2FailedToParseNodeExternalAddresses, zap.String("node_public_key", hex.EncodeToString(node.PublicKey()))) continue } } @@ -150,12 +150,12 @@ func (s *Service) forwardGetRangeHashRequest(ctx context.Context, req *objectV2. if firstErr == nil { firstErr = err } - s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromNode, + s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromNode, zap.String("node_public_key", hex.EncodeToString(node.PublicKey())), zap.Stringer("address", params.address), zap.Error(err)) } - s.log.Debug(logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr)) + s.log.Debug(ctx, logs.GetSvcV2FailedToGetRangeHashFromAllOfContainerNodes, zap.Stringer("address", params.address), zap.Error(firstErr)) if firstErr != nil { return nil, firstErr } diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index 24b2f0099..fc483b74b 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -60,7 +60,7 @@ func NewService(svc *getsvc.Service, netmapSource: netmapSource, announcedKeys: announcedKeys, contSource: contSource, - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), } for i := range opts { @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Get V2 service"))} + c.log = l.With(zap.String("component", "Object.Get V2 service")) } } diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 8cf4f0d62..5cc0a5722 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -29,7 +29,7 @@ func NewService(ks *objutil.KeyStorage, c := &objectwriter.Config{ RemotePool: util.NewPseudoWorkerPool(), LocalPool: util.NewPseudoWorkerPool(), - Logger: &logger.Logger{Logger: zap.L()}, + Logger: 
logger.NewLoggerWrapper(zap.L()), KeyStorage: ks, ClientConstructor: cc, MaxSizeSrc: ms, diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 3a0b3901f..36b0bd54c 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.Config.Logger.Warn(logs.PutSingleRedirectFailure, + s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index 39259b0ca..999a3cc9e 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -15,7 +15,7 @@ import ( func (exec *execCtx) executeOnContainer(ctx context.Context) error { lookupDepth := exec.netmapLookupDepth() - exec.log.Debug(logs.TryingToExecuteInContainer, + exec.log.Debug(ctx, logs.TryingToExecuteInContainer, zap.Uint64("netmap lookup depth", lookupDepth), ) @@ -44,7 +44,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error { } func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { - exec.log.Debug(logs.ProcessEpoch, + exec.log.Debug(ctx, logs.ProcessEpoch, zap.Uint64("number", exec.curProcEpoch), ) @@ -59,7 +59,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { for { addrs := traverser.Next() if len(addrs) == 0 { - exec.log.Debug(logs.NoMoreNodesAbortPlacementIteration) + exec.log.Debug(ctx, logs.NoMoreNodesAbortPlacementIteration) break } @@ -72,7 +72,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { defer wg.Done() select { case <-ctx.Done(): - exec.log.Debug(logs.InterruptPlacementIterationByContext, + exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext, zap.String("error", ctx.Err().Error())) return default: @@ -82,17 +82,17 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { client.NodeInfoFromNetmapElement(&info, addrs[i]) - exec.log.Debug(logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) + exec.log.Debug(ctx, logs.ProcessingNode, zap.String("key", hex.EncodeToString(addrs[i].PublicKey()))) c, err := exec.svc.clientConstructor.get(info) if err != nil { - exec.log.Debug(logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error())) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { - exec.log.Debug(logs.SearchRemoteOperationFailed, + exec.log.Debug(ctx, logs.SearchRemoteOperationFailed, zap.String("error", err.Error())) return @@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { err = exec.writeIDList(ids) mtx.Unlock() if err != nil { - exec.log.Debug(logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error())) return } }(i) diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index 4a2c04ecd..eb9635f14 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -19,13 +19,13 @@ type execCtx struct { } func (exec *execCtx) setLogger(l *logger.Logger) { - exec.log = &logger.Logger{Logger: l.With( + exec.log = l.With( 
zap.String("request", "SEARCH"), zap.Stringer("container", exec.containerID()), zap.Bool("local", exec.isLocal()), zap.Bool("with session", exec.prm.common.SessionToken() != nil), zap.Bool("with bearer", exec.prm.common.BearerToken() != nil), - )} + ) } func (exec *execCtx) isLocal() bool { diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index cfaed13b8..bc59d0394 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -11,7 +11,7 @@ import ( func (exec *execCtx) executeLocal(ctx context.Context) error { ids, err := exec.svc.localStorage.search(ctx, exec) if err != nil { - exec.log.Debug(logs.SearchLocalOperationFailed, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.String("error", err.Error())) return err } diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index 4a5c414d5..bb5c720ff 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -20,13 +20,13 @@ func (s *Service) Search(ctx context.Context, prm Prm) error { } func (exec *execCtx) execute(ctx context.Context) error { - exec.log.Debug(logs.ServingRequest) + exec.log.Debug(ctx, logs.ServingRequest) err := exec.executeLocal(ctx) exec.logResult(err) if exec.isLocal() { - exec.log.Debug(logs.SearchReturnResultDirectly) + exec.log.Debug(ctx, logs.SearchReturnResultDirectly) return err } @@ -38,8 +38,8 @@ func (exec *execCtx) execute(ctx context.Context) error { func (exec *execCtx) logResult(err error) { switch { default: - exec.log.Debug(logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(context.Background(), logs.OperationFinishedWithError, zap.String("error", err.Error())) case err == nil: - exec.log.Debug(logs.OperationFinishedSuccessfully) + exec.log.Debug(context.Background(), logs.OperationFinishedSuccessfully) } } diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index 7700f78d8..77d25357a 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -69,7 +69,7 @@ func New(e *engine.StorageEngine, opts ...Option, ) *Service { c := &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), clientConstructor: &clientConstructorWrapper{ constructor: cc, }, @@ -94,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = &logger.Logger{Logger: l.With(zap.String("component", "Object.Search service"))} + c.log = l.With(zap.String("component", "Object.Search service")) } } diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index 92beedaa7..5075344a4 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -1,6 +1,8 @@ package util import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -9,7 +11,7 @@ import ( // LogServiceError writes error message of object service to provided logger. 
func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error(logs.UtilObjectServiceError, + l.Error(context.Background(), logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), zap.String("error", err.Error()), @@ -18,7 +20,7 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er // LogWorkerPoolError writes debug error message of object worker pool to provided logger. func LogWorkerPoolError(l *logger.Logger, req string, err error) { - l.Error(logs.UtilCouldNotPushTaskToWorkerPool, + l.Error(context.Background(), logs.UtilCouldNotPushTaskToWorkerPool, zap.String("request", req), zap.String("error", err.Error()), ) diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index 7476dbd48..6a9706b9e 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -57,7 +57,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr ts, err := g.tsSource.Tombstone(ctx, a, epoch) if err != nil { - log.Warn( + log.Warn(ctx, logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) @@ -77,7 +77,7 @@ func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch if atr.Key() == objectV2.SysAttributeExpEpoch { epoch, err := strconv.ParseUint(atr.Value(), 10, 64) if err != nil { - g.log.Warn( + g.log.Warn(context.Background(), logs.TombstoneExpirationParseFailure, zap.Error(err), ) diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 9d33e8179..67ddf316f 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -23,7 +23,7 @@ type Option func(*cfg) func defaultCfg() *cfg { return &cfg{ - log: &logger.Logger{Logger: zap.NewNop()}, + log: logger.NewLoggerWrapper(zap.NewNop()), cacheSize: defaultLRUCacheSize, } } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index dbc9ea53c..c82680a1e 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -86,7 +86,7 @@ func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectc } if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address), ) @@ -151,7 +151,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe } else if client.IsErrNodeUnderMaintenance(err) { shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) } else { - p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", addr), zap.String("error", err.Error()), ) @@ -178,7 +178,7 @@ func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache shortage-- uncheckedCopies++ - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, + p.log.Debug(context.Background(), logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(node)), ) return shortage, uncheckedCopies @@ -189,7 +189,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address ) { switch { case shortage > 0: - 
p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, + p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", addr), zap.Uint32("shortage", shortage), ) @@ -205,7 +205,7 @@ func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address case uncheckedCopies > 0: // If we have more copies than needed, but some of them are from the maintenance nodes, // save the local copy. - p.log.Debug(logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, + p.log.Debug(ctx, logs.PolicerSomeOfTheCopiesAreStoredOnNodesUnderMaintenance, zap.Int("count", uncheckedCopies)) case uncheckedCopies == 0: diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index 6d2c153c9..cb583f1d3 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -59,7 +59,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec p.processRepNodes(ctx, c, objInfo, nn[0], uint32(len(nn[0])), checkedNodes) if !c.needLocalCopy && c.removeLocalCopy { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address), ) @@ -91,7 +91,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object p.adjustECPlacement(ctx, objInfo, nn[0], cnr) if res.removeLocal { - p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) + p.log.Info(ctx, logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address)) p.cbRedundantCopy(ctx, objInfo.Address) } return nil @@ -109,7 +109,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n } if requiredNode.Status().IsMaintenance() { // consider maintenance mode has object, but do not drop local copy - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) return ecChunkProcessResult{} } @@ -120,7 +120,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n if err == nil { removeLocalChunk = true } else if client.IsErrObjectNotFound(err) { - p.log.Debug(logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1)) + p.log.Debug(ctx, logs.PolicerShortageOfObjectCopiesDetected, zap.Stringer("object", objInfo.Address), zap.Uint32("shortage", 1)) task := replicator.Task{ NumCopies: 1, Addr: objInfo.Address, @@ -129,9 +129,9 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n p.replicator.HandleReplicationTask(ctx, task, newNodeCache()) } else if client.IsErrNodeUnderMaintenance(err) { // consider maintenance mode has object, but do not drop local copy - p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) } else { - p.log.Error(logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error())) + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error())) } return ecChunkProcessResult{ @@ -146,13 +146,13 @@ func (p *Policer) pullRequiredECChunks(ctx 
context.Context, objInfo objectcore.I requiredChunkIndexes := p.collectRequiredECChunks(nodes, objInfo) if len(requiredChunkIndexes) == 0 { - p.log.Info(logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID)) + p.log.Info(ctx, logs.PolicerNodeIsNotECObjectNode, zap.Stringer("object", objInfo.ECInfo.ParentID)) return true } err := p.resolveLocalECChunks(ctx, parentAddress, requiredChunkIndexes) if err != nil { - p.log.Error(logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToGetLocalECChunks, zap.Error(err), zap.Stringer("object", parentAddress)) return false } if len(requiredChunkIndexes) == 0 { @@ -224,11 +224,11 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A var chunkID oid.ID if err := chunkID.ReadFromV2(ch.ID); err != nil { - p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) return false } if existed, ok := indexToObjectID[ch.Index]; ok && existed != chunkID { - p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed), + p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", existed), zap.Stringer("second", chunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index)) return false } @@ -239,7 +239,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A for index, candidates := range required { if len(candidates) == 0 { - p.log.Error(logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index)) + p.log.Error(ctx, logs.PolicerMissingECChunk, zap.Stringer("object", parentAddress), zap.Uint32("index", index)) return false } } @@ -271,18 +271,18 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info resolved[ch.Index] = append(resolved[ch.Index], n) var ecInfoChunkID oid.ID if err := ecInfoChunkID.ReadFromV2(ch.ID); err != nil { - p.log.Error(logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) + p.log.Error(ctx, logs.PolicerFailedToDecodeECChunkID, zap.Error(err), zap.Stringer("object", parentAddress)) return } if chunkID, exist := chunkIDs[ch.Index]; exist && chunkID != ecInfoChunkID { - p.log.Error(logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID), + p.log.Error(ctx, logs.PolicerDifferentObjectIDForTheSameECChunk, zap.Stringer("first", chunkID), zap.Stringer("second", ecInfoChunkID), zap.Stringer("object", parentAddress), zap.Uint32("index", ch.Index)) return } chunkIDs[ch.Index] = ecInfoChunkID } } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { - p.log.Warn(logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, Addr: objInfo.Address, @@ -299,7 +299,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info for i := range resolved { found = append(found, i) } - p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, 
zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) + p.log.Error(ctx, logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found)) return } p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr) @@ -310,7 +310,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, ) { c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount())) if err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } parts := p.collectExistedChunks(ctx, objInfo, existedChunks, parentAddress, chunkIDs) @@ -319,7 +319,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, } key, err := p.keyStorage.GetKey(nil) if err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } required := make([]bool, len(parts)) @@ -329,7 +329,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, } } if err := c.ReconstructParts(parts, required, key); err != nil { - p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err)) return } for idx, part := range parts { @@ -377,7 +377,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I if err == nil { break } - p.log.Warn(logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey()))) + p.log.Warn(ctx, logs.PolicerCouldNotGetChunk, zap.Stringer("object", parentAddress), zap.Stringer("chunkID", objID), zap.Error(err), zap.String("node", hex.EncodeToString(node.PublicKey()))) } if obj != nil { parts[idx] = obj @@ -386,7 +386,7 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I }) } if err := errGroup.Wait(); err != nil { - p.log.Error(logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err)) + p.log.Error(ctx, logs.PolicerCouldNotGetChunks, zap.Stringer("object", parentAddress), zap.Error(err)) return nil } return parts diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go index 336f7a0ab..5d59604c2 100644 --- a/pkg/services/policer/option.go +++ b/pkg/services/policer/option.go @@ -91,7 +91,7 @@ type cfg struct { func defaultCfg() *cfg { return &cfg{ - log: &logger.Logger{Logger: zap.L()}, + log: logger.NewLoggerWrapper(zap.L()), batchSize: 10, cacheSize: 1024, // 1024 * address size = 1024 * 64 = 64 MiB sleepDuration: 1 * time.Second, diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index 363c0b922..4e8bacfec 100644 --- a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -4,7 +4,6 @@ import ( "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -55,7 +54,7 @@ func New(opts 
...Option) *Policer { opts[i](c) } - c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))} + c.log = c.log.With(zap.String("component", "Object Policer")) cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) if err != nil { diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index a5ebb0010..80a87ade9 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -14,7 +14,7 @@ import ( func (p *Policer) Run(ctx context.Context) { p.shardPolicyWorker(ctx) - p.log.Info(logs.PolicerRoutineStopped) + p.log.Info(ctx, logs.PolicerRoutineStopped) } func (p *Policer) shardPolicyWorker(ctx context.Context) { @@ -33,7 +33,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { time.Sleep(p.sleepDuration) // finished whole cycle, sleep a bit continue } - p.log.Warn(logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) + p.log.Warn(ctx, logs.PolicerFailureAtObjectSelectForReplication, zap.Error(err)) } skipMap := newSkipMap() @@ -59,7 +59,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { if p.objsInWork.add(addr.Address) { err := p.processObject(ctx, addr) if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) { - p.log.Error(logs.PolicerUnableToProcessObj, + p.log.Error(ctx, logs.PolicerUnableToProcessObj, zap.Stringer("object", addr.Address), zap.String("error", err.Error())) } @@ -69,7 +69,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { } }) if err != nil { - p.log.Warn(logs.PolicerPoolSubmission, zap.Error(err)) + p.log.Warn(ctx, logs.PolicerPoolSubmission, zap.Error(err)) } } } diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 7e5c6e093..2120312f6 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -27,7 +27,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.Uint32("amount of unfinished replicas", task.NumCopies), ) }() @@ -43,7 +43,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T var err error task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr) if err != nil { - p.log.Error(logs.ReplicatorCouldNotGetObjectFromLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage, zap.Stringer("object", task.Addr), zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -75,11 +75,11 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T cancel() if err != nil { - log.Error(logs.ReplicatorCouldNotReplicateObject, + log.Error(ctx, logs.ReplicatorCouldNotReplicateObject, zap.String("error", err.Error()), ) } else { - log.Debug(logs.ReplicatorObjectSuccessfullyReplicated) + log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated) task.NumCopies-- diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index 7e7090237..5ce929342 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -22,7 +22,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull")) + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull")) }() ctx, span := 
tracing.StartSpanFromContext(ctx, "Replicator.HandlePullTask", @@ -48,7 +48,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { endpoints = append(endpoints, s) return false }) - p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), zap.Strings("endpoints", endpoints), @@ -56,7 +56,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { } if obj == nil { - p.log.Error(logs.ReplicatorCouldNotGetObjectFromRemoteStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(errFailedToGetObjectFromAnyNode), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -65,7 +65,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go index 537833516..489f66ae5 100644 --- a/pkg/services/replicator/put.go +++ b/pkg/services/replicator/put.go @@ -20,7 +20,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { p.metrics.IncInFlightRequest() defer p.metrics.DecInFlightRequest() defer func() { - p.log.Debug(logs.ReplicatorFinishWork, zap.String("type", "pull")) + p.log.Debug(ctx, logs.ReplicatorFinishWork, zap.String("type", "pull")) }() ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleLocalPutTask", @@ -31,7 +31,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { defer span.End() if task.Obj == nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), zap.Error(errObjectNotDefined), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -40,7 +40,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container)) if err != nil { - p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage, + p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index f2f86daf0..6910fa5af 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -45,7 +45,7 @@ func New(opts ...Option) *Replicator { opts[i](c) } - c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Replicator"))} + c.log = c.log.With(zap.String("component", "Object Replicator")) return &Replicator{ cfg: c, diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index e914119b4..12b221613 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -33,7 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(logs.ServingRequest, + 
s.log.Debug(ctx, logs.ServingRequest, zap.String("component", "SessionService"), zap.String("request", "Create"), ) diff --git a/pkg/services/session/storage/persistent/options.go b/pkg/services/session/storage/persistent/options.go index 411734ea1..60db97f90 100644 --- a/pkg/services/session/storage/persistent/options.go +++ b/pkg/services/session/storage/persistent/options.go @@ -19,7 +19,7 @@ type Option func(*cfg) func defaultCfg() *cfg { return &cfg{ - l: &logger.Logger{Logger: zap.L()}, + l: logger.NewLoggerWrapper(zap.L()), timeout: 100 * time.Millisecond, } } diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index 71711e371..d312ea0ea 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -1,6 +1,7 @@ package persistent import ( + "context" "crypto/aes" "crypto/cipher" "encoding/hex" @@ -105,7 +106,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) (t *storage.PrivateTok return err }) if err != nil { - s.l.Error(logs.PersistentCouldNotGetSessionFromPersistentStorage, + s.l.Error(context.Background(), logs.PersistentCouldNotGetSessionFromPersistentStorage, zap.Error(err), zap.Stringer("ownerID", ownerID), zap.String("tokenID", hex.EncodeToString(tokenID)), @@ -130,7 +131,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { if epochFromToken(v) <= epoch { err = c.Delete() if err != nil { - s.l.Error(logs.PersistentCouldNotDeleteSToken, + s.l.Error(context.Background(), logs.PersistentCouldNotDeleteSToken, zap.String("token_id", hex.EncodeToString(k)), ) } @@ -141,7 +142,7 @@ func (s *TokenStore) RemoveOld(epoch uint64) { }) }) if err != nil { - s.l.Error(logs.PersistentCouldNotCleanUpExpiredTokens, + s.l.Error(context.Background(), logs.PersistentCouldNotCleanUpExpiredTokens, zap.Uint64("epoch", epoch), ) } diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index 5bde3ae38..416a0fafe 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -54,7 +54,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo return false } - s.log.Debug(logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) called = true diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 84e376cf7..0c5bde078 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -58,7 +58,7 @@ func (s *Service) localReplicationWorker(ctx context.Context) { err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) if err != nil { - s.log.Error(logs.TreeFailedToApplyReplicatedOperation, + s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation, zap.String("err", err.Error())) } span.End() @@ -116,11 +116,11 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { - s.log.Debug(logs.TreeDoNotSendUpdateToTheNode, + s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } else { - s.log.Warn(logs.TreeFailedToSentUpdateToTheNode, + s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), zap.String("key", 
hex.EncodeToString(n.PublicKey())), @@ -154,7 +154,7 @@ func (s *Service) replicateLoop(ctx context.Context) { start := time.Now() err := s.replicate(op) if err != nil { - s.log.Error(logs.TreeErrorDuringReplication, + s.log.Error(ctx, logs.TreeErrorDuringReplication, zap.String("err", err.Error()), zap.Stringer("cid", op.cid), zap.String("treeID", op.treeID)) diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index b63338d25..2df3c08e6 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -63,7 +63,7 @@ func New(opts ...Option) *Service { } if s.log == nil { - s.log = &logger.Logger{Logger: zap.NewNop()} + s.log = logger.NewLoggerWrapper(zap.NewNop()) } s.cache.init(s.key, s.ds) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 2c6deeb78..e2249c9fb 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -92,7 +92,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { for _, tid := range treesToSync { h, err := s.forest.TreeLastSyncHeight(ctx, cid, tid) if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { - s.log.Warn(logs.TreeCouldNotGetLastSynchronizedHeightForATree, + s.log.Warn(ctx, logs.TreeCouldNotGetLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) continue @@ -100,7 +100,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { newHeight := s.synchronizeTree(ctx, cid, h, tid, nodes) if h < newHeight { if err := s.forest.TreeUpdateLastSyncHeight(ctx, cid, tid, newHeight); err != nil { - s.log.Warn(logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, + s.log.Warn(ctx, logs.TreeCouldNotUpdateLastSynchronizedHeightForATree, zap.Stringer("cid", cid), zap.String("tree", tid)) } @@ -251,7 +251,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, treeID string, nodes []netmapSDK.NodeInfo, ) uint64 { - s.log.Debug(logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) + s.log.Debug(ctx, logs.TreeSynchronizeTree, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.Uint64("from", from)) errGroup, egCtx := errgroup.WithContext(ctx) const workersCount = 1024 @@ -282,20 +282,20 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, n.IterateNetworkEndpoints(func(addr string) bool { var a network.Address if err := a.FromString(addr); err != nil { - s.log.Warn(logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) + s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) return false } cc, err := s.createConnection(a) if err != nil { - s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) + s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) return false } defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { - s.log.Warn(logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) + s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil return true @@ -309,7 +309,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from 
uint64, } if err := errGroup.Wait(); err != nil { allNodesSynced.Store(false) - s.log.Warn(logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) + s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationOverAllNodes, zap.Error(err)) } newHeight := minStreamedLastHeight @@ -376,13 +376,13 @@ func (s *Service) syncLoop(ctx context.Context) { return case <-s.syncChan: ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.sync") - s.log.Info(logs.TreeSyncingTrees) + s.log.Info(ctx, logs.TreeSyncingTrees) start := time.Now() cnrs, err := s.cfg.cnrSource.List() if err != nil { - s.log.Error(logs.TreeCouldNotFetchContainers, zap.Error(err)) + s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) span.End() break @@ -394,7 +394,7 @@ func (s *Service) syncLoop(ctx context.Context) { s.removeContainers(ctx, newMap) - s.log.Info(logs.TreeTreesHaveBeenSynchronized) + s.log.Info(ctx, logs.TreeTreesHaveBeenSynchronized) s.metrics.AddSyncDuration(time.Since(start), true) span.End() @@ -414,19 +414,19 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) { err := s.syncPool.Submit(func() { defer wg.Done() - s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr)) err := s.synchronizeAllTrees(ctx, cnr) if err != nil { - s.log.Error(logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) + s.log.Error(ctx, logs.TreeCouldNotSyncTrees, zap.Stringer("cid", cnr), zap.Error(err)) return } - s.log.Debug(logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeContainerTreesHaveBeenSynced, zap.Stringer("cid", cnr)) }) if err != nil { wg.Done() - s.log.Error(logs.TreeCouldNotQueryTreesForSynchronization, + s.log.Error(ctx, logs.TreeCouldNotQueryTreesForSynchronization, zap.Stringer("cid", cnr), zap.Error(err)) if errors.Is(err, ants.ErrPoolClosed) { @@ -452,7 +452,7 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID existed, err := containerCore.WasRemoved(s.cnrSource, cnr) if err != nil { - s.log.Error(logs.TreeCouldNotCheckIfContainerExisted, + s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, zap.Stringer("cid", cnr), zap.Error(err)) } else if existed { @@ -464,11 +464,11 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID } for _, cnr := range removed { - s.log.Debug(logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) + s.log.Debug(ctx, logs.TreeRemovingRedundantTrees, zap.Stringer("cid", cnr)) err := s.DropTree(ctx, cnr, "") if err != nil { - s.log.Error(logs.TreeCouldNotRemoveRedundantTree, + s.log.Error(ctx, logs.TreeCouldNotRemoveRedundantTree, zap.Stringer("cid", cnr), zap.Error(err)) } @@ -482,7 +482,7 @@ func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID for _, cnr := range cnrs { _, pos, err := s.getContainerNodes(cnr) if err != nil { - s.log.Error(logs.TreeCouldNotCalculateContainerNodes, + s.log.Error(context.Background(), logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), zap.Error(err)) continue diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index d4ac2ab02..b3a1b9b94 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -184,6 +184,10 @@ func (l *Logger) WithOptions(options ...zap.Option) { l.z = l.z.WithOptions(options...) 
} +func (l *Logger) With(fields ...zap.Field) *Logger { + return &Logger{z: l.z.With(fields...)} +} + func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z, From 62b5181618f194f024ede60070e240bcd8fe7a31 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 11:20:17 +0300 Subject: [PATCH 173/591] [#1437] blobovnicza: Fix contextcheck linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-lens/internal/blobovnicza/root.go | 2 +- .../blobovnicza/blobovnicza_test.go | 2 +- .../blobovnicza/control.go | 6 ++--- .../blobovnicza/get_test.go | 2 +- .../blobovnicza/iterate_test.go | 2 +- .../blobstor/blobovniczatree/active.go | 23 ++++++++++--------- .../blobstor/blobovniczatree/cache.go | 8 +++---- .../blobstor/blobovniczatree/control.go | 6 ++--- .../blobstor/blobovniczatree/count.go | 4 ++-- .../blobstor/blobovniczatree/delete.go | 8 +++---- .../blobstor/blobovniczatree/exists.go | 4 ++-- .../blobstor/blobovniczatree/get.go | 8 +++---- .../blobstor/blobovniczatree/get_range.go | 8 +++---- .../blobstor/blobovniczatree/iterate.go | 4 ++-- .../blobstor/blobovniczatree/manager.go | 4 ++-- .../blobstor/blobovniczatree/put.go | 2 +- .../blobstor/blobovniczatree/rebuild.go | 22 +++++++++--------- .../blobovniczatree/rebuild_failover_test.go | 14 +++++------ pkg/local_object_storage/engine/engine.go | 4 ++-- pkg/morph/client/client.go | 4 ++-- pkg/morph/event/listener.go | 16 ++++++------- pkg/morph/subscriber/subscriber.go | 10 ++++---- 22 files changed, 82 insertions(+), 81 deletions(-) diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go index 9d8ef3dad..2819981d6 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/root.go +++ b/cmd/frostfs-lens/internal/blobovnicza/root.go @@ -27,7 +27,7 @@ func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza { blobovnicza.WithPath(vPath), blobovnicza.WithReadOnly(true), ) - common.ExitOnErr(cmd, blz.Open()) + common.ExitOnErr(cmd, blz.Open(cmd.Context())) return blz } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go index caee770e8..10cb6f368 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -69,7 +69,7 @@ func TestBlobovnicza(t *testing.T) { defer os.Remove(p) // open Blobovnicza - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) // initialize Blobovnicza require.NoError(t, blz.Init()) diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index 5d7135741..a317279a4 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -16,7 +16,7 @@ import ( // // If the database file does not exist, it will be created automatically. // If blobovnicza is already open, does nothing. 
-func (b *Blobovnicza) Open() error { +func (b *Blobovnicza) Open(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -24,7 +24,7 @@ func (b *Blobovnicza) Open() error { return nil } - b.log.Debug(context.Background(), logs.BlobovniczaCreatingDirectoryForBoltDB, + b.log.Debug(ctx, logs.BlobovniczaCreatingDirectoryForBoltDB, zap.String("path", b.path), zap.Bool("ro", b.boltOptions.ReadOnly), ) @@ -38,7 +38,7 @@ func (b *Blobovnicza) Open() error { } } - b.log.Debug(context.Background(), logs.BlobovniczaOpeningBoltDB, + b.log.Debug(ctx, logs.BlobovniczaOpeningBoltDB, zap.String("path", b.path), zap.Stringer("permissions", b.perm), ) diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go index c464abc87..8c435af89 100644 --- a/pkg/local_object_storage/blobovnicza/get_test.go +++ b/pkg/local_object_storage/blobovnicza/get_test.go @@ -26,7 +26,7 @@ func TestBlobovnicza_Get(t *testing.T) { WithObjectSizeLimit(szLimit), ) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) } diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go index 5db1e4165..bec23bb96 100644 --- a/pkg/local_object_storage/blobovnicza/iterate_test.go +++ b/pkg/local_object_storage/blobovnicza/iterate_test.go @@ -15,7 +15,7 @@ import ( func TestBlobovniczaIterate(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") b := New(WithPath(filename)) - require.NoError(t, b.Open()) + require.NoError(t, b.Open(context.Background())) require.NoError(t, b.Init()) data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go index 603c6abe3..0ac15df82 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "path/filepath" "sync" @@ -53,8 +54,8 @@ func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager // GetOpenedActiveDBForLevel returns active DB for level. // DB must be closed after use. 
-func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, error) { - activeDB, err := m.getCurrentActiveIfOk(lvlPath) +func (m *activeDBManager) GetOpenedActiveDBForLevel(ctx context.Context, lvlPath string) (*activeDB, error) { + activeDB, err := m.getCurrentActiveIfOk(ctx, lvlPath) if err != nil { return nil, err } @@ -62,7 +63,7 @@ func (m *activeDBManager) GetOpenedActiveDBForLevel(lvlPath string) (*activeDB, return activeDB, nil } - return m.updateAndGetActive(lvlPath) + return m.updateAndGetActive(ctx, lvlPath) } func (m *activeDBManager) Open() { @@ -83,7 +84,7 @@ func (m *activeDBManager) Close() { m.closed = true } -func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error) { +func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath string) (*activeDB, error) { m.levelToActiveDBGuard.RLock() defer m.levelToActiveDBGuard.RUnlock() @@ -96,7 +97,7 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error return nil, nil } - blz, err := db.Open() // open db for usage, will be closed on activeDB.Close() + blz, err := db.Open(ctx) // open db for usage, will be closed on activeDB.Close() if err != nil { return nil, err } @@ -112,11 +113,11 @@ func (m *activeDBManager) getCurrentActiveIfOk(lvlPath string) (*activeDB, error }, nil } -func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) { +func (m *activeDBManager) updateAndGetActive(ctx context.Context, lvlPath string) (*activeDB, error) { m.levelLock.Lock(lvlPath) defer m.levelLock.Unlock(lvlPath) - current, err := m.getCurrentActiveIfOk(lvlPath) + current, err := m.getCurrentActiveIfOk(ctx, lvlPath) if err != nil { return nil, err } @@ -124,7 +125,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) return current, nil } - nextShDB, err := m.getNextSharedDB(lvlPath) + nextShDB, err := m.getNextSharedDB(ctx, lvlPath) if err != nil { return nil, err } @@ -133,7 +134,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) return nil, nil } - blz, err := nextShDB.Open() // open db for client, client must call Close() after usage + blz, err := nextShDB.Open(ctx) // open db for client, client must call Close() after usage if err != nil { return nil, err } @@ -143,7 +144,7 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error) }, nil } -func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) { +func (m *activeDBManager) getNextSharedDB(ctx context.Context, lvlPath string) (*sharedDB, error) { var nextActiveDBIdx uint64 hasActive, currentIdx := m.hasActiveDB(lvlPath) if hasActive { @@ -160,7 +161,7 @@ func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) { path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx)) next := m.dbManager.GetByPath(path) - _, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close() + _, err := next.Open(ctx) // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close() if err != nil { return nil, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go index 5c103c1bb..e8016781a 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go @@ -81,12 +81,12 @@ func (c *dbCache) Close() { 
c.closed = true } -func (c *dbCache) GetOrCreate(path string) *sharedDB { +func (c *dbCache) GetOrCreate(ctx context.Context, path string) *sharedDB { value := c.getExisted(path) if value != nil { return value } - return c.create(path) + return c.create(ctx, path) } func (c *dbCache) EvictAndMarkNonCached(path string) { @@ -122,7 +122,7 @@ func (c *dbCache) getExisted(path string) *sharedDB { return nil } -func (c *dbCache) create(path string) *sharedDB { +func (c *dbCache) create(ctx context.Context, path string) *sharedDB { c.pathLock.Lock(path) defer c.pathLock.Unlock(path) @@ -133,7 +133,7 @@ func (c *dbCache) create(path string) *sharedDB { value = c.dbManager.GetByPath(path) - _, err := value.Open() // open db to hold reference, closed by evictedDB.Close() or if cache closed + _, err := value.Open(ctx) // open db to hold reference, closed by evictedDB.Close() or if cache closed if err != nil { return value } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index 55c9d6630..7c0a9edd6 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -46,7 +46,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { eg.Go(func() error { p = strings.TrimSuffix(p, rebuildSuffix) shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open() + blz, err := shBlz.Open(egCtx) if err != nil { return err } @@ -91,8 +91,8 @@ func (b *Blobovniczas) Close() error { // returns blobovnicza with path p // // If blobovnicza is already cached, instance from cache is returned w/o changes. -func (b *Blobovniczas) getBlobovnicza(p string) *sharedDB { - return b.dbCache.GetOrCreate(p) +func (b *Blobovniczas) getBlobovnicza(ctx context.Context, p string) *sharedDB { + return b.dbCache.GetOrCreate(ctx, p) } func (b *Blobovniczas) getBlobovniczaWithoutCaching(p string) *sharedDB { diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go index cf91637d7..1137b9eb2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go @@ -16,13 +16,13 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) { b.metrics.ObjectsCount(time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount") + ctx, span := tracing.StartSpanFromContext(ctx, "Blobovniczas.ObjectsCount") defer span.End() var result uint64 err := b.iterateExistingDBPaths(ctx, func(p string) (bool, error) { shDB := b.getBlobovniczaWithoutCaching(p) - blz, err := shDB.Open() + blz, err := shDB.Open(ctx) if err != nil { return true, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index dd5258042..b26e44144 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -61,8 +61,8 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return res, err } @@ -109,8 +109,8 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res 
co // // returns no error if object was removed from some blobovnicza of the same level. func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicza.DeletePrm, blzPath string) (common.DeleteRes, error) { - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { return common.DeleteRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 2149b17c0..528dbfed7 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -37,8 +37,8 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return common.ExistsRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index e79480095..fc017f22d 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -48,8 +48,8 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return res, err } @@ -95,8 +95,8 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G // returns error if object could not be read from any blobovnicza of the same level. func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.GetPrm, blzPath string) (common.GetRes, error) { // open blobovnicza (cached inside) - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 20f2be2ba..384544d7b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -47,8 +47,8 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if prm.StorageID != nil { id := NewIDFromBytes(prm.StorageID) - shBlz := b.getBlobovnicza(id.Path()) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, id.Path()) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRangeRes{}, err } @@ -103,8 +103,8 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re // returns error if object could not be read from any blobovnicza of the same level. 
func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRangePrm, blzPath string) (common.GetRangeRes, error) { // open blobovnicza (cached inside) - shBlz := b.getBlobovnicza(blzPath) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, blzPath) + blz, err := shBlz.Open(ctx) if err != nil { return common.GetRangeRes{}, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index 7f0453410..049a61d72 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -72,8 +72,8 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm // iterator over all Blobovniczas in unsorted order. Break on f's error return. func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors bool, f func(string, *blobovnicza.Blobovnicza) error) error { return b.iterateExistingDBPaths(ctx, func(p string) (bool, error) { - shBlz := b.getBlobovnicza(p) - blz, err := shBlz.Open() + shBlz := b.getBlobovnicza(ctx, p) + blz, err := shBlz.Open(ctx) if err != nil { if ignoreErrors { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 235c9f65d..502202d68 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -49,7 +49,7 @@ func newSharedDB(options []blobovnicza.Option, path string, readOnly bool, } } -func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) { +func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { if b.closedFlag.Load() { return nil, errClosed } @@ -68,7 +68,7 @@ func (b *sharedDB) Open() (*blobovnicza.Blobovnicza, error) { blobovnicza.WithMetrics(b.metrics), )...) 
- if err := blz.Open(); err != nil { + if err := blz.Open(ctx); err != nil { return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err) } if err := blz.Init(); err != nil { diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 844b43151..8dff436d3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -77,7 +77,7 @@ type putIterator struct { } func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) { - active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath) + active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index fee67a0a8..e137bdd99 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -165,7 +165,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe continue } path := filepath.Join(lvlPath, e.Name()) - resettlementRequired, err := b.rebuildBySize(path, target) + resettlementRequired, err := b.rebuildBySize(ctx, path, target) if err != nil { return false, err } @@ -180,9 +180,9 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe return result, nil } -func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) { - shDB := b.getBlobovnicza(path) - blz, err := shDB.Open() +func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFillPercent int) (bool, error) { + shDB := b.getBlobovnicza(ctx, path) + blz, err := shDB.Open(ctx) if err != nil { return false, err } @@ -196,8 +196,8 @@ func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, } func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { - shDB := b.getBlobovnicza(path) - blz, err := shDB.Open() + shDB := b.getBlobovnicza(ctx, path) + blz, err := shDB.Open(ctx) if err != nil { return 0, err } @@ -365,8 +365,8 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) { rebuildTmpFilePath := s s = strings.TrimSuffix(s, rebuildSuffix) - shDB := b.getBlobovnicza(s) - blz, err := shDB.Open() + shDB := b.getBlobovnicza(ctx, s) + blz, err := shDB.Open(ctx) if err != nil { return true, err } @@ -398,8 +398,8 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string, move blobovnicza.MoveInfo, metaStore common.MetaStorage, ) error { - targetDB := b.getBlobovnicza(NewIDFromBytes(move.TargetStorageID).Path()) - target, err := targetDB.Open() + targetDB := b.getBlobovnicza(ctx, NewIDFromBytes(move.TargetStorageID).Path()) + target, err := targetDB.Open(ctx) if err != nil { return err } @@ -477,7 +477,7 @@ type moveIterator struct { } func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, error) { - target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(lvlPath) + target, err := 
i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index b177d20fc..bfea97afe 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -35,7 +35,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) obj := blobstortest.NewObject(1024) @@ -65,7 +65,7 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) obj := blobstortest.NewObject(1024) @@ -89,7 +89,7 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) _, err = blz.Put(context.Background(), pPrm) @@ -105,7 +105,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { dir := t.TempDir() blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) obj := blobstortest.NewObject(1024) @@ -123,7 +123,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) var pPrm blobovnicza.PutPrm @@ -173,7 +173,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.NoError(t, b.Close()) blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) moveInfo, err := blz.ListMoveInfo(context.Background()) @@ -188,7 +188,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.NoError(t, blz.Close()) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) - require.NoError(t, blz.Open()) + require.NoError(t, blz.Open(context.Background())) require.NoError(t, blz.Init()) moveInfo, err = blz.ListMoveInfo(context.Background()) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 8963ec099..a8caa215a 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -269,8 +269,8 @@ type containerSource struct { func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) (bool, error) { select { - case <-context.Background().Done(): - return false, context.Background().Err() + case <-ctx.Done(): + return false, ctx.Err() default: } diff --git a/pkg/morph/client/client.go 
b/pkg/morph/client/client.go index 12c0e0842..ef6a009e4 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -389,7 +389,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight, + c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight, zap.String("error", err.Error())) return nil } @@ -403,7 +403,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { - c.logger.Error(context.Background(), logs.ClientCantGetBlockchainHeight243, + c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243, zap.String("error", err.Error())) return nil } diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 03bba8ab9..3d3d806a4 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -269,7 +269,7 @@ loop: continue loop } - l.handleNotifyEvent(notifyEvent) + l.handleNotifyEvent(ctx, notifyEvent) case notaryEvent, ok := <-chs.NotaryRequestsCh: if !ok { l.log.Warn(ctx, logs.EventStopEventListenerByNotaryChannel) @@ -316,16 +316,16 @@ func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { } } -func (l *listener) handleNotifyEvent(notifyEvent *state.ContainedNotificationEvent) { +func (l *listener) handleNotifyEvent(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) { if err := l.pool.Submit(func() { - l.parseAndHandleNotification(notifyEvent) + l.parseAndHandleNotification(ctx, notifyEvent) }); err != nil { - l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } -func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotificationEvent) { +func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent *state.ContainedNotificationEvent) { log := l.log.With( zap.String("script hash LE", notifyEvent.ScriptHash.StringLE()), ) @@ -347,7 +347,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if !ok { - log.Debug(context.Background(), logs.EventEventParserNotSet) + log.Debug(ctx, logs.EventEventParserNotSet) return } @@ -355,7 +355,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi // parse the notification event event, err := parser(notifyEvent) if err != nil { - log.Warn(context.Background(), logs.EventCouldNotParseNotificationEvent, + log.Warn(ctx, logs.EventCouldNotParseNotificationEvent, zap.String("error", err.Error()), ) @@ -368,7 +368,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi l.mtx.RUnlock() if len(handlers) == 0 { - log.Info(context.Background(), logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, + log.Info(ctx, logs.EventNotificationHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) diff --git a/pkg/morph/subscriber/subscriber.go b/pkg/morph/subscriber/subscriber.go index 3a2da6757..4ef59ed6a 100644 --- a/pkg/morph/subscriber/subscriber.go +++ b/pkg/morph/subscriber/subscriber.go @@ -254,7 +254,7 @@ func (s *subscriber) switchEndpoint(ctx context.Context, finishCh chan<- bool) b s.Lock() chs := newSubChannels() go func() { - finishCh <- s.restoreSubscriptions(chs.NotifyChan, chs.BlockChan, chs.NotaryChan) + finishCh <- s.restoreSubscriptions(ctx, chs.NotifyChan, 
chs.BlockChan, chs.NotaryChan) }() s.current = chs s.Unlock() @@ -295,7 +295,7 @@ drainloop: // restoreSubscriptions restores subscriptions according to // cached information about them. -func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotificationEvent, +func (s *subscriber) restoreSubscriptions(ctx context.Context, notifCh chan<- *state.ContainedNotificationEvent, blCh chan<- *block.Block, notaryCh chan<- *result.NotaryRequestEvent, ) bool { var err error @@ -304,7 +304,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific if s.subscribedToNewBlocks { _, err = s.client.ReceiveBlocks(blCh) if err != nil { - s.log.Error(context.Background(), logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -313,7 +313,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific for contract := range s.subscribedEvents { _, err = s.client.ReceiveExecutionNotifications(contract, notifCh) if err != nil { - s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } @@ -322,7 +322,7 @@ func (s *subscriber) restoreSubscriptions(notifCh chan<- *state.ContainedNotific for signer := range s.subscribedNotaryEvents { _, err = s.client.ReceiveNotaryRequests(signer, notaryCh) if err != nil { - s.log.Error(context.Background(), logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) + s.log.Error(ctx, logs.ClientCouldNotRestoreNotaryNotificationSubscriptionAfterRPCSwitch, zap.Error(err)) return false } } From c139892117c3ebd6405ea1362cd7576edda1ac00 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 11:30:31 +0300 Subject: [PATCH 174/591] [#1437] ir: Fix contextcheck linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-ir/config.go | 38 ++++++++++++++++++------------------- cmd/frostfs-ir/main.go | 12 ++++++------ pkg/innerring/innerring.go | 12 ++++++------ pkg/innerring/state.go | 12 ++++++------ pkg/innerring/state_test.go | 3 ++- 5 files changed, 39 insertions(+), 38 deletions(-) diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 34d9d5595..7415e8e70 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -47,7 +47,7 @@ func reloadConfig() error { return logPrm.Reload() } -func watchForSignal(cancel func()) { +func watchForSignal(ctx context.Context, cancel func()) { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) @@ -59,49 +59,49 @@ func watchForSignal(cancel func()) { // signals causing application to shut down should have priority over // reconfiguration signal case <-ch: - log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() - shutdown() - log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete) + shutdown(ctx) + log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() - shutdown() + shutdown(ctx) return 
default: // block until any signal is receieved select { case <-ch: - log.Info(context.Background(), logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) + log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) cancel() - shutdown() - log.Info(context.Background(), logs.FrostFSNodeTerminationSignalProcessingIsComplete) + shutdown(ctx) + log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return case err := <-intErr: // internal application error - log.Info(context.Background(), logs.FrostFSIRInternalError, zap.String("msg", err.Error())) + log.Info(ctx, logs.FrostFSIRInternalError, zap.String("msg", err.Error())) cancel() - shutdown() + shutdown(ctx) return case <-sighupCh: - log.Info(context.Background(), logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) - if !innerRing.CompareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { - log.Info(context.Background(), logs.FrostFSNodeSIGHUPSkip) + log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) + if !innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { + log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) break } err := reloadConfig() if err != nil { - log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } pprofCmp.reload() metricsCmp.reload() - log.Info(context.Background(), logs.FrostFSIRReloadExtraWallets) + log.Info(ctx, logs.FrostFSIRReloadExtraWallets) err = innerRing.SetExtraWallets(cfg) if err != nil { - log.Error(context.Background(), logs.FrostFSNodeConfigurationReading, zap.Error(err)) + log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } - innerRing.CompareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) - log.Info(context.Background(), logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) + innerRing.CompareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) + log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } } } diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 55a8ce00d..bcb2c5dd8 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -106,7 +106,7 @@ func main() { log.Info(ctx, logs.CommonApplicationStarted, zap.String("version", misc.Version)) - watchForSignal(cancel) + watchForSignal(ctx, cancel) <-ctx.Done() // graceful shutdown log.Debug(ctx, logs.FrostFSNodeWaitingForAllProcessesToStop) @@ -115,20 +115,20 @@ func main() { log.Info(ctx, logs.FrostFSIRApplicationStopped) } -func shutdown() { - innerRing.Stop() +func shutdown(ctx context.Context) { + innerRing.Stop(ctx) if err := metricsCmp.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } if err := pprofCmp.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } if err := sdnotify.ClearStatus(); err != nil { - log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 63a4cb1cb..67927c10c 100644 --- 
a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -140,10 +140,10 @@ var ( // Start runs all event providers. func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { - s.setHealthStatus(control.HealthStatus_STARTING) + s.setHealthStatus(ctx, control.HealthStatus_STARTING) defer func() { if err == nil { - s.setHealthStatus(control.HealthStatus_READY) + s.setHealthStatus(ctx, control.HealthStatus_READY) } }() @@ -299,15 +299,15 @@ func (s *Server) startWorkers(ctx context.Context) { } // Stop closes all subscription channels. -func (s *Server) Stop() { - s.setHealthStatus(control.HealthStatus_SHUTTING_DOWN) +func (s *Server) Stop(ctx context.Context) { + s.setHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) go s.morphListener.Stop() go s.mainnetListener.Stop() for _, c := range s.closers { if err := c(); err != nil { - s.log.Warn(context.Background(), logs.InnerringCloserError, + s.log.Warn(ctx, logs.InnerringCloserError, zap.String("error", err.Error()), ) } @@ -349,7 +349,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.setHealthStatus(control.HealthStatus_HEALTH_STATUS_UNDEFINED) + server.setHealthStatus(ctx, control.HealthStatus_HEALTH_STATUS_UNDEFINED) // parse notary support server.feeConfig = config.NewFeeConfig(cfg) diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 250f41e5f..2dbcd7494 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -154,17 +154,17 @@ func (s *Server) ResetEpochTimer(h uint32) error { return s.epochTimer.Reset() } -func (s *Server) setHealthStatus(hs control.HealthStatus) { +func (s *Server) setHealthStatus(ctx context.Context, hs control.HealthStatus) { s.healthStatus.Store(int32(hs)) - s.notifySystemd(hs) + s.notifySystemd(ctx, hs) if s.irMetrics != nil { s.irMetrics.SetHealth(int32(hs)) } } -func (s *Server) CompareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) { +func (s *Server) CompareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { if swapped = s.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { - s.notifySystemd(newSt) + s.notifySystemd(ctx, newSt) if s.irMetrics != nil { s.irMetrics.SetHealth(int32(newSt)) } @@ -187,7 +187,7 @@ func initPersistentStateStorage(cfg *viper.Viper) (*state.PersistentStorage, err return persistStorage, nil } -func (s *Server) notifySystemd(st control.HealthStatus) { +func (s *Server) notifySystemd(ctx context.Context, st control.HealthStatus) { if !s.sdNotify { return } @@ -203,6 +203,6 @@ func (s *Server) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - s.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + s.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go index fe09f8f2d..9313edf78 100644 --- a/pkg/innerring/state_test.go +++ b/pkg/innerring/state_test.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "testing" "time" @@ -42,7 +43,7 @@ func TestServerState(t *testing.T) { require.Equal(t, epochDuration, srv.EpochDuration(), "invalid epoch duration") var healthStatus control.HealthStatus = control.HealthStatus_READY - srv.setHealthStatus(healthStatus) + srv.setHealthStatus(context.Background(), healthStatus) require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") require.True(t, srv.IsActive(), 
"invalid IsActive result") From 16598553d9603cdf7d5b349db250041a80f9ec14 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 11:56:38 +0300 Subject: [PATCH 175/591] [#1437] shard: Fix contextcheck linter Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/blobstor/mode.go | 4 +-- pkg/local_object_storage/engine/control.go | 20 +++++------ pkg/local_object_storage/engine/engine.go | 10 +++--- pkg/local_object_storage/engine/error_test.go | 4 +-- .../engine/evacuate_test.go | 36 +++++++++---------- pkg/local_object_storage/engine/shards.go | 36 +++++++++---------- .../engine/shards_test.go | 10 +++--- .../internal/storagetest/storage.go | 12 +++---- pkg/local_object_storage/metabase/control.go | 14 ++++---- pkg/local_object_storage/metabase/mode.go | 5 +-- pkg/local_object_storage/metabase/shard_id.go | 9 ++--- .../metabase/version_test.go | 2 +- pkg/local_object_storage/pilorama/boltdb.go | 2 +- pkg/local_object_storage/pilorama/forest.go | 2 +- .../pilorama/interface.go | 2 +- pkg/local_object_storage/shard/control.go | 34 +++++++++--------- .../shard/control_test.go | 16 ++++----- pkg/local_object_storage/shard/delete_test.go | 2 +- .../shard/gc_internal_test.go | 2 +- pkg/local_object_storage/shard/gc_test.go | 8 ++--- pkg/local_object_storage/shard/get_test.go | 2 +- pkg/local_object_storage/shard/head_test.go | 2 +- pkg/local_object_storage/shard/id.go | 7 ++-- pkg/local_object_storage/shard/inhume_test.go | 2 +- pkg/local_object_storage/shard/list_test.go | 4 +-- pkg/local_object_storage/shard/lock_test.go | 4 +-- .../shard/metrics_test.go | 6 ++-- pkg/local_object_storage/shard/mode.go | 16 +++++---- pkg/local_object_storage/shard/range_test.go | 2 +- pkg/local_object_storage/shard/rebuild.go | 4 +-- pkg/local_object_storage/shard/refill_test.go | 6 ++-- pkg/local_object_storage/shard/reload_test.go | 2 +- .../shard/shutdown_test.go | 4 +-- .../writecache/flush_test.go | 20 +++++------ pkg/local_object_storage/writecache/mode.go | 4 +-- .../writecache/writecache.go | 2 +- pkg/services/control/server/detach_shards.go | 4 +-- pkg/services/control/server/set_shard_mode.go | 4 +-- 38 files changed, 165 insertions(+), 160 deletions(-) diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index a579a6f92..f081ff645 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -8,7 +8,7 @@ import ( ) // SetMode sets the blobstor mode of operation. 
-func (b *BlobStor) SetMode(m mode.Mode) error { +func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { b.modeMtx.Lock() defer b.modeMtx.Unlock() @@ -22,7 +22,7 @@ func (b *BlobStor) SetMode(m mode.Mode) error { err := b.Close() if err == nil { - if err = b.openBlobStor(context.TODO(), m); err == nil { + if err = b.openBlobStor(ctx, m); err == nil { err = b.Init() } } diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 98ec73ae9..a5c53dcad 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -56,7 +56,7 @@ func (e *StorageEngine) open(ctx context.Context) error { sh := e.shards[res.id] delete(e.shards, res.id) - err := sh.Close() + err := sh.Close(ctx) if err != nil { e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), @@ -108,7 +108,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { sh := e.shards[res.id] delete(e.shards, res.id) - err := sh.Close() + err := sh.Close(ctx) if err != nil { e.log.Error(ctx, logs.EngineCouldNotClosePartiallyInitializedShard, zap.String("id", res.id), @@ -126,7 +126,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { } e.wg.Add(1) - go e.setModeLoop() + go e.setModeLoop(ctx) return nil } @@ -153,7 +153,7 @@ func (e *StorageEngine) Close(ctx context.Context) error { } // closes all shards. Never returns an error, shard errors are logged. -func (e *StorageEngine) close(releasePools bool) error { +func (e *StorageEngine) close(ctx context.Context, releasePools bool) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -164,8 +164,8 @@ func (e *StorageEngine) close(releasePools bool) error { } for id, sh := range e.shards { - if err := sh.Close(); err != nil { - e.log.Debug(context.Background(), logs.EngineCouldNotCloseShard, + if err := sh.Close(ctx); err != nil { + e.log.Debug(ctx, logs.EngineCouldNotCloseShard, zap.String("id", id), zap.String("error", err.Error()), ) @@ -213,7 +213,7 @@ func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { return e.open(ctx) } } else if prevErr == nil { // ok -> block - return e.close(errors.Is(err, errClosed)) + return e.close(ctx, errors.Is(err, errClosed)) } // otherwise do nothing @@ -306,7 +306,7 @@ loop: e.mtx.RUnlock() - e.removeShards(shardsToRemove...) + e.removeShards(ctx, shardsToRemove...) for _, p := range shardsToReload { err := p.sh.Reload(ctx, p.opts...) @@ -330,13 +330,13 @@ loop: err = sh.Init(ctx) } if err != nil { - _ = sh.Close() + _ = sh.Close(ctx) return fmt.Errorf("could not init %s shard: %w", idStr, err) } err = e.addShard(sh) if err != nil { - _ = sh.Close() + _ = sh.Close(ctx) return fmt.Errorf("could not add %s shard: %w", idStr, err) } diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index a8caa215a..6e30ee9de 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -55,7 +55,7 @@ type setModeRequest struct { // setModeLoop listens setModeCh to perform degraded mode transition of a single shard. // Instead of creating a worker per single shard we use a single goroutine. 
-func (e *StorageEngine) setModeLoop() { +func (e *StorageEngine) setModeLoop(ctx context.Context) { defer e.wg.Done() var ( @@ -75,7 +75,7 @@ func (e *StorageEngine) setModeLoop() { if !ok { inProgress[sid] = struct{}{} go func() { - e.moveToDegraded(r.sh, r.errorCount, r.isMeta) + e.moveToDegraded(ctx, r.sh, r.errorCount, r.isMeta) mtx.Lock() delete(inProgress, sid) @@ -87,7 +87,7 @@ func (e *StorageEngine) setModeLoop() { } } -func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta bool) { +func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, errCount uint32, isMeta bool) { sid := sh.ID() log := e.log.With( zap.Stringer("shard_id", sid), @@ -97,7 +97,7 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta defer e.mtx.RUnlock() if isMeta { - err := sh.SetMode(mode.DegradedReadOnly) + err := sh.SetMode(ctx, mode.DegradedReadOnly) if err == nil { log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) return @@ -106,7 +106,7 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta zap.Error(err)) } - err := sh.SetMode(mode.ReadOnly) + err := sh.SetMode(ctx, mode.ReadOnly) if err != nil { log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) return diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 1619003a1..57c423764 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -158,10 +158,10 @@ func TestErrorReporting(t *testing.T) { checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite) } - require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, false)) + require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, false)) checkShardState(t, te.ng, te.shards[0].id, errThreshold+1, mode.ReadWrite) - require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true)) + require.NoError(t, te.ng.SetShardMode(context.Background(), te.shards[0].id, mode.ReadWrite, true)) checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite) require.NoError(t, te.ng.Close(context.Background())) }) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 8498c9245..54eacc3f2 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -146,7 +146,7 @@ func TestEvacuateShardObjects(t *testing.T) { require.Equal(t, uint64(0), res.ObjectsEvacuated()) }) - require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) res, err := e.Evacuate(context.Background(), prm) require.NoError(t, err) @@ -237,7 +237,7 @@ func TestEvacuateObjectsNetwork(t *testing.T) { evacuateShardID := ids[0].String() - require.NoError(t, e.shards[evacuateShardID].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[0:1] @@ -260,8 +260,8 @@ func TestEvacuateObjectsNetwork(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), 
mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -298,7 +298,7 @@ func TestEvacuateObjectsNetwork(t *testing.T) { } for i := range ids { - require.NoError(t, e.shards[ids[i].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly)) } var prm EvacuateShardPrm @@ -327,8 +327,8 @@ func TestEvacuateCancellation(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -357,8 +357,8 @@ func TestEvacuateCancellationByError(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -386,8 +386,8 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) blocker := make(chan interface{}) running := make(chan interface{}) @@ -429,8 +429,8 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) blocker := make(chan interface{}) running := make(chan interface{}) @@ -515,7 +515,7 @@ func TestEvacuateTreesLocal(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) var prm EvacuateShardPrm prm.ShardID = ids[0:1] @@ -594,8 +594,8 @@ func TestEvacuateTreesRemote(t *testing.T) { require.NoError(t, e.Close(context.Background())) }() - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) - require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) mutex := sync.Mutex{} evacuatedTreeOps := make(map[string][]*pilorama.Move) @@ -753,7 +753,7 @@ func TestEvacuateShardObjectsRepOneOnly(t *testing.T) { prm.Scope = EvacuateScopeObjects prm.RepOneOnly = true - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, 
e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) res, err := e.Evacuate(context.Background(), prm) require.NoError(t, err) @@ -810,7 +810,7 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { prm.RepOneOnly = true prm.ContainerWorkerCount = 10 - require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly)) + require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) start := time.Now() _, err := e.Evacuate(context.Background(), prm) diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index e172706e3..2b94103e9 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -121,7 +121,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return sh.ID(), nil } -func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*shard.Shard, error) { +func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) { id, err := generateShardID() if err != nil { return nil, fmt.Errorf("could not generate shard ID: %w", err) @@ -139,8 +139,8 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh shard.WithZeroCountCallback(e.processZeroCountContainers), )...) - if err := sh.UpdateID(); err != nil { - e.log.Warn(context.Background(), logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) + if err := sh.UpdateID(ctx); err != nil { + e.log.Warn(ctx, logs.FailedToUpdateShardID, zap.Stringer("shard_id", sh.ID()), zap.String("metabase_path", sh.DumpInfo().MetaBaseInfo.Path), zap.Error(err)) } return sh, nil @@ -203,7 +203,7 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { // removeShards removes specified shards. Skips non-existent shards. // Logs errors about shards that it could not Close after the removal. -func (e *StorageEngine) removeShards(ids ...string) { +func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { if len(ids) == 0 { return } @@ -228,22 +228,22 @@ func (e *StorageEngine) removeShards(ids ...string) { delete(e.shardPools, id) } - e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved, + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } e.mtx.Unlock() for _, sh := range ss { - err := sh.SetMode(mode.Disabled) + err := sh.SetMode(ctx, mode.Disabled) if err != nil { - e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) } - err = sh.Close() + err = sh.Close(ctx) if err != nil { - e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard, + e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -310,7 +310,7 @@ func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (sto // SetShardMode sets mode of the shard with provided identifier. // // Returns an error if shard mode was not set, or shard was not found in storage engine. 
-func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounter bool) error { +func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.Mode, resetErrorCounter bool) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -320,7 +320,7 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte sh.errorCount.Store(0) e.metrics.ClearErrorCounter(shID) } - return sh.SetMode(m) + return sh.SetMode(ctx, m) } } @@ -346,7 +346,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { } } -func (e *StorageEngine) DetachShards(ids []*shard.ID) error { +func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error { if len(ids) == 0 { return logicerr.New("ids must be non-empty") } @@ -356,20 +356,20 @@ func (e *StorageEngine) DetachShards(ids []*shard.ID) error { return err } - return e.closeShards(deletedShards) + return e.closeShards(ctx, deletedShards) } // closeShards closes deleted shards. Tries to close all shards. // Returns single error with joined shard errors. -func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { +func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedShard) error { var multiErr error var multiErrGuard sync.Mutex var eg errgroup.Group for _, sh := range deletedShards { eg.Go(func() error { - err := sh.SetMode(mode.Disabled) + err := sh.SetMode(ctx, mode.Disabled) if err != nil { - e.log.Error(context.Background(), logs.EngineCouldNotChangeShardModeToDisabled, + e.log.Error(ctx, logs.EngineCouldNotChangeShardModeToDisabled, zap.Stringer("id", sh.ID()), zap.Error(err), ) @@ -378,9 +378,9 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error { multiErrGuard.Unlock() } - err = sh.Close() + err = sh.Close(ctx) if err != nil { - e.log.Error(context.Background(), logs.EngineCouldNotCloseRemovedShard, + e.log.Error(ctx, logs.EngineCouldNotCloseRemovedShard, zap.Stringer("id", sh.ID()), zap.Error(err), ) diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 3347d58f1..207491bd4 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -33,7 +33,7 @@ func TestRemoveShard(t *testing.T) { for id, remove := range mSh { if remove { - e.removeShards(id) + e.removeShards(context.Background(), id) } } @@ -55,11 +55,11 @@ func TestDisableShards(t *testing.T) { e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.ErrorAs(t, e.DetachShards(ids), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards(nil), new(logicerr.Logical)) - require.ErrorAs(t, e.DetachShards([]*shard.ID{}), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical)) + require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical)) - require.NoError(t, e.DetachShards([]*shard.ID{ids[0]})) + require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]})) require.Equal(t, 1, len(e.shards)) } diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go index 586b3dcc6..d8ac106dd 100644 --- a/pkg/local_object_storage/internal/storagetest/storage.go +++ b/pkg/local_object_storage/internal/storagetest/storage.go @@ -11,7 +11,7 @@ import ( // 
Component represents single storage component. type Component interface { Open(context.Context, mode.Mode) error - SetMode(mode.Mode) error + SetMode(context.Context, mode.Mode) error Init() error Close() error } @@ -91,12 +91,12 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) { // call `SetMode` on all not-yet-initialized components. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.SetMode(m)) + require.NoError(t, s.SetMode(context.Background(), m)) t.Run("after open in RO", func(t *testing.T) { require.NoError(t, s.Close()) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) - require.NoError(t, s.SetMode(m)) + require.NoError(t, s.SetMode(context.Background(), m)) }) require.NoError(t, s.Close()) @@ -106,7 +106,7 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) { // Use-case: notmal node operation. require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) require.NoError(t, s.Init()) - require.NoError(t, s.SetMode(m)) + require.NoError(t, s.SetMode(context.Background(), m)) require.NoError(t, s.Close()) }) } @@ -116,7 +116,7 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) { s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) require.NoError(t, s.Init()) - require.NoError(t, s.SetMode(from)) - require.NoError(t, s.SetMode(to)) + require.NoError(t, s.SetMode(context.Background(), from)) + require.NoError(t, s.SetMode(context.Background(), to)) require.NoError(t, s.Close()) } diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 68e065a0a..54bea4204 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -39,7 +39,7 @@ var ( ) // Open boltDB instance for metabase. -func (db *DB) Open(_ context.Context, m mode.Mode) error { +func (db *DB) Open(ctx context.Context, m mode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = m @@ -48,10 +48,10 @@ func (db *DB) Open(_ context.Context, m mode.Mode) error { if m.NoMetabase() { return nil } - return db.openDB(m) + return db.openDB(ctx, m) } -func (db *DB) openDB(mode mode.Mode) error { +func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission) if err != nil { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) @@ -65,10 +65,10 @@ func (db *DB) openDB(mode mode.Mode) error { } db.boltOptions.ReadOnly = mode.ReadOnly() - return metaerr.Wrap(db.openBolt()) + return metaerr.Wrap(db.openBolt(ctx)) } -func (db *DB) openBolt() error { +func (db *DB) openBolt(ctx context.Context) error { var err error db.boltDB, err = bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions) @@ -226,7 +226,7 @@ func (db *DB) close() error { // If there was a problem with applying new configuration, an error is returned. // // If a metabase was couldn't be reopened because of an error, ErrDegradedMode is returned. 
-func (db *DB) Reload(opts ...Option) (bool, error) { +func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) { var c cfg for i := range opts { opts[i](&c) @@ -243,7 +243,7 @@ func (db *DB) Reload(opts ...Option) (bool, error) { db.mode = mode.Disabled db.metrics.SetMode(mode.ComponentDisabled) db.info.Path = c.info.Path - if err := db.openBolt(); err != nil { + if err := db.openBolt(ctx); err != nil { return false, metaerr.Wrap(fmt.Errorf("%w: %v", ErrDegradedMode, err)) } diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index 2032ed6b2..f99262be4 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -1,6 +1,7 @@ package meta import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -8,7 +9,7 @@ import ( // SetMode sets the metabase mode of operation. // If the mode assumes no operation metabase, the database is closed. -func (db *DB) SetMode(m mode.Mode) error { +func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() @@ -25,7 +26,7 @@ func (db *DB) SetMode(m mode.Mode) error { if m.NoMetabase() { db.boltDB = nil } else { - err := db.openDB(m) + err := db.openDB(ctx, m) if err == nil && !m.ReadOnly() { err = db.Init() } diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go index 88446494e..e58115bc8 100644 --- a/pkg/local_object_storage/metabase/shard_id.go +++ b/pkg/local_object_storage/metabase/shard_id.go @@ -2,6 +2,7 @@ package meta import ( "bytes" + "context" "errors" "fmt" "os" @@ -21,7 +22,7 @@ var ( // If id is missing, returns nil, nil. // // GetShardID does not report any metrics. -func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) { +func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error) { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = mode @@ -30,7 +31,7 @@ func (db *DB) GetShardID(mode metamode.Mode) ([]byte, error) { return nil, nil } - if err := db.openDB(mode); err != nil { + if err := db.openDB(ctx, mode); err != nil { return nil, fmt.Errorf("failed to open metabase: %w", err) } @@ -59,7 +60,7 @@ func (db *DB) readShardID() ([]byte, error) { // SetShardID sets metabase operation mode // and writes shard id to db. 
-func (db *DB) SetShardID(id []byte, mode metamode.Mode) error { +func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) error { db.modeMtx.Lock() defer db.modeMtx.Unlock() db.mode = mode @@ -68,7 +69,7 @@ func (db *DB) SetShardID(id []byte, mode metamode.Mode) error { return ErrReadOnlyMode } - if err := db.openDB(mode); err != nil { + if err := db.openDB(ctx, mode); err != nil { return fmt.Errorf("failed to open metabase: %w", err) } diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go index 75229a1b4..509e72479 100644 --- a/pkg/local_object_storage/metabase/version_test.go +++ b/pkg/local_object_storage/metabase/version_test.go @@ -58,7 +58,7 @@ func TestVersion(t *testing.T) { }) t.Run("old data", func(t *testing.T) { db := newDB(t) - require.NoError(t, db.SetShardID([]byte{1, 2, 3, 4}, mode.ReadWrite)) + require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite)) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.Init()) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index a778434dd..9ffcf1e83 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -91,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage { return &b } -func (t *boltForest) SetMode(m mode.Mode) error { +func (t *boltForest) SetMode(_ context.Context, m mode.Mode) error { t.modeMtx.Lock() defer t.modeMtx.Unlock() diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 374943745..76da1c0c2 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -128,7 +128,7 @@ func (f *memoryForest) Open(context.Context, mode.Mode) error { return nil } -func (f *memoryForest) SetMode(mode.Mode) error { +func (f *memoryForest) SetMode(context.Context, mode.Mode) error { return nil } diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index b6ca246f2..9717b2401 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -65,7 +65,7 @@ type ForestStorage interface { Init() error Open(context.Context, mode.Mode) error Close() error - SetMode(m mode.Mode) error + SetMode(context.Context, mode.Mode) error SetParentID(id string) Forest diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index eb3aa61c0..056737a9d 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -20,23 +20,23 @@ import ( "golang.org/x/sync/errgroup" ) -func (s *Shard) handleMetabaseFailure(stage string, err error) error { - s.log.Error(context.Background(), logs.ShardMetabaseFailureSwitchingMode, +func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err error) error { + s.log.Error(ctx, logs.ShardMetabaseFailureSwitchingMode, zap.String("stage", stage), zap.Stringer("mode", mode.ReadOnly), zap.Error(err)) - err = s.SetMode(mode.ReadOnly) + err = s.SetMode(ctx, mode.ReadOnly) if err == nil { return nil } - s.log.Error(context.Background(), logs.ShardCantMoveShardToReadonlySwitchMode, + s.log.Error(ctx, logs.ShardCantMoveShardToReadonlySwitchMode, zap.String("stage", stage), zap.Stringer("mode", mode.DegradedReadOnly), zap.Error(err)) - err = s.SetMode(mode.DegradedReadOnly) + 
err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly)) } @@ -75,7 +75,7 @@ func (s *Shard) Open(ctx context.Context) error { return fmt.Errorf("could not open %T: %w", components[j], err) } } - err = s.handleMetabaseFailure("open", err) + err = s.handleMetabaseFailure(ctx, "open", err) if err != nil { return err } @@ -101,7 +101,7 @@ func (x *metabaseSynchronizer) Init() error { // Init initializes all Shard's components. func (s *Shard) Init(ctx context.Context) error { m := s.GetMode() - if err := s.initializeComponents(m); err != nil { + if err := s.initializeComponents(ctx, m); err != nil { return err } @@ -138,7 +138,7 @@ func (s *Shard) Init(ctx context.Context) error { return nil } -func (s *Shard) initializeComponents(m mode.Mode) error { +func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { type initializer interface { Init() error } @@ -176,7 +176,7 @@ func (s *Shard) initializeComponents(m mode.Mode) error { return fmt.Errorf("metabase initialization: %w", err) } - err = s.handleMetabaseFailure("init", err) + err = s.handleMetabaseFailure(ctx, "init", err) if err != nil { return err } @@ -364,9 +364,9 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object } // Close releases all Shard's components. -func (s *Shard) Close() error { +func (s *Shard) Close(ctx context.Context) error { if s.rb != nil { - s.rb.Stop(s.log) + s.rb.Stop(ctx, s.log) } var components []interface{ Close() error } @@ -386,7 +386,7 @@ func (s *Shard) Close() error { for _, component := range components { if err := component.Close(); err != nil { lastErr = err - s.log.Error(context.Background(), logs.ShardCouldNotCloseShardComponent, zap.Error(err)) + s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } } @@ -414,18 +414,18 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { unlock := s.lockExclusive() defer unlock() - s.rb.Stop(s.log) + s.rb.Stop(ctx, s.log) if !s.info.Mode.NoMetabase() { defer func() { s.rb.Start(ctx, s.blobStor, s.metaBase, s.log) }() } - ok, err := s.metaBase.Reload(c.metaOpts...) + ok, err := s.metaBase.Reload(ctx, c.metaOpts...) if err != nil { if errors.Is(err, meta.ErrDegradedMode) { s.log.Error(ctx, logs.ShardCantOpenMetabaseMoveToADegradedMode, zap.Error(err)) - _ = s.setMode(mode.DegradedReadOnly) + _ = s.setMode(ctx, mode.DegradedReadOnly) } return err } @@ -441,11 +441,11 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { } if err != nil { s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) - _ = s.setMode(mode.DegradedReadOnly) + _ = s.setMode(ctx, mode.DegradedReadOnly) return err } } - return s.setMode(c.info.Mode) + return s.setMode(ctx, c.info.Mode) } func (s *Shard) lockExclusive() func() { diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index b8f1d4417..6d2cd7137 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ b/pkg/local_object_storage/shard/control_test.go @@ -86,7 +86,7 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.ReadWrite, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) // Metabase can be opened in read-only => start in ReadOnly mode. 
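Taken together, the control.go changes above make the shard lifecycle fully context-aware. A condensed usage sketch, assuming package shard with imports omitted for brevity; openShard is a hypothetical helper, not a function from the tree.

    // openShard shows the updated call sequence: Open, Init and Close all
    // receive the caller's context now.
    func openShard(ctx context.Context, opts ...Option) (*Shard, error) {
        sh := New(opts...)
        if err := sh.Open(ctx); err != nil {
            return nil, err
        }
        if err := sh.Init(ctx); err != nil {
            _ = sh.Close(ctx)
            return nil, err
        }
        return sh, nil
    }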
allowedMode.Store(int64(os.O_RDONLY)) @@ -95,9 +95,9 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.Error(t, sh.SetMode(mode.ReadWrite)) + require.Error(t, sh.SetMode(context.Background(), mode.ReadWrite)) require.Equal(t, mode.ReadOnly, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) // Metabase is corrupted => start in DegradedReadOnly mode. allowedMode.Store(math.MaxInt64) @@ -106,7 +106,7 @@ func TestShardOpen(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) require.Equal(t, mode.DegradedReadOnly, sh.GetMode()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) } func TestRefillMetabaseCorrupted(t *testing.T) { @@ -146,7 +146,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { putPrm.SetObject(obj) _, err := sh.Put(context.Background(), putPrm) require.NoError(t, err) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) addr := object.AddressOf(obj) // This is copied from `fstree.treePath()` to avoid exporting function just for tests. @@ -170,7 +170,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) { getPrm.SetAddress(addr) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) } func TestRefillMetabase(t *testing.T) { @@ -358,7 +358,7 @@ func TestRefillMetabase(t *testing.T) { phyBefore := c.Phy logicalBefore := c.Logic - err = sh.Close() + err = sh.Close(context.Background()) require.NoError(t, err) sh = New( @@ -379,7 +379,7 @@ func TestRefillMetabase(t *testing.T) { // initialize Blobstor require.NoError(t, sh.Init(context.Background())) - defer sh.Close() + defer sh.Close(context.Background()) checkAllObjs(false) checkObj(object.AddressOf(tombObj), nil) diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go index 574250a93..c9ce93bc5 100644 --- a/pkg/local_object_storage/shard/delete_test.go +++ b/pkg/local_object_storage/shard/delete_test.go @@ -37,7 +37,7 @@ func TestShard_Delete_BigObject(t *testing.T) { func testShard(t *testing.T, hasWriteCache bool, payloadSize int) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 11db5e54e..39073a529 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -79,7 +79,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { sh = New(opts...) 
require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index 2b97111e7..e3670b441 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -34,7 +34,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { return util.NewPseudoWorkerPool() // synchronous event processing })}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() @@ -131,7 +131,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { return util.NewPseudoWorkerPool() // synchronous event processing })}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() lock := testutil.GenerateObjectWithCID(cnr) lock.SetType(objectSDK.TypeLock) @@ -190,7 +190,7 @@ func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool additionalShardOptions: []Option{WithDisabledGC()}, wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() obj := testutil.GenerateObjectWithSize(1024) @@ -254,7 +254,7 @@ func TestGCDontDeleteObjectFromWritecache(t *testing.T) { additionalShardOptions: []Option{WithDisabledGC()}, wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()}, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() obj := testutil.GenerateObjectWithSize(1024) diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go index d0eecf74e..837991b73 100644 --- a/pkg/local_object_storage/shard/get_test.go +++ b/pkg/local_object_storage/shard/get_test.go @@ -30,7 +30,7 @@ func TestShard_Get(t *testing.T) { func testShardGet(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var putPrm PutPrm var getPrm GetPrm diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go index c65bbb1e3..deb3019df 100644 --- a/pkg/local_object_storage/shard/head_test.go +++ b/pkg/local_object_storage/shard/head_test.go @@ -28,7 +28,7 @@ func TestShard_Head(t *testing.T) { func testShardHead(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var putPrm PutPrm var headPrm HeadPrm diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index e27dc0733..6ccae3f53 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -1,6 +1,7 @@ package shard import ( + "context" "errors" "fmt" @@ -30,11 +31,11 @@ func (s *Shard) ID() *ID { } // UpdateID reads shard ID saved in the metabase and updates it if it is missing. 
-func (s *Shard) UpdateID() (err error) { +func (s *Shard) UpdateID(ctx context.Context) (err error) { var idFromMetabase []byte modeDegraded := s.GetMode().NoMetabase() if !modeDegraded { - if idFromMetabase, err = s.metaBase.GetShardID(mode.ReadOnly); err != nil { + if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil { err = fmt.Errorf("failed to read shard id from metabase: %w", err) } } @@ -62,7 +63,7 @@ func (s *Shard) UpdateID() (err error) { } if len(idFromMetabase) == 0 && !modeDegraded { - if setErr := s.metaBase.SetShardID(*s.info.ID, s.GetMode()); setErr != nil { + if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr)) } } diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go index 1353d5d94..1421f0e18 100644 --- a/pkg/local_object_storage/shard/inhume_test.go +++ b/pkg/local_object_storage/shard/inhume_test.go @@ -27,7 +27,7 @@ func TestShard_Inhume(t *testing.T) { func testShardInhume(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 3414dc76a..139b2e316 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -18,14 +18,14 @@ func TestShard_List(t *testing.T) { t.Run("without write cache", func(t *testing.T) { t.Parallel() sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() testShardList(t, sh) }) t.Run("with write cache", func(t *testing.T) { t.Parallel() shWC := newShard(t, true) - defer func() { require.NoError(t, shWC.Close()) }() + defer func() { require.NoError(t, shWC.Close(context.Background())) }() testShardList(t, shWC) }) } diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 595afb60e..7da8b8c28 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -62,7 +62,7 @@ func TestShard_Lock(t *testing.T) { require.NoError(t, sh.Open(context.Background())) require.NoError(t, sh.Init(context.Background())) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) @@ -148,7 +148,7 @@ func TestShard_Lock(t *testing.T) { func TestShard_IsLocked(t *testing.T) { sh := newShard(t, false) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() cnr := cidtest.ID() obj := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index cec5a12ad..5230dcad0 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -201,11 +201,11 @@ func TestCounters(t *testing.T) { dir := t.TempDir() sh, mm := shardWithMetrics(t, dir) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() - sh.SetMode(mode.ReadOnly) + sh.SetMode(context.Background(), mode.ReadOnly) require.Equal(t, 
mode.ReadOnly, mm.mode) - sh.SetMode(mode.ReadWrite) + sh.SetMode(context.Background(), mode.ReadWrite) require.Equal(t, mode.ReadWrite, mm.mode) const objNumber = 10 diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go index 98b4c37b2..901528976 100644 --- a/pkg/local_object_storage/shard/mode.go +++ b/pkg/local_object_storage/shard/mode.go @@ -20,19 +20,21 @@ var ErrDegradedMode = logicerr.New("shard is in degraded mode") // // Returns any error encountered that did not allow // setting shard mode. -func (s *Shard) SetMode(m mode.Mode) error { +func (s *Shard) SetMode(ctx context.Context, m mode.Mode) error { unlock := s.lockExclusive() defer unlock() - return s.setMode(m) + return s.setMode(ctx, m) } -func (s *Shard) setMode(m mode.Mode) error { - s.log.Info(context.Background(), logs.ShardSettingShardMode, +func (s *Shard) setMode(ctx context.Context, m mode.Mode) error { + s.log.Info(ctx, logs.ShardSettingShardMode, zap.Stringer("old_mode", s.info.Mode), zap.Stringer("new_mode", m)) - components := []interface{ SetMode(mode.Mode) error }{ + components := []interface { + SetMode(context.Context, mode.Mode) error + }{ s.metaBase, s.blobStor, } @@ -60,7 +62,7 @@ func (s *Shard) setMode(m mode.Mode) error { if !m.Disabled() { for i := range components { - if err := components[i].SetMode(m); err != nil { + if err := components[i].SetMode(ctx, m); err != nil { return err } } @@ -69,7 +71,7 @@ func (s *Shard) setMode(m mode.Mode) error { s.info.Mode = m s.metricsWriter.SetMode(s.info.Mode) - s.log.Info(context.Background(), logs.ShardShardModeSetSuccessfully, + s.log.Info(ctx, logs.ShardShardModeSetSuccessfully, zap.Stringer("mode", s.info.Mode)) return nil } diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index cc73db316..146e834cc 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -94,7 +94,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { }), }, }) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 124b72a5c..10eb51a28 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -125,7 +125,7 @@ func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLi } } -func (r *rebuilder) Stop(log *logger.Logger) { +func (r *rebuilder) Stop(ctx context.Context, log *logger.Logger) { r.mtx.Lock() defer r.mtx.Unlock() @@ -138,7 +138,7 @@ func (r *rebuilder) Stop(log *logger.Logger) { r.wg.Wait() r.cancel = nil r.done = nil - log.Info(context.Background(), logs.BlobstoreRebuildStopped) + log.Info(ctx, logs.BlobstoreRebuildStopped) } var errMBIsNotAvailable = errors.New("metabase is not available") diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go index 0025bb45a..d90343265 100644 --- a/pkg/local_object_storage/shard/refill_test.go +++ b/pkg/local_object_storage/shard/refill_test.go @@ -34,7 +34,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { additionalShardOptions: []Option{WithRefillMetabaseWorkersCount(shardconfig.RefillMetabaseWorkersCountDefault)}, }) - defer func() { require.NoError(b, sh.Close()) }() + defer func() { require.NoError(b, 
sh.Close(context.Background())) }() var putPrm PutPrm @@ -61,7 +61,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, err) } - require.NoError(b, sh.Close()) + require.NoError(b, sh.Close(context.Background())) require.NoError(b, os.Remove(sh.metaBase.DumpInfo().Path)) require.NoError(b, sh.Open(context.Background())) @@ -72,5 +72,5 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, sh.Init(context.Background())) - require.NoError(b, sh.Close()) + require.NoError(b, sh.Close(context.Background())) } diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go index 7dd7189bb..e563f390b 100644 --- a/pkg/local_object_storage/shard/reload_test.go +++ b/pkg/local_object_storage/shard/reload_test.go @@ -59,7 +59,7 @@ func TestShardReload(t *testing.T) { require.NoError(t, sh.Init(context.Background())) defer func() { - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) }() objects := make([]objAddr, 5) diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go index de00eabd1..b1232707f 100644 --- a/pkg/local_object_storage/shard/shutdown_test.go +++ b/pkg/local_object_storage/shard/shutdown_test.go @@ -52,10 +52,10 @@ func TestWriteCacheObjectLoss(t *testing.T) { }) } require.NoError(t, errG.Wait()) - require.NoError(t, sh.Close()) + require.NoError(t, sh.Close(context.Background())) sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts}) - defer func() { require.NoError(t, sh.Close()) }() + defer func() { require.NoError(t, sh.Close(context.Background())) }() var getPrm GetPrm diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 26f47e82e..92fb493e0 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -117,8 +117,8 @@ func runFlushTest[Option any]( defer func() { require.NoError(t, wc.Close()) }() objects := putObjects(t, wc) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) require.NoError(t, wc.Flush(context.Background(), false, false)) @@ -131,11 +131,11 @@ func runFlushTest[Option any]( objects := putObjects(t, wc) // Blobstor is read-only, so we expect en error from `flush` here. 
- require.Error(t, wc.SetMode(mode.Degraded)) + require.Error(t, wc.SetMode(context.Background(), mode.Degraded)) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) - require.NoError(t, wc.SetMode(mode.Degraded)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, wc.SetMode(context.Background(), mode.Degraded)) check(t, mb, bs, objects) }) @@ -149,8 +149,8 @@ func runFlushTest[Option any]( objects := putObjects(t, wc) f.InjectFn(t, wc) - require.NoError(t, bs.SetMode(mode.ReadWrite)) - require.NoError(t, mb.SetMode(mode.ReadWrite)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadWrite)) require.Equal(t, uint32(0), errCount.Load()) require.Error(t, wc.Flush(context.Background(), false, false)) @@ -191,8 +191,8 @@ func newCache[Option any]( require.NoError(t, wc.Init()) // First set mode for metabase and blobstor to prevent background flushes. - require.NoError(t, mb.SetMode(mode.ReadOnly)) - require.NoError(t, bs.SetMode(mode.ReadOnly)) + require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly)) + require.NoError(t, bs.SetMode(context.Background(), mode.ReadOnly)) return wc, bs, mb } diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 26658e9b8..db789d994 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -23,8 +23,8 @@ type setModePrm struct { // SetMode sets write-cache mode of operation. // When shard is put in read-only mode all objects in memory are flushed to disk // and all background jobs are suspended. -func (c *cache) SetMode(m mode.Mode) error { - ctx, span := tracing.StartSpanFromContext(context.TODO(), "writecache.SetMode", +func (c *cache) SetMode(ctx context.Context, m mode.Mode) error { + ctx, span := tracing.StartSpanFromContext(ctx, "writecache.SetMode", trace.WithAttributes( attribute.String("mode", m.String()), )) diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index a973df604..d07220b68 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -38,7 +38,7 @@ type Cache interface { // Returns ErrReadOnly if the Cache is currently in the read-only mode. 
Delete(context.Context, oid.Address) error Put(context.Context, common.PutPrm) (common.PutRes, error) - SetMode(mode.Mode) error + SetMode(context.Context, mode.Mode) error SetLogger(*logger.Logger) DumpInfo() Info Flush(context.Context, bool, bool) error diff --git a/pkg/services/control/server/detach_shards.go b/pkg/services/control/server/detach_shards.go index a4111bddb..ffd36962b 100644 --- a/pkg/services/control/server/detach_shards.go +++ b/pkg/services/control/server/detach_shards.go @@ -11,7 +11,7 @@ import ( "google.golang.org/grpc/status" ) -func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) { +func (s *Server) DetachShards(ctx context.Context, req *control.DetachShardsRequest) (*control.DetachShardsResponse, error) { err := s.isValidRequest(req) if err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -19,7 +19,7 @@ func (s *Server) DetachShards(_ context.Context, req *control.DetachShardsReques shardIDs := s.getShardIDList(req.GetBody().GetShard_ID()) - if err := s.s.DetachShards(shardIDs); err != nil { + if err := s.s.DetachShards(ctx, shardIDs); err != nil { if errors.As(err, new(logicerr.Logical)) { return nil, status.Error(codes.InvalidArgument, err.Error()) } diff --git a/pkg/services/control/server/set_shard_mode.go b/pkg/services/control/server/set_shard_mode.go index 52835c41d..4f8796263 100644 --- a/pkg/services/control/server/set_shard_mode.go +++ b/pkg/services/control/server/set_shard_mode.go @@ -11,7 +11,7 @@ import ( "google.golang.org/grpc/status" ) -func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) { +func (s *Server) SetShardMode(ctx context.Context, req *control.SetShardModeRequest) (*control.SetShardModeResponse, error) { // verify request err := s.isValidRequest(req) if err != nil { @@ -38,7 +38,7 @@ func (s *Server) SetShardMode(_ context.Context, req *control.SetShardModeReques } for _, shardID := range s.getShardIDList(req.GetBody().GetShard_ID()) { - err = s.s.SetShardMode(shardID, m, req.GetBody().GetResetErrorCounter()) + err = s.s.SetShardMode(ctx, shardID, m, req.GetBody().GetResetErrorCounter()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } From 6921a890619cc23a2eb2d9e5b4e92cc1bc45ee41 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 12:21:01 +0300 Subject: [PATCH 176/591] [#1437] ir: Fix contextcheck linters Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 2 +- cmd/frostfs-node/container.go | 10 +++--- cmd/frostfs-node/netmap.go | 10 +++--- cmd/frostfs-node/session.go | 2 +- cmd/frostfs-node/tree.go | 12 +++---- pkg/innerring/initialization.go | 12 +++---- pkg/innerring/innerring.go | 28 ++++++++-------- pkg/innerring/notary.go | 6 ++-- pkg/innerring/processors/balance/handlers.go | 6 ++-- .../processors/balance/handlers_test.go | 5 +-- .../processors/container/handlers.go | 12 +++---- .../processors/container/handlers_test.go | 5 +-- pkg/innerring/processors/frostfs/handlers.go | 24 +++++++------- .../processors/frostfs/handlers_test.go | 11 ++++--- .../processors/governance/handlers.go | 8 ++--- .../processors/governance/handlers_test.go | 7 ++-- .../processors/governance/process_update.go | 20 ++++++------ .../processors/governance/processor.go | 3 +- pkg/innerring/processors/netmap/handlers.go | 32 +++++++++---------- .../processors/netmap/handlers_test.go | 15 +++++---- .../processors/netmap/process_epoch.go | 24 
+++++++------- pkg/innerring/state.go | 26 +++++++-------- pkg/innerring/state_test.go | 6 ++-- pkg/morph/event/handlers.go | 4 ++- pkg/morph/event/listener.go | 24 +++++++------- pkg/morph/event/listener_test.go | 2 +- pkg/morph/event/utils.go | 6 ++-- 27 files changed, 165 insertions(+), 157 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index bd1b99095..aa92e5ec5 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1089,7 +1089,7 @@ func (c *cfg) LocalAddress() network.AddressGroup { func initLocalStorage(ctx context.Context, c *cfg) { ls := engine.New(c.engineOpts()...) - addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { ls.HandleNewEpoch(ctx, ev.(netmap2.NewEpoch).EpochNumber()) }) diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 1a54f9ffc..3f75be235 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -89,7 +89,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c if c.cfgMorph.containerCacheSize > 0 { containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize) - subscribeToContainerCreation(c, func(e event.Event) { + subscribeToContainerCreation(c, func(ctx context.Context, e event.Event) { ev := e.(containerEvent.PutSuccess) // read owner of the created container in order to update the reading cache. @@ -102,21 +102,21 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c } else { // unlike removal, we expect successful receive of the container // after successful creation, so logging can be useful - c.log.Error(context.Background(), logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, + c.log.Error(ctx, logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification, zap.Stringer("id", ev.ID), zap.Error(err), ) } - c.log.Debug(context.Background(), logs.FrostFSNodeContainerCreationEventsReceipt, + c.log.Debug(ctx, logs.FrostFSNodeContainerCreationEventsReceipt, zap.Stringer("id", ev.ID), ) }) - subscribeToContainerRemoval(c, func(e event.Event) { + subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { ev := e.(containerEvent.DeleteSuccess) containerCache.handleRemoval(ev.ID) - c.log.Debug(context.Background(), logs.FrostFSNodeContainerRemovalEventsReceipt, + c.log.Debug(ctx, logs.FrostFSNodeContainerRemovalEventsReceipt, zap.Stringer("id", ev.ID), ) }) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 18667e636..35ab4d575 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -175,11 +175,11 @@ func initNetmapService(ctx context.Context, c *cfg) { } func addNewEpochNotificationHandlers(c *cfg) { - addNewEpochNotificationHandler(c, func(ev event.Event) { + addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { c.cfgNetmap.state.setCurrentEpoch(ev.(netmapEvent.NewEpoch).EpochNumber()) }) - addNewEpochAsyncNotificationHandler(c, func(ev event.Event) { + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { e := ev.(netmapEvent.NewEpoch).EpochNumber() c.updateContractNodeInfo(e) @@ -189,15 +189,15 @@ func addNewEpochNotificationHandlers(c *cfg) { } if err := c.bootstrap(); err != nil { - c.log.Warn(context.Background(), logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) + c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) 
} }) if c.cfgMorph.notaryEnabled { - addNewEpochAsyncNotificationHandler(c, func(_ event.Event) { + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { _, _, err := makeNotaryDeposit(c) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotMakeNotaryDeposit, + c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), ) } diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index a35d4e470..2f3c9cbfe 100644 --- a/cmd/frostfs-node/session.go +++ b/cmd/frostfs-node/session.go @@ -48,7 +48,7 @@ func initSessionService(c *cfg) { _ = c.privateTokenStore.Close() }) - addNewEpochNotificationHandler(c, func(ev event.Event) { + addNewEpochNotificationHandler(c, func(_ context.Context, ev event.Event) { c.privateTokenStore.RemoveOld(ev.(netmap.NewEpoch).EpochNumber()) }) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 59923ee2f..c423c0660 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -80,10 +80,10 @@ func initTreeService(c *cfg) { })) if d := treeConfig.SyncInterval(); d == 0 { - addNewEpochNotificationHandler(c, func(_ event.Event) { + addNewEpochNotificationHandler(c, func(ctx context.Context, _ event.Event) { err := c.treeService.SynchronizeAll() if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCouldNotSynchronizeTreeService, zap.Error(err)) } }) } else { @@ -103,15 +103,15 @@ func initTreeService(c *cfg) { }() } - subscribeToContainerRemoval(c, func(e event.Event) { + subscribeToContainerRemoval(c, func(ctx context.Context, e event.Event) { ev := e.(containerEvent.DeleteSuccess) // This is executed asynchronously, so we don't care about the operation taking some time. - c.log.Debug(context.Background(), logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) - err := c.treeService.DropTree(context.Background(), ev.ID, "") + c.log.Debug(ctx, logs.FrostFSNodeRemovingAllTreesForContainer, zap.Stringer("cid", ev.ID)) + err := c.treeService.DropTree(ctx, ev.ID, "") if err != nil && !errors.Is(err, pilorama.ErrTreeNotFound) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. 
- c.log.Error(context.Background(), logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, + c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), zap.String("error", err.Error())) } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index b8812819e..e08a613c3 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -137,12 +137,12 @@ func (s *Server) enableNotarySupport() error { return nil } -func (s *Server) initNotaryConfig() { +func (s *Server) initNotaryConfig(ctx context.Context) { s.mainNotaryConfig = notaryConfigs( !s.withoutMainNet && s.mainnetClient.ProbeNotary(), // if mainnet disabled then notary flag must be disabled too ) - s.log.Info(context.Background(), logs.InnerringNotarySupport, + s.log.Info(ctx, logs.InnerringNotarySupport, zap.Bool("sidechain_enabled", true), zap.Bool("mainchain_enabled", !s.mainNotaryConfig.disabled), ) @@ -152,8 +152,8 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli var alphaSync event.Handler if s.withoutMainNet || cfg.GetBool("governance.disable") { - alphaSync = func(event.Event) { - s.log.Debug(context.Background(), logs.InnerringAlphabetKeysSyncIsDisabled) + alphaSync = func(ctx context.Context, _ event.Event) { + s.log.Debug(ctx, logs.InnerringAlphabetKeysSyncIsDisabled) } } else { // create governance processor @@ -196,9 +196,9 @@ func (s *Server) createIRFetcher() irFetcher { return irf } -func (s *Server) initTimers(cfg *viper.Viper) { +func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) { s.epochTimer = newEpochTimer(&epochTimerArgs{ - newEpochHandlers: s.newEpochTickHandlers(), + newEpochHandlers: s.newEpochTickHandlers(ctx), epoch: s, }) diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 67927c10c..e81ec6bca 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -152,7 +152,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { return err } - err = s.initConfigFromBlockchain() + err = s.initConfigFromBlockchain(ctx) if err != nil { return err } @@ -173,14 +173,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { prm.Validators = s.predefinedValidators // vote for sidechain validator if it is prepared in config - err = s.voteForSidechainValidator(prm) + err = s.voteForSidechainValidator(ctx, prm) if err != nil { // we don't stop inner ring execution on this error s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, zap.String("error", err.Error())) } - s.tickInitialExpoch() + s.tickInitialExpoch(ctx) morphErr := make(chan error) mainnnetErr := make(chan error) @@ -283,11 +283,11 @@ func (s *Server) initSideNotary(ctx context.Context) error { ) } -func (s *Server) tickInitialExpoch() { +func (s *Server) tickInitialExpoch(ctx context.Context) { initialEpochTicker := timer.NewOneTickTimer( timer.StaticBlockMeter(s.initialEpochTickDelta), func() { - s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{}) + s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) }) s.addBlockTimer(initialEpochTicker) } @@ -376,7 +376,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - server.initNotaryConfig() + server.initNotaryConfig(ctx) err = server.initContracts(cfg) if err != nil { @@ -405,7 +405,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return 
nil, err } - server.initTimers(cfg) + server.initTimers(ctx, cfg) err = server.initGRPCServer(cfg, log, audit) if err != nil { @@ -573,7 +573,7 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe return nc } -func (s *Server) initConfigFromBlockchain() error { +func (s *Server) initConfigFromBlockchain(ctx context.Context) error { // get current epoch epoch, err := s.netmapClient.Epoch() if err != nil { @@ -602,8 +602,8 @@ func (s *Server) initConfigFromBlockchain() error { return err } - s.log.Debug(context.Background(), logs.InnerringReadConfigFromBlockchain, - zap.Bool("active", s.IsActive()), + s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain, + zap.Bool("active", s.IsActive(ctx)), zap.Bool("alphabet", s.IsAlphabet()), zap.Uint64("epoch", epoch), zap.Uint32("precision", balancePrecision), @@ -635,17 +635,17 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) { // onlyAlphabet wrapper around event handler that executes it // only if inner ring node is alphabet node. func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { - return func(ev event.Event) { + return func(ctx context.Context, ev event.Event) { if s.IsAlphabet() { - f(ev) + f(ctx, ev) } } } -func (s *Server) newEpochTickHandlers() []newEpochHandler { +func (s *Server) newEpochTickHandlers(ctx context.Context) []newEpochHandler { newEpochHandlers := []newEpochHandler{ func() { - s.netmapProcessor.HandleNewEpochTick(timerEvent.NewEpochTick{}) + s.netmapProcessor.HandleNewEpochTick(ctx, timerEvent.NewEpochTick{}) }, } diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index 902a4c30a..dd3afa2c2 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -50,16 +50,16 @@ func (s *Server) depositSideNotary() (util.Uint256, error) { return tx, err } -func (s *Server) notaryHandler(_ event.Event) { +func (s *Server) notaryHandler(ctx context.Context, _ event.Event) { if !s.mainNotaryConfig.disabled { _, err := s.depositMainNotary() if err != nil { - s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) + s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } if _, err := s.depositSideNotary(); err != nil { - s.log.Error(context.Background(), logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) + s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index 3792fc2af..5a89e6f7c 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -11,9 +11,9 @@ import ( "go.uber.org/zap" ) -func (bp *Processor) handleLock(ev event.Event) { +func (bp *Processor) handleLock(ctx context.Context, ev event.Event) { lock := ev.(balanceEvent.Lock) - bp.log.Info(context.Background(), logs.Notification, + bp.log.Info(ctx, logs.Notification, zap.String("type", "lock"), zap.String("value", hex.EncodeToString(lock.ID()))) @@ -24,7 +24,7 @@ func (bp *Processor) handleLock(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - bp.log.Warn(context.Background(), logs.BalanceBalanceWorkerPoolDrained, + bp.log.Warn(ctx, logs.BalanceBalanceWorkerPoolDrained, zap.Int("capacity", bp.pool.Cap())) } } diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go index 86a9e15d0..3ef4959cc 100644 --- 
a/pkg/innerring/processors/balance/handlers_test.go +++ b/pkg/innerring/processors/balance/handlers_test.go @@ -1,6 +1,7 @@ package balance import ( + "context" "testing" "time" @@ -30,7 +31,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) { }) require.NoError(t, err, "failed to create processor") - processor.handleLock(balanceEvent.Lock{}) + processor.handleLock(context.Background(), balanceEvent.Lock{}) for processor.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -56,7 +57,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) { }) require.NoError(t, err, "failed to create processor") - processor.handleLock(balanceEvent.Lock{}) + processor.handleLock(context.Background(), balanceEvent.Lock{}) for processor.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index b3d50d9d0..45cac513a 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -12,11 +12,11 @@ import ( "go.uber.org/zap" ) -func (cp *Processor) handlePut(ev event.Event) { +func (cp *Processor) handlePut(ctx context.Context, ev event.Event) { put := ev.(putEvent) id := sha256.Sum256(put.Container()) - cp.log.Info(context.Background(), logs.Notification, + cp.log.Info(ctx, logs.Notification, zap.String("type", "container put"), zap.String("id", base58.Encode(id[:]))) @@ -27,14 +27,14 @@ func (cp *Processor) handlePut(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } -func (cp *Processor) handleDelete(ev event.Event) { +func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) { del := ev.(containerEvent.Delete) - cp.log.Info(context.Background(), logs.Notification, + cp.log.Info(ctx, logs.Notification, zap.String("type", "container delete"), zap.String("id", base58.Encode(del.ContainerID()))) @@ -45,7 +45,7 @@ func (cp *Processor) handleDelete(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - cp.log.Warn(context.Background(), logs.ContainerContainerProcessorWorkerPoolDrained, + cp.log.Warn(ctx, logs.ContainerContainerProcessorWorkerPoolDrained, zap.Int("capacity", cp.pool.Cap())) } } diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index dc1e919bb..a2fe50fa8 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/ecdsa" "encoding/hex" "testing" @@ -71,7 +72,7 @@ func TestPutEvent(t *testing.T) { nr: nr, } - proc.handlePut(event) + proc.handlePut(context.Background(), event) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -143,7 +144,7 @@ func TestDeleteEvent(t *testing.T) { Signature: signature, } - proc.handleDelete(ev) + proc.handleDelete(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index 02dfbaf60..d11ad0f5c 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -13,11 +13,11 @@ 
import ( "go.uber.org/zap" ) -func (np *Processor) handleDeposit(ev event.Event) { +func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) { deposit := ev.(frostfsEvent.Deposit) depositIDBin := bytes.Clone(deposit.ID()) slices.Reverse(depositIDBin) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "deposit"), zap.String("id", hex.EncodeToString(depositIDBin))) @@ -28,16 +28,16 @@ func (np *Processor) handleDeposit(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleWithdraw(ev event.Event) { +func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) { withdraw := ev.(frostfsEvent.Withdraw) withdrawBin := bytes.Clone(withdraw.ID()) slices.Reverse(withdrawBin) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "withdraw"), zap.String("id", hex.EncodeToString(withdrawBin))) @@ -48,14 +48,14 @@ func (np *Processor) handleWithdraw(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleCheque(ev event.Event) { +func (np *Processor) handleCheque(ctx context.Context, ev event.Event) { cheque := ev.(frostfsEvent.Cheque) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "cheque"), zap.String("id", hex.EncodeToString(cheque.ID()))) @@ -66,14 +66,14 @@ func (np *Processor) handleCheque(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleConfig(ev event.Event) { +func (np *Processor) handleConfig(ctx context.Context, ev event.Event) { cfg := ev.(frostfsEvent.Config) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "set config"), zap.String("key", hex.EncodeToString(cfg.Key())), zap.String("value", hex.EncodeToString(cfg.Value()))) @@ -85,7 +85,7 @@ func (np *Processor) handleConfig(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.FrostFSFrostfsProcessorWorkerPoolDrained, + np.log.Warn(ctx, logs.FrostFSFrostfsProcessorWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go index 6425172bd..c1541ca40 100644 --- a/pkg/innerring/processors/frostfs/handlers_test.go +++ b/pkg/innerring/processors/frostfs/handlers_test.go @@ -1,6 +1,7 @@ package frostfs import ( + "context" "testing" "time" @@ -36,7 +37,7 @@ func TestHandleDeposit(t *testing.T) { AmountValue: 1000, } - proc.handleDeposit(ev) + proc.handleDeposit(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -57,7 +58,7 @@ func TestHandleDeposit(t 
*testing.T) { es.epochCounter = 109 - proc.handleDeposit(ev) + proc.handleDeposit(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -98,7 +99,7 @@ func TestHandleWithdraw(t *testing.T) { AmountValue: 1000, } - proc.handleWithdraw(ev) + proc.handleWithdraw(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -139,7 +140,7 @@ func TestHandleCheque(t *testing.T) { LockValue: util.Uint160{200}, } - proc.handleCheque(ev) + proc.handleCheque(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -176,7 +177,7 @@ func TestHandleConfig(t *testing.T) { TxHashValue: util.Uint256{100}, } - proc.handleConfig(ev) + proc.handleConfig(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) diff --git a/pkg/innerring/processors/governance/handlers.go b/pkg/innerring/processors/governance/handlers.go index dee8c13e2..7e8ab629d 100644 --- a/pkg/innerring/processors/governance/handlers.go +++ b/pkg/innerring/processors/governance/handlers.go @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" ) -func (gp *Processor) HandleAlphabetSync(e event.Event) { +func (gp *Processor) HandleAlphabetSync(ctx context.Context, e event.Event) { var ( typ string hash util.Uint256 @@ -34,16 +34,16 @@ func (gp *Processor) HandleAlphabetSync(e event.Event) { return } - gp.log.Info(context.Background(), logs.GovernanceNewEvent, zap.String("type", typ)) + gp.log.Info(ctx, logs.GovernanceNewEvent, zap.String("type", typ)) // send event to the worker pool err := processors.SubmitEvent(gp.pool, gp.metrics, "alphabet_sync", func() bool { - return gp.processAlphabetSync(hash) + return gp.processAlphabetSync(ctx, hash) }) if err != nil { // there system can be moved into controlled degradation stage - gp.log.Warn(context.Background(), logs.GovernanceGovernanceWorkerPoolDrained, + gp.log.Warn(ctx, logs.GovernanceGovernanceWorkerPoolDrained, zap.Int("capacity", gp.pool.Cap())) } } diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 87040bdef..286935129 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -1,6 +1,7 @@ package governance import ( + "context" "encoding/binary" "sort" "testing" @@ -57,7 +58,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) { txHash: util.Uint256{100}, } - proc.HandleAlphabetSync(ev) + proc.HandleAlphabetSync(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -133,7 +134,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) { Role: noderoles.NeoFSAlphabet, } - proc.HandleAlphabetSync(ev) + proc.HandleAlphabetSync(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -226,7 +227,7 @@ type testVoter struct { votes []VoteValidatorPrm } -func (v *testVoter) VoteForSidechainValidator(prm VoteValidatorPrm) error { +func (v *testVoter) VoteForSidechainValidator(_ context.Context, prm VoteValidatorPrm) error { v.votes = append(v.votes, prm) return nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index faca22f67..fdfdfa479 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -19,39 +19,39 @@ const ( alphabetUpdateIDPrefix = "AlphabetUpdate" ) -func (gp *Processor) 
processAlphabetSync(txHash util.Uint256) bool { +func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool { if !gp.alphabetState.IsAlphabet() { - gp.log.Info(context.Background(), logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) + gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return true } mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromMainNet, + gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, zap.String("error", err.Error())) return false } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantFetchAlphabetListFromSideChain, + gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain, zap.String("error", err.Error())) return false } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, + gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, zap.String("error", err.Error())) return false } if newAlphabet == nil { - gp.log.Info(context.Background(), logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) + gp.log.Info(ctx, logs.GovernanceNoGovernanceUpdateAlphabetListHasNotBeenChanged) return true } - gp.log.Info(context.Background(), logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, + gp.log.Info(ctx, logs.GovernanceAlphabetListHasBeenChangedStartingUpdate, zap.String("side_chain_alphabet", prettyKeys(sidechainAlphabet)), zap.String("new_alphabet", prettyKeys(newAlphabet)), ) @@ -62,9 +62,9 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { } // 1. Vote to sidechain committee via alphabet contracts. - err = gp.voter.VoteForSidechainValidator(votePrm) + err = gp.voter.VoteForSidechainValidator(ctx, votePrm) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantVoteForSideChainCommittee, + gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee, zap.String("error", err.Error())) } @@ -77,7 +77,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) bool { // 4. Update FrostFS contract in the mainnet. gp.updateFrostFSContractInMainnet(newAlphabet) - gp.log.Info(context.Background(), logs.GovernanceFinishedAlphabetListUpdate) + gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate) return true } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 6daea417e..eaadfdb4f 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -1,6 +1,7 @@ package governance import ( + "context" "errors" "fmt" @@ -38,7 +39,7 @@ type VoteValidatorPrm struct { // Voter is a callback interface for alphabet contract voting. 
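The Voter contract defined just below now receives a context as well, and an implementation is expected to forward it into the underlying contract invocation. A minimal hypothetical implementation (recordingVoter is illustrative; compare testVoter in the tests above):

    // recordingVoter is a hypothetical Voter that only records the request;
    // a real implementation would pass ctx into the alphabet contract call.
    type recordingVoter struct {
        votes []VoteValidatorPrm
    }

    func (v *recordingVoter) VoteForSidechainValidator(ctx context.Context, prm VoteValidatorPrm) error {
        if err := ctx.Err(); err != nil {
            return err // honour cancellation before doing any chain I/O
        }
        v.votes = append(v.votes, prm)
        return nil
    }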
type Voter interface { - VoteForSidechainValidator(VoteValidatorPrm) error + VoteForSidechainValidator(context.Context, VoteValidatorPrm) error } type ( diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index 478ab5eab..61547e0ba 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -12,13 +12,13 @@ import ( "go.uber.org/zap" ) -func (np *Processor) HandleNewEpochTick(ev event.Event) { +func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) { _ = ev.(timerEvent.NewEpochTick) np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool - err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", np.processNewEpochTick) + err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) }) if err != nil { // there system can be moved into controlled degradation stage np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, @@ -26,28 +26,28 @@ func (np *Processor) HandleNewEpochTick(ev event.Event) { } } -func (np *Processor) handleNewEpoch(ev event.Event) { +func (np *Processor) handleNewEpoch(ctx context.Context, ev event.Event) { epochEvent := ev.(netmapEvent.NewEpoch) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "new epoch"), zap.Uint64("value", epochEvent.EpochNumber())) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch", func() bool { - return np.processNewEpoch(epochEvent) + return np.processNewEpoch(ctx, epochEvent) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleAddPeer(ev event.Event) { +func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) { newPeer := ev.(netmapEvent.AddPeer) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "add peer"), ) @@ -58,14 +58,14 @@ func (np *Processor) handleAddPeer(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleUpdateState(ev event.Event) { +func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) { updPeer := ev.(netmapEvent.UpdatePeer) - np.log.Info(context.Background(), logs.Notification, + np.log.Info(ctx, logs.Notification, zap.String("type", "update peer state"), zap.String("key", hex.EncodeToString(updPeer.PublicKey().Bytes()))) @@ -76,21 +76,21 @@ func (np *Processor) handleUpdateState(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } -func (np *Processor) handleCleanupTick(ev event.Event) { +func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) { if !np.netmapSnapshot.enabled { - np.log.Debug(context.Background(), 
logs.NetmapNetmapCleanUpRoutineIsDisabled518) + np.log.Debug(ctx, logs.NetmapNetmapCleanUpRoutineIsDisabled518) return } cleanup := ev.(netmapCleanupTick) - np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "netmap cleaner")) + np.log.Info(ctx, logs.NetmapTick, zap.String("type", "netmap cleaner")) // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { @@ -98,7 +98,7 @@ func (np *Processor) handleCleanupTick(ev event.Event) { }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index a53458179..1e8be4095 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "testing" "time" @@ -38,7 +39,7 @@ func TestNewEpochTick(t *testing.T) { require.NoError(t, err, "failed to create processor") ev := timerEvent.NewEpochTick{} - proc.HandleNewEpochTick(ev) + proc.HandleNewEpochTick(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -90,7 +91,7 @@ func TestNewEpoch(t *testing.T) { Num: 101, Hash: util.Uint256{101}, } - proc.handleNewEpoch(ev) + proc.handleNewEpoch(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -130,7 +131,7 @@ func TestAddPeer(t *testing.T) { MainTransaction: &transaction.Transaction{}, }, } - proc.handleAddPeer(ev) + proc.handleAddPeer(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -145,7 +146,7 @@ func TestAddPeer(t *testing.T) { MainTransaction: &transaction.Transaction{}, }, } - proc.handleAddPeer(ev) + proc.handleAddPeer(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -188,7 +189,7 @@ func TestUpdateState(t *testing.T) { MainTransaction: &transaction.Transaction{}, }, } - proc.handleUpdateState(ev) + proc.handleUpdateState(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -232,7 +233,7 @@ func TestCleanupTick(t *testing.T) { txHash: util.Uint256{123}, } - proc.handleCleanupTick(ev) + proc.handleCleanupTick(context.Background(), ev) for proc.pool.Running() > 0 { time.Sleep(10 * time.Millisecond) @@ -413,6 +414,6 @@ type testEventHandler struct { handledEvents []event.Event } -func (h *testEventHandler) Handle(e event.Event) { +func (h *testEventHandler) Handle(_ context.Context, e event.Event) { h.handledEvents = append(h.handledEvents, e) } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 8ad295a74..e401ef4f2 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -11,12 +11,12 @@ import ( // Process new epoch notification by setting global epoch value and resetting // local epoch timer. 
-func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { +func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool { epoch := ev.EpochNumber() epochDuration, err := np.netmapClient.EpochDuration() if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantGetEpochDuration, + np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, zap.String("error", err.Error())) } else { np.epochState.SetEpochDuration(epochDuration) @@ -26,46 +26,46 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) bool { h, err := np.netmapClient.MorphTxHeight(ev.TxHash()) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantGetTransactionHeight, + np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), zap.String("error", err.Error())) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { - np.log.Warn(context.Background(), logs.NetmapCantResetEpochTimer, + np.log.Warn(ctx, logs.NetmapCantResetEpochTimer, zap.String("error", err.Error())) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantGetNetmapSnapshotToPerformCleanup, + np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.String("error", err.Error())) return false } np.netmapSnapshot.update(*networkMap, epoch) - np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) - np.handleAlphabetSync(governance.NewSyncEvent(ev.TxHash())) - np.handleNotaryDeposit(ev) + np.handleCleanupTick(ctx, netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()}) + np.handleAlphabetSync(ctx, governance.NewSyncEvent(ev.TxHash())) + np.handleNotaryDeposit(ctx, ev) return true } // Process new epoch tick by invoking new epoch method in network map contract. -func (np *Processor) processNewEpochTick() bool { +func (np *Processor) processNewEpochTick(ctx context.Context) bool { if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewEpochTick) + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return true } nextEpoch := np.epochState.EpochCounter() + 1 - np.log.Debug(context.Background(), logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) + np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) err := np.netmapClient.NewEpoch(nextEpoch) if err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) return false } diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 2dbcd7494..85f332fb6 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -48,8 +48,8 @@ func (s *Server) SetEpochDuration(val uint64) { } // IsActive is a getter for a global active flag state. -func (s *Server) IsActive() bool { - return s.InnerRingIndex() >= 0 +func (s *Server) IsActive(ctx context.Context) bool { + return s.InnerRingIndex(ctx) >= 0 } // IsAlphabet is a getter for a global alphabet flag state. @@ -59,10 +59,10 @@ func (s *Server) IsAlphabet() bool { // InnerRingIndex is a getter for a global index of node in inner ring list. Negative // index means that node is not in the inner ring list. 
-func (s *Server) InnerRingIndex() int { +func (s *Server) InnerRingIndex(ctx context.Context) int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error(context.Background(), logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) return -1 } @@ -71,10 +71,10 @@ func (s *Server) InnerRingIndex() int { // InnerRingSize is a getter for a global size of inner ring list. This value // paired with inner ring index. -func (s *Server) InnerRingSize() int { +func (s *Server) InnerRingSize(ctx context.Context) int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error(context.Background(), logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) return 0 } @@ -93,18 +93,18 @@ func (s *Server) AlphabetIndex() int { return int(index) } -func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) error { +func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error { validators := prm.Validators - index := s.InnerRingIndex() + index := s.InnerRingIndex(ctx) if s.contracts.alphabet.indexOutOfRange(index) { - s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) + s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteNodeNotInAlphabetRange) return nil } if len(validators) == 0 { - s.log.Info(context.Background(), logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) + s.log.Info(ctx, logs.InnerringIgnoreValidatorVoteEmptyValidatorsList) return nil } @@ -129,7 +129,7 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { - s.log.Warn(context.Background(), logs.InnerringCantInvokeVoteMethodInAlphabetContract, + s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), zap.String("error", err.Error())) @@ -141,9 +141,9 @@ func (s *Server) voteForSidechainValidator(prm governance.VoteValidatorPrm) erro // VoteForSidechainValidator calls vote method on alphabet contracts with // the provided list of keys. 
-func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error { +func (s *Server) VoteForSidechainValidator(ctx context.Context, prm governance.VoteValidatorPrm) error { sort.Sort(prm.Validators) - return s.voteForSidechainValidator(prm) + return s.voteForSidechainValidator(ctx, prm) } // ResetEpochTimer resets the block timer that produces events to update epoch diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go index 9313edf78..17ab995af 100644 --- a/pkg/innerring/state_test.go +++ b/pkg/innerring/state_test.go @@ -46,9 +46,9 @@ func TestServerState(t *testing.T) { srv.setHealthStatus(context.Background(), healthStatus) require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") - require.True(t, srv.IsActive(), "invalid IsActive result") + require.True(t, srv.IsActive(context.Background()), "invalid IsActive result") require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result") - require.Equal(t, 0, srv.InnerRingIndex(), "invalid IR index") - require.Equal(t, 1, srv.InnerRingSize(), "invalid IR index") + require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index") + require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index") require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index") } diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index 182b4667e..bda83ba54 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -1,11 +1,13 @@ package event import ( + "context" + "github.com/nspcc-dev/neo-go/pkg/core/block" ) // Handler is an Event processing function. -type Handler func(Event) +type Handler func(context.Context, Event) // BlockHandler is a chain block processing function. type BlockHandler func(*block.Block) diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 3d3d806a4..eeec46540 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -280,7 +280,7 @@ loop: continue loop } - l.handleNotaryEvent(notaryEvent) + l.handleNotaryEvent(ctx, notaryEvent) case b, ok := <-chs.BlockCh: if !ok { l.log.Warn(ctx, logs.EventStopEventListenerByBlockChannel) @@ -307,11 +307,11 @@ func (l *listener) handleBlockEvent(b *block.Block) { } } -func (l *listener) handleNotaryEvent(notaryEvent *result.NotaryRequestEvent) { +func (l *listener) handleNotaryEvent(ctx context.Context, notaryEvent *result.NotaryRequestEvent) { if err := l.pool.Submit(func() { - l.parseAndHandleNotary(notaryEvent) + l.parseAndHandleNotary(ctx, notaryEvent) }); err != nil { - l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } @@ -376,11 +376,11 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent * } for _, handler := range handlers { - handler(event) + handler(ctx, event) } } -func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { +func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRequestEvent) { // prepare the notary event notaryEvent, err := l.notaryEventsPreparator.Prepare(nr.NotaryRequest) if err != nil { @@ -388,13 +388,13 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { switch { case errors.Is(err, ErrTXAlreadyHandled): case errors.As(err, &expErr): - l.log.Warn(context.Background(), logs.EventSkipExpiredMainTXNotaryEvent, + l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent, zap.String("error", err.Error()), 
zap.Uint32("current_block_height", expErr.CurrentBlockHeight), zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight), ) default: - l.log.Warn(context.Background(), logs.EventCouldNotPrepareAndValidateNotaryEvent, + l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent, zap.String("error", err.Error()), ) } @@ -418,7 +418,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Debug(context.Background(), logs.EventNotaryParserNotSet) + log.Debug(ctx, logs.EventNotaryParserNotSet) return } @@ -426,7 +426,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { // parse the notary event event, err := parser(notaryEvent) if err != nil { - log.Warn(context.Background(), logs.EventCouldNotParseNotaryEvent, + log.Warn(ctx, logs.EventCouldNotParseNotaryEvent, zap.String("error", err.Error()), ) @@ -439,14 +439,14 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { l.mtx.RUnlock() if !ok { - log.Info(context.Background(), logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, + log.Info(ctx, logs.EventNotaryHandlersForParsedNotificationEventWereNotRegistered, zap.Any("event", event), ) return } - handler(event) + handler(ctx, event) } // SetNotificationParser sets the parser of particular contract event. diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index 5f7cf9f43..214daf694 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -59,7 +59,7 @@ func TestEventHandling(t *testing.T) { handledNotifications := make([]Event, 0) l.RegisterNotificationHandler(NotificationHandlerInfo{ scriptHashWithType: key, - h: func(e Event) { + h: func(_ context.Context, e Event) { handledNotifications = append(handledNotifications, e) notificationHandled <- true }, diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 31bbf4432..99ea9a7f0 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -85,12 +85,12 @@ func (s typeValue) GetType() Type { // WorkerPoolHandler sets closure over worker pool w with passed handler h. 
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler { - return func(e Event) { + return func(ctx context.Context, e Event) { err := w.Submit(func() { - h(e) + h(ctx, e) }) if err != nil { - log.Warn(context.Background(), logs.EventCouldNotSubmitHandlerToWorkerPool, + log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool, zap.String("error", err.Error()), ) } From 74295532665e32efc80fcdcde639b2d9bc872a88 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Oct 2024 16:27:28 +0300 Subject: [PATCH 177/591] [#1437] node: Fix contextcheck linter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-ir/config.go | 4 +- cmd/frostfs-ir/httpcomponent.go | 30 +++++----- cmd/frostfs-ir/main.go | 12 ++-- cmd/frostfs-ir/pprof.go | 18 +++--- .../internal/blobovnicza/inspect.go | 2 +- cmd/frostfs-lens/internal/blobovnicza/list.go | 2 +- cmd/frostfs-lens/internal/meta/inspect.go | 2 +- .../internal/meta/list-garbage.go | 2 +- .../internal/meta/list-graveyard.go | 2 +- cmd/frostfs-node/config.go | 60 +++++++++---------- cmd/frostfs-node/container.go | 8 +-- cmd/frostfs-node/control.go | 22 +++---- cmd/frostfs-node/grpc.go | 56 ++++++++--------- cmd/frostfs-node/httpcomponent.go | 10 ++-- cmd/frostfs-node/main.go | 56 ++++++++--------- cmd/frostfs-node/morph.go | 8 +-- cmd/frostfs-node/netmap.go | 48 +++++++-------- cmd/frostfs-node/object.go | 4 +- cmd/frostfs-node/pprof.go | 5 +- cmd/frostfs-node/runtime.go | 6 +- internal/audit/request.go | 8 +-- pkg/core/object/fmt.go | 12 ++-- pkg/core/object/sender_classifier.go | 9 +-- pkg/innerring/blocktimer.go | 6 +- pkg/innerring/initialization.go | 49 +++++++++------ pkg/innerring/innerring.go | 20 +++---- pkg/innerring/notary.go | 13 ++-- pkg/innerring/processors/alphabet/handlers.go | 8 +-- .../processors/alphabet/handlers_test.go | 11 ++-- .../processors/alphabet/process_emit.go | 32 +++++----- .../processors/alphabet/processor.go | 8 +-- pkg/innerring/processors/balance/handlers.go | 2 +- .../processors/balance/handlers_test.go | 4 +- .../processors/balance/process_assets.go | 10 ++-- pkg/innerring/processors/balance/processor.go | 8 +-- .../processors/container/handlers.go | 4 +- .../processors/container/handlers_test.go | 2 +- .../processors/container/process_container.go | 26 ++++---- .../processors/container/processor.go | 6 +- pkg/innerring/processors/frostfs/handlers.go | 8 +-- .../processors/frostfs/handlers_test.go | 10 ++-- .../processors/frostfs/process_assets.go | 40 ++++++------- .../processors/frostfs/process_config.go | 10 ++-- pkg/innerring/processors/frostfs/processor.go | 14 ++--- .../processors/governance/handlers_test.go | 8 +-- .../processors/governance/process_update.go | 32 +++++----- .../processors/governance/processor.go | 8 +-- pkg/innerring/processors/netmap/handlers.go | 10 ++-- .../processors/netmap/handlers_test.go | 6 +- .../processors/netmap/process_cleanup.go | 15 ++--- .../processors/netmap/process_epoch.go | 4 +- .../processors/netmap/process_peers.go | 27 +++++---- pkg/innerring/processors/netmap/processor.go | 10 +--- pkg/innerring/processors/netmap/wrappers.go | 18 +++--- pkg/innerring/state.go | 10 ++-- pkg/innerring/state_test.go | 4 +- .../blobovnicza/blobovnicza_test.go | 4 +- .../blobovnicza/control.go | 22 +++---- .../blobovnicza/get_test.go | 6 +- .../blobovnicza/iterate_test.go | 2 +- .../blobstor/blobovniczatree/active.go | 14 ++--- .../blobstor/blobovniczatree/blobovnicza.go | 2 +- .../blobstor/blobovniczatree/cache.go | 4 +- .../blobovniczatree/concurrency_test.go | 2 +- 
.../blobstor/blobovniczatree/control.go | 6 +- .../blobstor/blobovniczatree/control_test.go | 6 +- .../blobstor/blobovniczatree/count.go | 2 +- .../blobstor/blobovniczatree/delete.go | 4 +- .../blobstor/blobovniczatree/exists.go | 2 +- .../blobstor/blobovniczatree/exists_test.go | 2 +- .../blobstor/blobovniczatree/get.go | 4 +- .../blobstor/blobovniczatree/get_range.go | 4 +- .../blobstor/blobovniczatree/iterate.go | 2 +- .../blobstor/blobovniczatree/manager.go | 16 ++--- .../blobstor/blobovniczatree/option.go | 5 +- .../blobstor/blobovniczatree/put.go | 6 +- .../blobstor/blobovniczatree/rebuild.go | 28 ++++----- .../blobovniczatree/rebuild_failover_test.go | 30 +++++----- .../blobstor/blobovniczatree/rebuild_test.go | 18 +++--- pkg/local_object_storage/blobstor/blobstor.go | 3 +- .../blobstor/blobstor_test.go | 14 ++--- .../blobstor/common/storage.go | 4 +- pkg/local_object_storage/blobstor/control.go | 12 ++-- .../blobstor/exists_test.go | 2 +- .../blobstor/fstree/control.go | 4 +- .../blobstor/fstree/fstree.go | 2 +- .../blobstor/fstree/fstree_test.go | 2 +- .../blobstor/internal/blobstortest/control.go | 2 +- .../blobstor/internal/blobstortest/delete.go | 2 +- .../blobstor/internal/blobstortest/exists.go | 2 +- .../blobstor/internal/blobstortest/get.go | 2 +- .../internal/blobstortest/get_range.go | 2 +- .../blobstor/internal/blobstortest/iterate.go | 2 +- .../blobstor/iterate_test.go | 6 +- .../blobstor/memstore/control.go | 18 +++--- .../blobstor/memstore/memstore_test.go | 2 +- pkg/local_object_storage/blobstor/mode.go | 4 +- .../blobstor/perf_test.go | 6 +- .../blobstor/teststore/option.go | 6 +- .../blobstor/teststore/teststore.go | 6 +- pkg/local_object_storage/engine/container.go | 14 ++--- pkg/local_object_storage/engine/delete.go | 6 +- pkg/local_object_storage/engine/engine.go | 19 +++--- pkg/local_object_storage/engine/exists.go | 2 +- pkg/local_object_storage/engine/get.go | 2 +- pkg/local_object_storage/engine/head.go | 2 +- pkg/local_object_storage/engine/inhume.go | 10 ++-- pkg/local_object_storage/engine/lock.go | 8 +-- pkg/local_object_storage/engine/put.go | 2 +- pkg/local_object_storage/engine/range.go | 2 +- pkg/local_object_storage/engine/select.go | 4 +- pkg/local_object_storage/engine/shards.go | 6 +- pkg/local_object_storage/engine/tree.go | 26 ++++---- .../internal/storagetest/storage.go | 30 +++++----- .../metabase/containers_test.go | 6 +- pkg/local_object_storage/metabase/control.go | 12 ++-- .../metabase/control_test.go | 2 +- .../metabase/counter_test.go | 18 +++--- pkg/local_object_storage/metabase/db_test.go | 2 +- .../metabase/delete_ec_test.go | 8 +-- .../metabase/delete_meta_test.go | 4 +- .../metabase/delete_test.go | 12 ++-- .../metabase/exists_test.go | 3 +- .../metabase/expired_test.go | 2 +- pkg/local_object_storage/metabase/get_test.go | 6 +- .../metabase/graveyard_test.go | 12 ++-- .../metabase/inhume_ec_test.go | 4 +- .../metabase/inhume_test.go | 6 +- .../metabase/iterators_test.go | 4 +- .../metabase/list_test.go | 8 +-- .../metabase/lock_test.go | 6 +- pkg/local_object_storage/metabase/mode.go | 4 +- .../metabase/mode_test.go | 8 +-- pkg/local_object_storage/metabase/put_test.go | 6 +- .../metabase/reset_test.go | 4 +- .../metabase/select_test.go | 34 +++++------ .../metabase/storage_id_test.go | 4 +- .../metabase/upgrade_test.go | 12 ++-- .../metabase/version_test.go | 32 +++++----- .../pilorama/bench_test.go | 4 +- pkg/local_object_storage/pilorama/boltdb.go | 10 ++-- pkg/local_object_storage/pilorama/forest.go | 4 +- 
.../pilorama/forest_test.go | 54 ++++++++--------- .../pilorama/interface.go | 4 +- .../pilorama/mode_test.go | 8 +-- pkg/local_object_storage/shard/control.go | 16 ++--- pkg/local_object_storage/shard/gc.go | 16 ++--- .../shard/gc_internal_test.go | 4 +- pkg/local_object_storage/shard/lock_test.go | 4 +- pkg/local_object_storage/shard/shard.go | 10 ++-- pkg/local_object_storage/shard/shard_test.go | 4 +- .../writecache/benchmark/writecache_test.go | 8 +-- pkg/local_object_storage/writecache/cache.go | 10 ++-- pkg/local_object_storage/writecache/flush.go | 14 ++--- .../writecache/flush_test.go | 16 ++--- pkg/local_object_storage/writecache/mode.go | 4 +- .../writecache/mode_test.go | 8 +-- .../writecache/options.go | 6 +- .../writecache/writecache.go | 4 +- pkg/morph/client/balance/burn.go | 6 +- pkg/morph/client/balance/lock.go | 6 +- pkg/morph/client/balance/mint.go | 6 +- pkg/morph/client/balance/transfer.go | 5 +- pkg/morph/client/client.go | 4 +- pkg/morph/client/container/delete.go | 9 +-- pkg/morph/client/container/put.go | 9 +-- pkg/morph/client/frostfs/cheque.go | 10 ++-- pkg/morph/client/netmap/config.go | 5 +- pkg/morph/client/netmap/innerring.go | 5 +- pkg/morph/client/netmap/new_epoch.go | 9 +-- pkg/morph/client/netmap/peer.go | 9 +-- pkg/morph/client/netmap/update_state.go | 5 +- pkg/morph/client/notary.go | 40 +++++++------ pkg/morph/client/static.go | 8 ++- pkg/morph/event/handlers.go | 2 +- pkg/morph/event/listener.go | 8 +-- pkg/morph/event/listener_test.go | 4 +- pkg/network/transport/object/grpc/service.go | 4 +- pkg/services/apemanager/audit.go | 6 +- pkg/services/container/audit.go | 8 +-- pkg/services/container/morph/executor.go | 12 ++-- pkg/services/container/morph/executor_test.go | 4 +- pkg/services/control/ir/server/audit.go | 8 +-- pkg/services/control/ir/server/calls.go | 18 +++--- pkg/services/control/server/server.go | 5 +- .../control/server/set_netmap_status.go | 6 +- pkg/services/object/acl/v2/service.go | 34 +++++------ pkg/services/object/ape/service.go | 8 +-- pkg/services/object/audit.go | 34 +++++------ pkg/services/object/common.go | 8 +-- pkg/services/object/common/writer/common.go | 4 +- pkg/services/object/common/writer/ec.go | 2 +- pkg/services/object/get/assembleec.go | 2 +- pkg/services/object/get/container.go | 4 +- pkg/services/object/get/remote.go | 2 +- pkg/services/object/get/request.go | 12 ++-- pkg/services/object/metrics.go | 12 ++-- pkg/services/object/response.go | 8 +-- pkg/services/object/search/search.go | 10 ++-- pkg/services/object/server.go | 4 +- pkg/services/object/sign.go | 8 +-- pkg/services/object/transport_splitter.go | 8 +-- pkg/services/object/util/log.go | 8 +-- .../object_manager/tombstone/checker.go | 6 +- pkg/services/policer/check.go | 8 +-- pkg/services/tree/getsubtree_test.go | 2 +- pkg/services/tree/sync.go | 6 +- pkg/util/http/calls.go | 4 +- scripts/populate-metabase/main.go | 6 +- 209 files changed, 1068 insertions(+), 1036 deletions(-) diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 7415e8e70..09af08525 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -93,8 +93,8 @@ func watchForSignal(ctx context.Context, cancel func()) { if err != nil { log.Error(ctx, logs.FrostFSNodeConfigurationReading, zap.Error(err)) } - pprofCmp.reload() - metricsCmp.reload() + pprofCmp.reload(ctx) + metricsCmp.reload(ctx) log.Info(ctx, logs.FrostFSIRReloadExtraWallets) err = innerRing.SetExtraWallets(cfg) if err != nil { diff --git a/cmd/frostfs-ir/httpcomponent.go 
b/cmd/frostfs-ir/httpcomponent.go index 685ef61ad..a8eef6010 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -25,8 +25,8 @@ const ( shutdownTimeoutKeyPostfix = ".shutdown_timeout" ) -func (c *httpComponent) init() { - log.Info(context.Background(), "init "+c.name) +func (c *httpComponent) init(ctx context.Context) { + log.Info(ctx, "init "+c.name) c.enabled = cfg.GetBool(c.name + enabledKeyPostfix) c.address = cfg.GetString(c.name + addressKeyPostfix) c.shutdownDur = cfg.GetDuration(c.name + shutdownTimeoutKeyPostfix) @@ -40,14 +40,14 @@ func (c *httpComponent) init() { httputil.WithShutdownTimeout(c.shutdownDur), ) } else { - log.Info(context.Background(), c.name+" is disabled, skip") + log.Info(ctx, c.name+" is disabled, skip") c.srv = nil } } -func (c *httpComponent) start() { +func (c *httpComponent) start(ctx context.Context) { if c.srv != nil { - log.Info(context.Background(), "start "+c.name) + log.Info(ctx, "start "+c.name) wg.Add(1) go func() { defer wg.Done() @@ -56,10 +56,10 @@ func (c *httpComponent) start() { } } -func (c *httpComponent) shutdown() error { +func (c *httpComponent) shutdown(ctx context.Context) error { if c.srv != nil { - log.Info(context.Background(), "shutdown "+c.name) - return c.srv.Shutdown() + log.Info(ctx, "shutdown "+c.name) + return c.srv.Shutdown(ctx) } return nil } @@ -71,17 +71,17 @@ func (c *httpComponent) needReload() bool { return enabled != c.enabled || enabled && (address != c.address || dur != c.shutdownDur) } -func (c *httpComponent) reload() { - log.Info(context.Background(), "reload "+c.name) +func (c *httpComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(context.Background(), c.name+" config updated") - if err := c.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } else { - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } } diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index bcb2c5dd8..e86c04b9e 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -87,17 +87,17 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() - pprofCmp.init() + pprofCmp.init(ctx) metricsCmp = newMetricsComponent() - metricsCmp.init() + metricsCmp.init(ctx) audit.Store(cfg.GetBool("audit.enabled")) innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode, audit) exitErr(err) - pprofCmp.start() - metricsCmp.start() + pprofCmp.start(ctx) + metricsCmp.start(ctx) // start inner ring err = innerRing.Start(ctx, intErr) @@ -117,12 +117,12 @@ func main() { func shutdown(ctx context.Context) { innerRing.Stop(ctx) - if err := metricsCmp.shutdown(); err != nil { + if err := metricsCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) } - if err := pprofCmp.shutdown(); err != nil { + if err := pprofCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error()), ) diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index e95fd117f..8e81d8b85 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -29,8 +29,8 @@ func newPprofComponent() *pprofComponent { } } -func (c *pprofComponent) init() { - 
c.httpComponent.init() +func (c *pprofComponent) init(ctx context.Context) { + c.httpComponent.init(ctx) if c.enabled { c.blockRate = cfg.GetInt(pprofBlockRateKey) @@ -52,17 +52,17 @@ func (c *pprofComponent) needReload() bool { c.enabled && (c.blockRate != blockRate || c.mutexRate != mutexRate) } -func (c *pprofComponent) reload() { - log.Info(context.Background(), "reload "+c.name) +func (c *pprofComponent) reload(ctx context.Context) { + log.Info(ctx, "reload "+c.name) if c.needReload() { - log.Info(context.Background(), c.name+" config updated") - if err := c.shutdown(); err != nil { - log.Debug(context.Background(), logs.FrostFSIRCouldNotShutdownHTTPServer, + log.Info(ctx, c.name+" config updated") + if err := c.shutdown(ctx); err != nil { + log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, zap.String("error", err.Error())) return } - c.init() - c.start() + c.init(ctx) + c.start(ctx) } } diff --git a/cmd/frostfs-lens/internal/blobovnicza/inspect.go b/cmd/frostfs-lens/internal/blobovnicza/inspect.go index b1a6e3fd2..e7e2c0769 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/inspect.go +++ b/cmd/frostfs-lens/internal/blobovnicza/inspect.go @@ -28,7 +28,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) var prm blobovnicza.GetPrm prm.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/blobovnicza/list.go b/cmd/frostfs-lens/internal/blobovnicza/list.go index d327dbc41..d41a15bcf 100644 --- a/cmd/frostfs-lens/internal/blobovnicza/list.go +++ b/cmd/frostfs-lens/internal/blobovnicza/list.go @@ -32,7 +32,7 @@ func listFunc(cmd *cobra.Command, _ []string) { } blz := openBlobovnicza(cmd) - defer blz.Close() + defer blz.Close(cmd.Context()) err := blobovnicza.IterateAddresses(context.Background(), blz, wAddr) common.ExitOnErr(cmd, common.Errf("blobovnicza iterator failure: %w", err)) diff --git a/cmd/frostfs-lens/internal/meta/inspect.go b/cmd/frostfs-lens/internal/meta/inspect.go index 9eb60f966..f436343c7 100644 --- a/cmd/frostfs-lens/internal/meta/inspect.go +++ b/cmd/frostfs-lens/internal/meta/inspect.go @@ -31,7 +31,7 @@ func inspectFunc(cmd *cobra.Command, _ []string) { common.ExitOnErr(cmd, common.Errf("invalid address argument: %w", err)) db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) storageID := meta.StorageIDPrm{} storageID.SetAddress(addr) diff --git a/cmd/frostfs-lens/internal/meta/list-garbage.go b/cmd/frostfs-lens/internal/meta/list-garbage.go index 61b10ca1f..6b27a232f 100644 --- a/cmd/frostfs-lens/internal/meta/list-garbage.go +++ b/cmd/frostfs-lens/internal/meta/list-garbage.go @@ -19,7 +19,7 @@ func init() { func listGarbageFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var garbPrm meta.GarbageIterationPrm garbPrm.SetHandler( diff --git a/cmd/frostfs-lens/internal/meta/list-graveyard.go b/cmd/frostfs-lens/internal/meta/list-graveyard.go index 19a93691c..45642e74b 100644 --- a/cmd/frostfs-lens/internal/meta/list-graveyard.go +++ b/cmd/frostfs-lens/internal/meta/list-graveyard.go @@ -19,7 +19,7 @@ func init() { func listGraveyardFunc(cmd *cobra.Command, _ []string) { db := openMeta(cmd) - defer db.Close() + defer db.Close(cmd.Context()) var gravePrm meta.GraveyardIterationPrm gravePrm.SetHandler( diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index aa92e5ec5..5af37865f 100644 --- 
a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -397,16 +397,16 @@ type internals struct { } // starts node's maintenance. -func (c *cfg) startMaintenance() { +func (c *cfg) startMaintenance(ctx context.Context) { c.isMaintenance.Store(true) c.cfgNetmap.state.setControlNetmapStatus(control.NetmapStatus_MAINTENANCE) - c.log.Info(context.Background(), logs.FrostFSNodeStartedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStartedLocalNodesMaintenance) } // stops node's maintenance. -func (c *internals) stopMaintenance() { +func (c *internals) stopMaintenance(ctx context.Context) { if c.isMaintenance.CompareAndSwap(true, false) { - c.log.Info(context.Background(), logs.FrostFSNodeStoppedLocalNodesMaintenance) + c.log.Info(ctx, logs.FrostFSNodeStoppedLocalNodesMaintenance) } } @@ -1129,10 +1129,10 @@ func initLocalStorage(ctx context.Context, c *cfg) { }) } -func initAccessPolicyEngine(_ context.Context, c *cfg) { +func initAccessPolicyEngine(ctx context.Context, c *cfg) { var localOverrideDB chainbase.LocalOverrideDatabase if nodeconfig.PersistentPolicyRules(c.appCfg).Path() == "" { - c.log.Warn(context.Background(), logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) + c.log.Warn(ctx, logs.FrostFSNodePersistentRuleStorageDBPathIsNotSetInmemoryWillBeUsed) localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase() } else { localOverrideDB = chainbase.NewBoltLocalOverrideDatabase( @@ -1157,7 +1157,7 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) { c.onShutdown(func() { if err := ape.LocalOverrideDatabaseCore().Close(); err != nil { - c.log.Warn(context.Background(), logs.FrostFSNodeAccessPolicyEngineClosingFailure, + c.log.Warn(ctx, logs.FrostFSNodeAccessPolicyEngineClosingFailure, zap.Error(err), ) } @@ -1206,10 +1206,10 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { c.cfgNetmap.state.setNodeInfo(ni) } -func (c *cfg) updateContractNodeInfo(epoch uint64) { +func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { ni, err := c.netmapLocalNodeState(epoch) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, + c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), zap.String("error", err.Error())) return @@ -1221,19 +1221,19 @@ func (c *cfg) updateContractNodeInfo(epoch uint64) { // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // with the binary-encoded information from the current node's configuration. // The state is set using the provided setter which MUST NOT be nil. -func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error { +func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error { ni := c.cfgNodeInfo.localInfo stateSetter(&ni) prm := nmClient.AddPeerPrm{} prm.SetNodeInfo(ni) - return c.cfgNetmap.wrapper.AddPeer(prm) + return c.cfgNetmap.wrapper.AddPeer(ctx, prm) } // bootstrapOnline calls cfg.bootstrapWithState with "online" state. 
-func bootstrapOnline(c *cfg) error { - return c.bootstrapWithState(func(ni *netmap.NodeInfo) { +func bootstrapOnline(ctx context.Context, c *cfg) error { + return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) }) } @@ -1241,21 +1241,21 @@ func bootstrapOnline(c *cfg) error { // bootstrap calls bootstrapWithState with: // - "maintenance" state if maintenance is in progress on the current node // - "online", otherwise -func (c *cfg) bootstrap() error { +func (c *cfg) bootstrap(ctx context.Context) error { // switch to online except when under maintenance st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { - c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState(func(ni *netmap.NodeInfo) { + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) + return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) }) } - c.log.Info(context.Background(), logs.FrostFSNodeBootstrappingWithOnlineState, + c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, zap.Stringer("previous", st), ) - return bootstrapOnline(c) + return bootstrapOnline(ctx, c) } // needBootstrap checks if local node should be registered in network on bootup. @@ -1282,7 +1282,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-ch: c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return @@ -1290,7 +1290,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return @@ -1302,7 +1302,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { case <-ch: c.log.Info(ctx, logs.FrostFSNodeTerminationSignalHasBeenReceivedStopping) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeTerminationSignalProcessingIsComplete) return @@ -1310,7 +1310,7 @@ func (c *cfg) signalWatcher(ctx context.Context) { c.log.Warn(ctx, logs.FrostFSNodeInternalApplicationError, zap.String("message", err.Error())) - c.shutdown() + c.shutdown(ctx) c.log.Info(ctx, logs.FrostFSNodeInternalErrorProcessingIsComplete) return @@ -1322,11 +1322,11 @@ func (c *cfg) signalWatcher(ctx context.Context) { func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeSIGHUPHasBeenReceivedRereadingConfiguration) - if !c.compareAndSwapHealthStatus(control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { + if !c.compareAndSwapHealthStatus(ctx, control.HealthStatus_READY, control.HealthStatus_RECONFIGURING) { c.log.Info(ctx, logs.FrostFSNodeSIGHUPSkip) return } - defer c.compareAndSwapHealthStatus(control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) + defer c.compareAndSwapHealthStatus(ctx, control.HealthStatus_RECONFIGURING, control.HealthStatus_READY) err := c.reloadAppConfig() if err != nil { @@ -1388,7 +1388,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{"logger", logPrm.Reload}) components = append(components, dCmp{"runtime", func() error { - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) return nil }}) components = append(components, dCmp{"audit", func() error { @@ -1474,14 +1474,14 @@ func (c *cfg) createContainerInfoProvider(ctx 
context.Context) container.InfoPro }) } -func (c *cfg) shutdown() { - old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN) +func (c *cfg) shutdown(ctx context.Context) { + old := c.swapHealthStatus(ctx, control.HealthStatus_SHUTTING_DOWN) if old == control.HealthStatus_SHUTTING_DOWN { - c.log.Info(context.Background(), logs.FrostFSNodeShutdownSkip) + c.log.Info(ctx, logs.FrostFSNodeShutdownSkip) return } if old == control.HealthStatus_STARTING { - c.log.Warn(context.Background(), logs.FrostFSNodeShutdownWhenNotReady) + c.log.Warn(ctx, logs.FrostFSNodeShutdownWhenNotReady) } c.ctxCancel() @@ -1491,6 +1491,6 @@ func (c *cfg) shutdown() { } if err := sdnotify.ClearStatus(); err != nil { - c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 3f75be235..d3e1b2766 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -237,10 +237,10 @@ type morphContainerWriter struct { neoClient *cntClient.Client } -func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) { - return cntClient.Put(m.neoClient, cnr) +func (m morphContainerWriter) Put(ctx context.Context, cnr containerCore.Container) (*cid.ID, error) { + return cntClient.Put(ctx, m.neoClient, cnr) } -func (m morphContainerWriter) Delete(witness containerCore.RemovalWitness) error { - return cntClient.Delete(m.neoClient, witness) +func (m morphContainerWriter) Delete(ctx context.Context, witness containerCore.RemovalWitness) error { + return cntClient.Delete(ctx, m.neoClient, witness) } diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index ffac23eec..ecd82bba5 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -16,7 +16,7 @@ import ( const serviceNameControl = "control" -func initControlService(c *cfg) { +func initControlService(ctx context.Context, c *cfg) { endpoint := controlconfig.GRPC(c.appCfg).Endpoint() if endpoint == controlconfig.GRPCEndpointDefault { return @@ -46,14 +46,14 @@ func initControlService(c *cfg) { lis, err := net.Listen("tcp", endpoint) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpointControl, zap.Error(err)) return } c.cfgControlService.server = grpc.NewServer() c.onShutdown(func() { - stopGRPC("FrostFS Control API", c.cfgControlService.server, c.log) + stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) }) control.RegisterControlServiceServer(c.cfgControlService.server, ctlSvc) @@ -72,23 +72,23 @@ func (c *cfg) NetmapStatus() control.NetmapStatus { return c.cfgNetmap.state.controlNetmapStatus() } -func (c *cfg) setHealthStatus(st control.HealthStatus) { - c.notifySystemd(st) +func (c *cfg) setHealthStatus(ctx context.Context, st control.HealthStatus) { + c.notifySystemd(ctx, st) c.healthStatus.Store(int32(st)) c.metricsCollector.State().SetHealth(int32(st)) } -func (c *cfg) compareAndSwapHealthStatus(oldSt, newSt control.HealthStatus) (swapped bool) { +func (c *cfg) compareAndSwapHealthStatus(ctx context.Context, oldSt, newSt control.HealthStatus) (swapped bool) { if swapped = c.healthStatus.CompareAndSwap(int32(oldSt), int32(newSt)); swapped { - c.notifySystemd(newSt) + c.notifySystemd(ctx, newSt) c.metricsCollector.State().SetHealth(int32(newSt)) } return } -func (c *cfg) 
swapHealthStatus(st control.HealthStatus) (old control.HealthStatus) { +func (c *cfg) swapHealthStatus(ctx context.Context, st control.HealthStatus) (old control.HealthStatus) { old = control.HealthStatus(c.healthStatus.Swap(int32(st))) - c.notifySystemd(st) + c.notifySystemd(ctx, st) c.metricsCollector.State().SetHealth(int32(st)) return } @@ -97,7 +97,7 @@ func (c *cfg) HealthStatus() control.HealthStatus { return control.HealthStatus(c.healthStatus.Load()) } -func (c *cfg) notifySystemd(st control.HealthStatus) { +func (c *cfg) notifySystemd(ctx context.Context, st control.HealthStatus) { if !c.sdNotify { return } @@ -113,6 +113,6 @@ func (c *cfg) notifySystemd(st control.HealthStatus) { err = sdnotify.Status(fmt.Sprintf("%v", st)) } if err != nil { - c.log.Error(context.Background(), logs.FailedToReportStatusToSystemd, zap.Error(err)) + c.log.Error(ctx, logs.FailedToReportStatusToSystemd, zap.Error(err)) } } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 271810ee6..6105be861 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -19,11 +19,11 @@ import ( const maxRecvMsgSize = 256 << 20 -func initGRPC(c *cfg) { +func initGRPC(ctx context.Context, c *cfg) { var endpointsToReconnect []string var successCount int grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { - serverOpts, ok := getGrpcServerOpts(c, sc) + serverOpts, ok := getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -31,7 +31,7 @@ func initGRPC(c *cfg) { lis, err := net.Listen("tcp", sc.Endpoint()) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(sc.Endpoint()) - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) endpointsToReconnect = append(endpointsToReconnect, sc.Endpoint()) return } @@ -40,7 +40,7 @@ func initGRPC(c *cfg) { srv := grpc.NewServer(serverOpts...) 
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.append(sc.Endpoint(), lis, srv) @@ -53,11 +53,11 @@ func initGRPC(c *cfg) { c.cfgGRPC.reconnectTimeout = grpcconfig.ReconnectTimeout(c.appCfg) for _, endpoint := range endpointsToReconnect { - scheduleReconnect(endpoint, c) + scheduleReconnect(ctx, endpoint, c) } } -func scheduleReconnect(endpoint string, c *cfg) { +func scheduleReconnect(ctx context.Context, endpoint string, c *cfg) { c.wg.Add(1) go func() { defer c.wg.Done() @@ -66,7 +66,7 @@ func scheduleReconnect(endpoint string, c *cfg) { for { select { case <-t.C: - if tryReconnect(endpoint, c) { + if tryReconnect(ctx, endpoint, c) { return } case <-c.done: @@ -76,20 +76,20 @@ func scheduleReconnect(endpoint string, c *cfg) { }() } -func tryReconnect(endpoint string, c *cfg) bool { - c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) +func tryReconnect(ctx context.Context, endpoint string, c *cfg) bool { + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnecting, zap.String("endpoint", endpoint)) - serverOpts, found := getGRPCEndpointOpts(endpoint, c) + serverOpts, found := getGRPCEndpointOpts(ctx, endpoint, c) if !found { - c.log.Warn(context.Background(), logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCServerConfigNotFound, zap.String("endpoint", endpoint)) return true } lis, err := net.Listen("tcp", endpoint) if err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(endpoint) - c.log.Error(context.Background(), logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) - c.log.Warn(context.Background(), logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) + c.log.Error(ctx, logs.FrostFSNodeCantListenGRPCEndpoint, zap.Error(err)) + c.log.Warn(ctx, logs.FrostFSNodeGRPCReconnectFailed, zap.Duration("next_try_in", c.cfgGRPC.reconnectTimeout)) return false } c.metricsCollector.GrpcServerMetrics().MarkHealthy(endpoint) @@ -97,16 +97,16 @@ func tryReconnect(endpoint string, c *cfg) bool { srv := grpc.NewServer(serverOpts...) 
c.onShutdown(func() { - stopGRPC("FrostFS Public API", srv, c.log) + stopGRPC(ctx, "FrostFS Public API", srv, c.log) }) c.cfgGRPC.appendAndHandle(endpoint, lis, srv) - c.log.Info(context.Background(), logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) + c.log.Info(ctx, logs.FrostFSNodeGRPCReconnectedSuccessfully, zap.String("endpoint", endpoint)) return true } -func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { +func getGRPCEndpointOpts(ctx context.Context, endpoint string, c *cfg) (result []grpc.ServerOption, found bool) { unlock := c.LockAppConfigShared() defer unlock() grpcconfig.IterateEndpoints(c.appCfg, func(sc *grpcconfig.Config) { @@ -117,7 +117,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } var ok bool - result, ok = getGrpcServerOpts(c, sc) + result, ok = getGrpcServerOpts(ctx, c, sc) if !ok { return } @@ -126,7 +126,7 @@ func getGRPCEndpointOpts(endpoint string, c *cfg) (result []grpc.ServerOption, f return } -func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { +func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool) { serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( @@ -144,7 +144,7 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool if tlsCfg != nil { cert, err := tls.LoadX509KeyPair(tlsCfg.CertificateFile(), tlsCfg.KeyFile()) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeCouldNotReadCertificateFromFile, zap.Error(err)) return nil, false } @@ -175,38 +175,38 @@ func getGrpcServerOpts(c *cfg, sc *grpcconfig.Config) ([]grpc.ServerOption, bool return serverOpts, true } -func serveGRPC(c *cfg) { +func serveGRPC(ctx context.Context, c *cfg) { c.cfgGRPC.performAndSave(func(e string, l net.Listener, s *grpc.Server) { c.wg.Add(1) go func() { defer func() { - c.log.Info(context.Background(), logs.FrostFSNodeStopListeningGRPCEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStopListeningGRPCEndpoint, zap.Stringer("endpoint", l.Addr()), ) c.wg.Done() }() - c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", "gRPC"), zap.Stringer("endpoint", l.Addr()), ) if err := s.Serve(l); err != nil { c.metricsCollector.GrpcServerMetrics().MarkUnhealthy(e) - c.log.Error(context.Background(), logs.FrostFSNodeGRPCServerError, zap.Error(err)) + c.log.Error(ctx, logs.FrostFSNodeGRPCServerError, zap.Error(err)) c.cfgGRPC.dropConnection(e) - scheduleReconnect(e, c) + scheduleReconnect(ctx, e, c) } }() }) } -func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { +func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger) { l = l.With(zap.String("name", name)) - l.Info(context.Background(), logs.FrostFSNodeStoppingGRPCServer) + l.Info(ctx, logs.FrostFSNodeStoppingGRPCServer) // GracefulStop() may freeze forever, see #1270 done := make(chan struct{}) @@ -218,9 +218,9 @@ func stopGRPC(name string, s *grpc.Server, l *logger.Logger) { select { case <-done: case <-time.After(1 * time.Minute): - l.Info(context.Background(), logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) + l.Info(ctx, logs.FrostFSNodeGRPCCannotShutdownGracefullyForcingStop) s.Stop() } - l.Info(context.Background(), 
logs.FrostFSNodeGRPCServerStoppedSuccessfully) + l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } diff --git a/cmd/frostfs-node/httpcomponent.go b/cmd/frostfs-node/httpcomponent.go index a699a72a1..7346206ef 100644 --- a/cmd/frostfs-node/httpcomponent.go +++ b/cmd/frostfs-node/httpcomponent.go @@ -20,9 +20,9 @@ type httpComponent struct { preReload func(c *cfg) } -func (cmp *httpComponent) init(c *cfg) { +func (cmp *httpComponent) init(ctx context.Context, c *cfg) { if !cmp.enabled { - c.log.Info(context.Background(), cmp.name+" is disabled") + c.log.Info(ctx, cmp.name+" is disabled") return } // Init server with parameters @@ -39,14 +39,14 @@ func (cmp *httpComponent) init(c *cfg) { go func() { defer c.wg.Done() - c.log.Info(context.Background(), logs.FrostFSNodeStartListeningEndpoint, + c.log.Info(ctx, logs.FrostFSNodeStartListeningEndpoint, zap.String("service", cmp.name), zap.String("endpoint", cmp.address)) fatalOnErr(srv.Serve()) }() c.closers = append(c.closers, closer{ cmp.name, - func() { stopAndLog(c, cmp.name, srv.Shutdown) }, + func() { stopAndLog(ctx, c, cmp.name, srv.Shutdown) }, }) } @@ -62,7 +62,7 @@ func (cmp *httpComponent) reload(ctx context.Context) error { // Cleanup delCloser(cmp.cfg, cmp.name) // Init server with new parameters - cmp.init(cmp.cfg) + cmp.init(ctx, cmp.cfg) // Start worker if cmp.enabled { startWorker(ctx, cmp.cfg, *getWorker(cmp.cfg, cmp.name)) diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index cd42d5f1d..f8854ab3c 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -61,21 +61,21 @@ func main() { var ctx context.Context ctx, c.ctxCancel = context.WithCancel(context.Background()) - c.setHealthStatus(control.HealthStatus_STARTING) + c.setHealthStatus(ctx, control.HealthStatus_STARTING) initApp(ctx, c) bootUp(ctx, c) - c.compareAndSwapHealthStatus(control.HealthStatus_STARTING, control.HealthStatus_READY) + c.compareAndSwapHealthStatus(ctx, control.HealthStatus_STARTING, control.HealthStatus_READY) wait(c) } -func initAndLog(c *cfg, name string, initializer func(*cfg)) { - c.log.Info(context.Background(), fmt.Sprintf("initializing %s service...", name)) +func initAndLog(ctx context.Context, c *cfg, name string, initializer func(*cfg)) { + c.log.Info(ctx, fmt.Sprintf("initializing %s service...", name)) initializer(c) - c.log.Info(context.Background(), name+" service has been successfully initialized") + c.log.Info(ctx, name+" service has been successfully initialized") } func initApp(ctx context.Context, c *cfg) { @@ -85,38 +85,38 @@ func initApp(ctx context.Context, c *cfg) { c.wg.Done() }() - setRuntimeParameters(c) + setRuntimeParameters(ctx, c) metrics, _ := metricsComponent(c) - initAndLog(c, "profiler", initProfilerService) - initAndLog(c, metrics.name, metrics.init) + initAndLog(ctx, c, "profiler", func(c *cfg) { initProfilerService(ctx, c) }) + initAndLog(ctx, c, metrics.name, func(c *cfg) { metrics.init(ctx, c) }) - initAndLog(c, "tracing", func(c *cfg) { initTracing(ctx, c) }) + initAndLog(ctx, c, "tracing", func(c *cfg) { initTracing(ctx, c) }) initLocalStorage(ctx, c) - initAndLog(c, "storage engine", func(c *cfg) { + initAndLog(ctx, c, "storage engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Open(ctx)) fatalOnErr(c.cfgObject.cfgLocalStorage.localStorage.Init(ctx)) }) - initAndLog(c, "gRPC", initGRPC) - initAndLog(c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) + initAndLog(ctx, c, "netmap", 
func(c *cfg) { initNetmapService(ctx, c) }) initAccessPolicyEngine(ctx, c) - initAndLog(c, "access policy engine", func(c *cfg) { + initAndLog(ctx, c, "access policy engine", func(c *cfg) { fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Open(ctx)) fatalOnErr(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalOverrideDatabaseCore().Init()) }) - initAndLog(c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) - initAndLog(c, "container", func(c *cfg) { initContainerService(ctx, c) }) - initAndLog(c, "session", initSessionService) - initAndLog(c, "object", initObjectService) - initAndLog(c, "tree", initTreeService) - initAndLog(c, "apemanager", initAPEManagerService) - initAndLog(c, "control", initControlService) + initAndLog(ctx, c, "accounting", func(c *cfg) { initAccountingService(ctx, c) }) + initAndLog(ctx, c, "container", func(c *cfg) { initContainerService(ctx, c) }) + initAndLog(ctx, c, "session", initSessionService) + initAndLog(ctx, c, "object", initObjectService) + initAndLog(ctx, c, "tree", initTreeService) + initAndLog(ctx, c, "apemanager", initAPEManagerService) + initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) - initAndLog(c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starter func(context.Context, *cfg)) { @@ -128,24 +128,24 @@ func runAndLog(ctx context.Context, c *cfg, name string, logSuccess bool, starte } } -func stopAndLog(c *cfg, name string, stopper func() error) { - c.log.Debug(context.Background(), fmt.Sprintf("shutting down %s service", name)) +func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.Context) error) { + c.log.Debug(ctx, fmt.Sprintf("shutting down %s service", name)) - err := stopper() + err := stopper(ctx) if err != nil { - c.log.Debug(context.Background(), fmt.Sprintf("could not shutdown %s server", name), + c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), zap.String("error", err.Error()), ) } - c.log.Debug(context.Background(), name+" service has been stopped") + c.log.Debug(ctx, name+" service has been stopped") } func bootUp(ctx context.Context, c *cfg) { - runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(c) }) + runAndLog(ctx, c, "gRPC", false, func(_ context.Context, c *cfg) { serveGRPC(ctx, c) }) runAndLog(ctx, c, "notary", true, makeAndWaitNotaryDeposit) - bootstrapNode(c) + bootstrapNode(ctx, c) startWorkers(ctx, c) } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 3e010b181..ca9f4fe3e 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -129,7 +129,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { return } - tx, vub, err := makeNotaryDeposit(c) + tx, vub, err := makeNotaryDeposit(ctx, c) fatalOnErr(err) if tx.Equals(util.Uint256{}) { @@ -144,7 +144,7 @@ func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { fatalOnErr(err) } -func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) { +func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error) { const ( // gasMultiplier defines how many times more the notary // balance must be compared to the GAS balance of the node: @@ -161,7 +161,7 @@ func makeNotaryDeposit(c *cfg) (util.Uint256, uint32, error) { return util.Uint256{}, 0, fmt.Errorf("could not 
calculate notary deposit: %w", err) } - return c.cfgMorph.client.DepositEndlessNotary(depositAmount) + return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount) } var ( @@ -256,7 +256,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgNetmap.scriptHash, lis, c.cfgNetmap.parsers, c.cfgNetmap.subscribers) registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) - registerBlockHandler(lis, func(block *block.Block) { + registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 35ab4d575..6df947954 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -145,7 +145,7 @@ func initNetmapService(ctx context.Context, c *cfg) { c.initMorphComponents(ctx) - initNetmapState(c) + initNetmapState(ctx, c) server := netmapTransportGRPC.New( netmapService.NewSignService( @@ -182,20 +182,20 @@ func addNewEpochNotificationHandlers(c *cfg) { addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, ev event.Event) { e := ev.(netmapEvent.NewEpoch).EpochNumber() - c.updateContractNodeInfo(e) + c.updateContractNodeInfo(ctx, e) if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } - if err := c.bootstrap(); err != nil { + if err := c.bootstrap(ctx); err != nil { c.log.Warn(ctx, logs.FrostFSNodeCantSendRebootstrapTx, zap.Error(err)) } }) if c.cfgMorph.notaryEnabled { addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { - _, _, err := makeNotaryDeposit(c) + _, _, err := makeNotaryDeposit(ctx, c) if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, zap.String("error", err.Error()), @@ -207,13 +207,13 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. -func bootstrapNode(c *cfg) { +func bootstrapNode(ctx context.Context, c *cfg) { if c.needBootstrap() { if c.IsMaintenance() { - c.log.Info(context.Background(), logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) return } - err := c.bootstrap() + err := c.bootstrap(ctx) fatalOnErrDetails("bootstrap error", err) } } @@ -240,17 +240,17 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. 
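The hunks above and below continue the same refactor: startup helpers that previously logged with context.Background() now accept the caller's context and forward it to the logger. A minimal, self-contained sketch of that pattern; the node type, slog logger and field names are illustrative stand-ins, not the frostfs-node API:

package main

import (
	"context"
	"log/slog"
)

type node struct{ log *slog.Logger }

// initState mirrors the shape of a helper like initNetmapState after the
// change: it takes the caller's ctx and uses it for logging instead of
// constructing context.Background() locally.
func (n *node) initState(ctx context.Context, epoch uint64) {
	n.log.InfoContext(ctx, "initial network state", "epoch", epoch)
}

func main() {
	ctx := context.Background() // in the real node this comes from main's lifecycle context
	n := &node{log: slog.Default()}
	n.initState(ctx, 42)
}
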
-func initNetmapState(c *cfg) { +func initNetmapState(ctx context.Context, c *cfg) { epoch, err := c.cfgNetmap.wrapper.Epoch() fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo - ni, err = c.netmapInitLocalNodeState(epoch) + ni, err = c.netmapInitLocalNodeState(ctx, epoch) fatalOnErrDetails("could not init network state", err) stateWord := nodeState(ni) - c.log.Info(context.Background(), logs.FrostFSNodeInitialNetworkState, + c.log.Info(ctx, logs.FrostFSNodeInitialNetworkState, zap.Uint64("epoch", epoch), zap.String("state", stateWord), ) @@ -279,7 +279,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string { return "undefined" } -func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() if err != nil { return nil, err @@ -307,7 +307,7 @@ func (c *cfg) netmapInitLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error if nmState != candidateState { // This happens when the node was switched to maintenance without epoch tick. // We expect it to continue staying in maintenance. - c.log.Info(context.Background(), logs.CandidateStatusPriority, + c.log.Info(ctx, logs.CandidateStatusPriority, zap.String("netmap", nmState), zap.String("candidate", candidateState)) } @@ -353,16 +353,16 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") -func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error { +func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: return fmt.Errorf("unsupported status %v", st) case control.NetmapStatus_MAINTENANCE: - return c.setMaintenanceStatus(false) + return c.setMaintenanceStatus(ctx, false) case control.NetmapStatus_ONLINE, control.NetmapStatus_OFFLINE: } - c.stopMaintenance() + c.stopMaintenance(ctx) if !c.needBootstrap() { return errRelayBootstrap @@ -370,12 +370,12 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error { if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) - return bootstrapOnline(c) + return bootstrapOnline(ctx, c) } c.cfgNetmap.reBoostrapTurnedOff.Store(true) - return c.updateNetMapState(func(*nmClient.UpdatePeerPrm) {}) + return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { @@ -387,11 +387,11 @@ func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { return st, epoch, nil } -func (c *cfg) ForceMaintenance() error { - return c.setMaintenanceStatus(true) +func (c *cfg) ForceMaintenance(ctx context.Context) error { + return c.setMaintenanceStatus(ctx, true) } -func (c *cfg) setMaintenanceStatus(force bool) error { +func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) @@ -400,10 +400,10 @@ func (c *cfg) setMaintenanceStatus(force bool) error { } if err == nil || force { - c.startMaintenance() + c.startMaintenance(ctx) if err == nil { - err = c.updateNetMapState((*nmClient.UpdatePeerPrm).SetMaintenance) + err = c.updateNetMapState(ctx, (*nmClient.UpdatePeerPrm).SetMaintenance) } if err != nil { @@ -416,12 +416,12 @@ func (c *cfg) 
setMaintenanceStatus(force bool) error { // calls UpdatePeerState operation of Netmap contract's client for the local node. // State setter is used to specify node state to switch to. -func (c *cfg) updateNetMapState(stateSetter func(*nmClient.UpdatePeerPrm)) error { +func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient.UpdatePeerPrm)) error { var prm nmClient.UpdatePeerPrm prm.SetKey(c.key.PublicKey().Bytes()) stateSetter(&prm) - _, err := c.cfgNetmap.wrapper.UpdatePeerState(prm) + _, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) return err } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index cad6d5ee3..c4205a620 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -66,11 +66,11 @@ func (c *cfg) MaxObjectSize() uint64 { return sz } -func (s *objectSvc) Put() (objectService.PutObjectStream, error) { +func (s *objectSvc) Put(_ context.Context) (objectService.PutObjectStream, error) { return s.put.Put() } -func (s *objectSvc) Patch() (objectService.PatchObjectStream, error) { +func (s *objectSvc) Patch(_ context.Context) (objectService.PatchObjectStream, error) { return s.patch.Patch() } diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index dcd320146..5b40c8a88 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -1,17 +1,18 @@ package main import ( + "context" "runtime" profilerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/profiler" httputil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/http" ) -func initProfilerService(c *cfg) { +func initProfilerService(ctx context.Context, c *cfg) { tuneProfilers(c) pprof, _ := pprofComponent(c) - pprof.init(c) + pprof.init(ctx, c) } func pprofComponent(c *cfg) (*httpComponent, bool) { diff --git a/cmd/frostfs-node/runtime.go b/cmd/frostfs-node/runtime.go index 0b1000e70..f6d398574 100644 --- a/cmd/frostfs-node/runtime.go +++ b/cmd/frostfs-node/runtime.go @@ -10,17 +10,17 @@ import ( "go.uber.org/zap" ) -func setRuntimeParameters(c *cfg) { +func setRuntimeParameters(ctx context.Context, c *cfg) { if len(os.Getenv("GOMEMLIMIT")) != 0 { // default limit < yaml limit < app env limit < GOMEMLIMIT - c.log.Warn(context.Background(), logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) + c.log.Warn(ctx, logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT) return } memLimitBytes := runtime.GCMemoryLimitBytes(c.appCfg) previous := debug.SetMemoryLimit(memLimitBytes) if memLimitBytes != previous { - c.log.Info(context.Background(), logs.RuntimeSoftMemoryLimitUpdated, + c.log.Info(ctx, logs.RuntimeSoftMemoryLimitUpdated, zap.Int64("new_value", memLimitBytes), zap.Int64("old_value", previous)) } diff --git a/internal/audit/request.go b/internal/audit/request.go index 15a4a7960..17666ab4b 100644 --- a/internal/audit/request.go +++ b/internal/audit/request.go @@ -19,15 +19,15 @@ type Target interface { String() string } -func LogRequest(log *logger.Logger, operation string, req Request, target Target, status bool) { +func LogRequest(ctx context.Context, log *logger.Logger, operation string, req Request, target Target, status bool) { var key []byte if req != nil { key = req.GetVerificationHeader().GetBodySignature().GetKey() } - LogRequestWithKey(log, operation, key, target, status) + LogRequestWithKey(ctx, log, operation, key, target, status) } -func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target Target, status bool) { +func LogRequestWithKey(ctx context.Context, log *logger.Logger, operation string, 
key []byte, target Target, status bool) { object, subject := NotDefined, NotDefined publicKey := crypto.UnmarshalPublicKey(key) @@ -39,7 +39,7 @@ func LogRequestWithKey(log *logger.Logger, operation string, key []byte, target object = target.String() } - log.Info(context.Background(), logs.AuditEventLogRecord, + log.Info(ctx, logs.AuditEventLogRecord, zap.String("operation", operation), zap.String("object", object), zap.String("subject", subject), diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 5bc5c8bea..19b5d34e4 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -117,7 +117,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u } if !unprepared { - if err := v.validateSignatureKey(obj); err != nil { + if err := v.validateSignatureKey(ctx, obj); err != nil { return fmt.Errorf("(%T) could not validate signature key: %w", v, err) } @@ -134,7 +134,7 @@ func (v *FormatValidator) Validate(ctx context.Context, obj *objectSDK.Object, u return nil } -func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { +func (v *FormatValidator) validateSignatureKey(ctx context.Context, obj *objectSDK.Object) error { sig := obj.Signature() if sig == nil { return errMissingSignature @@ -156,7 +156,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { ownerID := obj.OwnerID() if token == nil && obj.ECHeader() != nil { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -172,7 +172,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { } if v.verifyTokenIssuer { - role, err := v.isIROrContainerNode(obj, binKey) + role, err := v.isIROrContainerNode(ctx, obj, binKey) if err != nil { return err } @@ -190,7 +190,7 @@ func (v *FormatValidator) validateSignatureKey(obj *objectSDK.Object) error { return nil } -func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { +func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSDK.Object, signerKey []byte) (acl.Role, error) { cnrID, containerIDSet := obj.ContainerID() if !containerIDSet { return acl.RoleOthers, errNilCID @@ -204,7 +204,7 @@ func (v *FormatValidator) isIROrContainerNode(obj *objectSDK.Object, signerKey [ return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } - res, err := v.senderClassifier.IsInnerRingOrContainerNode(signerKey, cnrID, cnr.Value) + res, err := v.senderClassifier.IsInnerRingOrContainerNode(ctx, signerKey, cnrID, cnr.Value) if err != nil { return acl.RoleOthers, err } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index ed438c0b9..3b3650134 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -41,6 +41,7 @@ type ClassifyResult struct { } func (c SenderClassifier) Classify( + ctx context.Context, ownerID *user.ID, ownerKey *keys.PublicKey, idCnr cid.ID, @@ -58,14 +59,14 @@ func (c SenderClassifier) Classify( }, nil } - return c.IsInnerRingOrContainerNode(ownerKeyInBytes, idCnr, cnr) + return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr) } -func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { +func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr 
container.Container) (*ClassifyResult, error) { isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) if err != nil { // do not throw error, try best case matching - c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromInnerRing, + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, zap.String("error", err.Error())) } else if isInnerRingNode { return &ClassifyResult{ @@ -82,7 +83,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ownerKeyInBytes []byte, idC // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so // do not throw error, try best case matching - c.log.Debug(context.Background(), logs.V2CantCheckIfRequestFromContainerNode, + c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, zap.String("error", err.Error())) } else if isContainerNode { return &ClassifyResult{ diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index 3db504368..3f9d8df5f 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -29,7 +29,7 @@ type ( emitDuration uint32 // in blocks } - depositor func() (util.Uint256, error) + depositor func(context.Context) (util.Uint256, error) awaiter func(context.Context, util.Uint256) error ) @@ -66,11 +66,11 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { ) } -func newEmissionTimer(args *emitTimerArgs) *timer.BlockTimer { +func newEmissionTimer(ctx context.Context, args *emitTimerArgs) *timer.BlockTimer { return timer.NewBlockTimer( timer.StaticBlockMeter(args.emitDuration), func() { - args.ap.HandleGasEmission(timerEvent.NewAlphabetEmitTick{}) + args.ap.HandleGasEmission(ctx, timerEvent.NewAlphabetEmitTick{}) }, ) } diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index e08a613c3..25f4ff034 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -35,7 +35,7 @@ import ( "google.golang.org/grpc" ) -func (s *Server) initNetmapProcessor(cfg *viper.Viper, +func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, alphaSync event.Handler, ) error { locodeValidator, err := s.newLocodeValidator(cfg) @@ -48,10 +48,13 @@ func (s *Server) initNetmapProcessor(cfg *viper.Viper, var netMapCandidateStateValidator statevalidation.NetMapCandidateValidator netMapCandidateStateValidator.SetNetworkSettings(netSettings) + poolSize := cfg.GetInt("workers.netmap") + s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) + s.netmapProcessor, err = netmap.New(&netmap.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.netmap"), + PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), EpochTimer: s, EpochState: s, @@ -205,7 +208,7 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) { s.addBlockTimer(s.epochTimer) // initialize emission timer - emissionTimer := newEmissionTimer(&emitTimerArgs{ + emissionTimer := newEmissionTimer(ctx, &emitTimerArgs{ ap: s.alphabetProcessor, emitDuration: cfg.GetUint32("timers.emit"), }) @@ -213,18 +216,20 @@ func (s *Server) initTimers(ctx context.Context, cfg *viper.Viper) { s.addBlockTimer(emissionTimer) } -func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { +func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) error { parsedWallets, err := parseWalletAddressesFromStrings(cfg.GetStringSlice("emit.extra_wallets")) if err != nil { return err } + poolSize := cfg.GetInt("workers.alphabet") + s.log.Debug(ctx, 
logs.AlphabetAlphabetWorkerPool, zap.Int("size", poolSize)) // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.alphabet"), + PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, NetmapClient: s.netmapClient, MorphClient: s.morphClient, @@ -239,12 +244,14 @@ func (s *Server) initAlphabetProcessor(cfg *viper.Viper) error { return err } -func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { +func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, cnrClient *container.Client, frostfsIDClient *frostfsid.Client) error { + poolSize := cfg.GetInt("workers.container") + s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.container"), + PoolSize: poolSize, AlphabetState: s, ContainerClient: cnrClient, MorphClient: cnrClient.Morph(), @@ -258,12 +265,14 @@ func (s *Server) initContainerProcessor(cfg *viper.Viper, cnrClient *container.C return bindMorphProcessor(containerProcessor, s) } -func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { +func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, frostfsCli *frostfsClient.Client) error { + poolSize := cfg.GetInt("workers.balance") + s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.balance"), + PoolSize: poolSize, FrostFSClient: frostfsCli, BalanceSC: s.contracts.balance, AlphabetState: s, @@ -276,15 +285,17 @@ func (s *Server) initBalanceProcessor(cfg *viper.Viper, frostfsCli *frostfsClien return bindMorphProcessor(balanceProcessor, s) } -func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { +func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Viper) error { if s.withoutMainNet { return nil } + poolSize := cfg.GetInt("workers.frostfs") + s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ Log: s.log, Metrics: s.irMetrics, - PoolSize: cfg.GetInt("workers.frostfs"), + PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, BalanceClient: s.balanceClient, NetmapClient: s.netmapClient, @@ -304,10 +315,10 @@ func (s *Server) initFrostFSMainnetProcessor(cfg *viper.Viper) error { return bindMainnetProcessor(frostfsProcessor, s) } -func (s *Server) initGRPCServer(cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { +func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logger.Logger, audit *atomic.Bool) error { controlSvcEndpoint := cfg.GetString("control.grpc.endpoint") if controlSvcEndpoint == "" { - s.log.Info(context.Background(), logs.InnerringNoControlServerEndpointSpecified) + s.log.Info(ctx, logs.InnerringNoControlServerEndpointSpecified) return nil } @@ -403,7 +414,7 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { return result, nil } -func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClients) error { +func (s *Server) initProcessors(ctx context.Context, cfg *viper.Viper, morphClients *serverMorphClients) error { irf 
:= s.createIRFetcher() s.statusIndex = newInnerRingIndexer( @@ -418,27 +429,27 @@ func (s *Server) initProcessors(cfg *viper.Viper, morphClients *serverMorphClien return err } - err = s.initNetmapProcessor(cfg, alphaSync) + err = s.initNetmapProcessor(ctx, cfg, alphaSync) if err != nil { return err } - err = s.initContainerProcessor(cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) + err = s.initContainerProcessor(ctx, cfg, morphClients.CnrClient, morphClients.FrostFSIDClient) if err != nil { return err } - err = s.initBalanceProcessor(cfg, morphClients.FrostFSClient) + err = s.initBalanceProcessor(ctx, cfg, morphClients.FrostFSClient) if err != nil { return err } - err = s.initFrostFSMainnetProcessor(cfg) + err = s.initFrostFSMainnetProcessor(ctx, cfg) if err != nil { return err } - err = s.initAlphabetProcessor(cfg) + err = s.initAlphabetProcessor(ctx, cfg) return err } diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index e81ec6bca..4fe9cc084 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -157,7 +157,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { return err } - if s.IsAlphabet() { + if s.IsAlphabet(ctx) { err = s.initMainNotary(ctx) if err != nil { return err @@ -217,14 +217,14 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { } func (s *Server) registerMorphNewBlockEventHandler() { - s.morphListener.RegisterBlockHandler(func(b *block.Block) { - s.log.Debug(context.Background(), logs.InnerringNewBlock, + s.morphListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { + s.log.Debug(ctx, logs.InnerringNewBlock, zap.Uint32("index", b.Index), ) err := s.persistate.SetUInt32(persistateSideChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", b.Index)) } @@ -235,10 +235,10 @@ func (s *Server) registerMorphNewBlockEventHandler() { func (s *Server) registerMainnetNewBlockEventHandler() { if !s.withoutMainNet { - s.mainnetListener.RegisterBlockHandler(func(b *block.Block) { + s.mainnetListener.RegisterBlockHandler(func(ctx context.Context, b *block.Block) { err := s.persistate.SetUInt32(persistateMainChainLastBlockKey, b.Index) if err != nil { - s.log.Warn(context.Background(), logs.InnerringCantUpdatePersistentState, + s.log.Warn(ctx, logs.InnerringCantUpdatePersistentState, zap.String("chain", "main"), zap.Uint32("block_index", b.Index)) } @@ -400,14 +400,14 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan return nil, err } - err = server.initProcessors(cfg, morphClients) + err = server.initProcessors(ctx, cfg, morphClients) if err != nil { return nil, err } server.initTimers(ctx, cfg) - err = server.initGRPCServer(cfg, log, audit) + err = server.initGRPCServer(ctx, cfg, log, audit) if err != nil { return nil, err } @@ -604,7 +604,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { s.log.Debug(ctx, logs.InnerringReadConfigFromBlockchain, zap.Bool("active", s.IsActive(ctx)), - zap.Bool("alphabet", s.IsAlphabet()), + zap.Bool("alphabet", s.IsAlphabet(ctx)), zap.Uint64("epoch", epoch), zap.Uint32("precision", balancePrecision), zap.Uint32("init_epoch_tick_delta", s.initialEpochTickDelta), @@ -636,7 +636,7 @@ func (s *Server) nextEpochBlockDelta() (uint32, error) { // only if inner ring node is alphabet node. 
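The RegisterBlockHandler callbacks above now receive a context as their first argument, so per-block log records carry the subscription loop's context. A rough sketch of that handler shape, using simplified stand-in types rather than the real morph listener:

package main

import (
	"context"
	"fmt"
)

type block struct{ Index uint32 }

// blockHandler matches the new callback shape: context first, then the block.
type blockHandler func(ctx context.Context, b block)

type listener struct{ handlers []blockHandler }

func (l *listener) RegisterBlockHandler(h blockHandler) {
	l.handlers = append(l.handlers, h)
}

// dispatch shows how a subscription loop could supply its own ctx to every handler.
func (l *listener) dispatch(ctx context.Context, b block) {
	for _, h := range l.handlers {
		h(ctx, b)
	}
}

func main() {
	var l listener
	l.RegisterBlockHandler(func(_ context.Context, b block) {
		fmt.Println("new block", b.Index)
	})
	l.dispatch(context.Background(), block{Index: 7})
}
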
func (s *Server) onlyAlphabetEventHandler(f event.Handler) event.Handler { return func(ctx context.Context, ev event.Event) { - if s.IsAlphabet() { + if s.IsAlphabet(ctx) { f(ctx, ev) } } diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index dd3afa2c2..c8a69962f 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -28,37 +28,38 @@ const ( gasDivisor = 2 ) -func (s *Server) depositMainNotary() (tx util.Uint256, err error) { +func (s *Server) depositMainNotary(ctx context.Context) (tx util.Uint256, err error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.mainnetClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate main notary deposit amount: %w", err) } return s.mainnetClient.DepositNotary( + ctx, depositAmount, uint32(s.epochDuration.Load())+notaryExtraBlocks, ) } -func (s *Server) depositSideNotary() (util.Uint256, error) { +func (s *Server) depositSideNotary(ctx context.Context) (util.Uint256, error) { depositAmount, err := client.CalculateNotaryDepositAmount(s.morphClient, gasMultiplier, gasDivisor) if err != nil { return util.Uint256{}, fmt.Errorf("could not calculate side notary deposit amount: %w", err) } - tx, _, err := s.morphClient.DepositEndlessNotary(depositAmount) + tx, _, err := s.morphClient.DepositEndlessNotary(ctx, depositAmount) return tx, err } func (s *Server) notaryHandler(ctx context.Context, _ event.Event) { if !s.mainNotaryConfig.disabled { - _, err := s.depositMainNotary() + _, err := s.depositMainNotary(ctx) if err != nil { s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInMainChain, zap.Error(err)) } } - if _, err := s.depositSideNotary(); err != nil { + if _, err := s.depositSideNotary(ctx); err != nil { s.log.Error(ctx, logs.InnerringCantMakeNotaryDepositInSideChain, zap.Error(err)) } } @@ -72,7 +73,7 @@ func (s *Server) awaitSideNotaryDeposit(ctx context.Context, tx util.Uint256) er } func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaiter, msg string) error { - tx, err := deposit() + tx, err := deposit(ctx) if err != nil { return err } diff --git a/pkg/innerring/processors/alphabet/handlers.go b/pkg/innerring/processors/alphabet/handlers.go index 0cc2a5f39..d6b653282 100644 --- a/pkg/innerring/processors/alphabet/handlers.go +++ b/pkg/innerring/processors/alphabet/handlers.go @@ -10,16 +10,16 @@ import ( "go.uber.org/zap" ) -func (ap *Processor) HandleGasEmission(ev event.Event) { +func (ap *Processor) HandleGasEmission(ctx context.Context, ev event.Event) { _ = ev.(timers.NewAlphabetEmitTick) - ap.log.Info(context.Background(), logs.AlphabetTick, zap.String("type", "alphabet gas emit")) + ap.log.Info(ctx, logs.AlphabetTick, zap.String("type", "alphabet gas emit")) // send event to the worker pool - err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", ap.processEmit) + err := processors.SubmitEvent(ap.pool, ap.metrics, "alphabet_emit_gas", func() bool { return ap.processEmit(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - ap.log.Warn(context.Background(), logs.AlphabetAlphabetProcessorWorkerPoolDrained, + ap.log.Warn(ctx, logs.AlphabetAlphabetProcessorWorkerPoolDrained, zap.Int("capacity", ap.pool.Cap())) } } diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index c7a004b54..036b8055c 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ 
-1,6 +1,7 @@ package alphabet_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" @@ -60,7 +61,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -137,7 +138,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -198,7 +199,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { processor, err := alphabet.New(params) require.NoError(t, err, "failed to create processor instance") - processor.HandleGasEmission(timers.NewAlphabetEmitTick{}) + processor.HandleGasEmission(context.Background(), timers.NewAlphabetEmitTick{}) processor.WaitPoolRunning() @@ -219,7 +220,7 @@ type testIndexer struct { index int } -func (i *testIndexer) AlphabetIndex() int { +func (i *testIndexer) AlphabetIndex(context.Context) int { return i.index } @@ -246,7 +247,7 @@ type testMorphClient struct { batchTransferedGas []batchTransferGas } -func (c *testMorphClient) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { c.invokedMethods = append(c.invokedMethods, invokedMethod{ contract: contract, diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 142409631..229261250 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -14,39 +14,39 @@ import ( const emitMethod = "emit" -func (ap *Processor) processEmit() bool { - index := ap.irList.AlphabetIndex() +func (ap *Processor) processEmit(ctx context.Context) bool { + index := ap.irList.AlphabetIndex(ctx) if index < 0 { - ap.log.Info(context.Background(), logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) + ap.log.Info(ctx, logs.AlphabetNonAlphabetModeIgnoreGasEmissionEvent) return true } contract, ok := ap.alphabetContracts.GetByIndex(index) if !ok { - ap.log.Debug(context.Background(), logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, + ap.log.Debug(ctx, logs.AlphabetNodeIsOutOfAlphabetRangeIgnoreGasEmissionEvent, zap.Int("index", index)) return false } // there is no signature collecting, so we don't need extra fee - _, err := ap.morphClient.Invoke(contract, 0, emitMethod) + _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) return false } if ap.storageEmission == 0 { - ap.log.Info(context.Background(), logs.AlphabetStorageNodeEmissionIsOff) + ap.log.Info(ctx, logs.AlphabetStorageNodeEmissionIsOff) return true } networkMap, err := ap.netmapClient.NetMap() if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, + 
ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.String("error", err.Error())) return false @@ -59,7 +59,7 @@ func (ap *Processor) processEmit() bool { ap.pwLock.RUnlock() extraLen := len(pw) - ap.log.Debug(context.Background(), logs.AlphabetGasEmission, + ap.log.Debug(ctx, logs.AlphabetGasEmission, zap.Int("network_map", nmLen), zap.Int("extra_wallets", extraLen)) @@ -69,20 +69,20 @@ func (ap *Processor) processEmit() bool { gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(nmLen+extraLen)) - ap.transferGasToNetmapNodes(nmNodes, gasPerNode) + ap.transferGasToNetmapNodes(ctx, nmNodes, gasPerNode) - ap.transferGasToExtraNodes(pw, gasPerNode) + ap.transferGasToExtraNodes(ctx, pw, gasPerNode) return true } -func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []netmap.NodeInfo, gasPerNode fixedn.Fixed8) { for i := range nmNodes { keyBytes := nmNodes[i].PublicKey() key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantParseNodePublicKey, + ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, zap.String("error", err.Error())) continue @@ -90,7 +90,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN err = ap.morphClient.TransferGas(key.GetScriptHash(), gasPerNode) if err != nil { - ap.log.Warn(context.Background(), logs.AlphabetCantTransferGas, + ap.log.Warn(ctx, logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), @@ -99,7 +99,7 @@ func (ap *Processor) transferGasToNetmapNodes(nmNodes []netmap.NodeInfo, gasPerN } } -func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixedn.Fixed8) { +func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint160, gasPerNode fixedn.Fixed8) { if len(pw) > 0 { err := ap.morphClient.BatchTransferGas(pw, gasPerNode) if err != nil { @@ -107,7 +107,7 @@ func (ap *Processor) transferGasToExtraNodes(pw []util.Uint160, gasPerNode fixed for i, addr := range pw { receiversLog[i] = addr.StringLE() } - ap.log.Warn(context.Background(), logs.AlphabetCantTransferGasToWallet, + ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), zap.String("error", err.Error()), diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 8dbef1e20..bf74834ed 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -7,7 +7,6 @@ import ( "sync" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -15,13 +14,12 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // Indexer is a callback interface for inner ring global state. 
Indexer interface { - AlphabetIndex() int + AlphabetIndex(context.Context) int } // Contracts is an interface of the storage @@ -41,7 +39,7 @@ type ( } morphClient interface { - Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) + Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error } @@ -86,8 +84,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/alphabet: global state is not set") } - p.Log.Debug(context.Background(), logs.AlphabetAlphabetWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/balance/handlers.go b/pkg/innerring/processors/balance/handlers.go index 5a89e6f7c..b5d05a02e 100644 --- a/pkg/innerring/processors/balance/handlers.go +++ b/pkg/innerring/processors/balance/handlers.go @@ -20,7 +20,7 @@ func (bp *Processor) handleLock(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(bp.pool, bp.metrics, "lock", func() bool { - return bp.processLock(&lock) + return bp.processLock(ctx, &lock) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/balance/handlers_test.go b/pkg/innerring/processors/balance/handlers_test.go index 3ef4959cc..0fd23d8ab 100644 --- a/pkg/innerring/processors/balance/handlers_test.go +++ b/pkg/innerring/processors/balance/handlers_test.go @@ -70,7 +70,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -84,7 +84,7 @@ type testFrostFSContractClient struct { chequeCalls int } -func (c *testFrostFSContractClient) Cheque(p frostfscontract.ChequePrm) error { +func (c *testFrostFSContractClient) Cheque(_ context.Context, p frostfscontract.ChequePrm) error { c.chequeCalls++ return nil } diff --git a/pkg/innerring/processors/balance/process_assets.go b/pkg/innerring/processors/balance/process_assets.go index ac6a1e493..60475908c 100644 --- a/pkg/innerring/processors/balance/process_assets.go +++ b/pkg/innerring/processors/balance/process_assets.go @@ -11,9 +11,9 @@ import ( // Process lock event by invoking Cheque method in main net to send assets // back to the withdraw issuer. 
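The handler changes above keep the worker pool interface untouched: the pool still accepts plain func() bool tasks, and each handler simply captures ctx in the closure it submits. A small sketch of that capture pattern; the pool and names are simplified, not the actual processors.SubmitEvent helper:

package main

import (
	"context"
	"fmt"
)

// submit stands in for a worker pool that only accepts context-free tasks.
func submit(task func() bool) error {
	task()
	return nil
}

func processLock(ctx context.Context, id int) bool {
	fmt.Println("processing lock", id)
	return true
}

func handleLock(ctx context.Context, id int) {
	// ctx is captured by the closure, so the queued task still carries
	// the caller's context even though the pool knows nothing about it.
	_ = submit(func() bool { return processLock(ctx, id) })
}

func main() {
	handleLock(context.Background(), 1)
}
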
-func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { - if !bp.alphabetState.IsAlphabet() { - bp.log.Info(context.Background(), logs.BalanceNonAlphabetModeIgnoreBalanceLock) +func (bp *Processor) processLock(ctx context.Context, lock *balanceEvent.Lock) bool { + if !bp.alphabetState.IsAlphabet(ctx) { + bp.log.Info(ctx, logs.BalanceNonAlphabetModeIgnoreBalanceLock) return true } @@ -25,9 +25,9 @@ func (bp *Processor) processLock(lock *balanceEvent.Lock) bool { prm.SetLock(lock.LockAccount()) prm.SetHash(lock.TxHash()) - err := bp.frostfsClient.Cheque(prm) + err := bp.frostfsClient.Cheque(ctx, prm) if err != nil { - bp.log.Error(context.Background(), logs.BalanceCantSendLockAssetTx, zap.Error(err)) + bp.log.Error(ctx, logs.BalanceCantSendLockAssetTx, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index c4078461e..e2f649600 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -13,13 +12,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. @@ -28,7 +26,7 @@ type ( } FrostFSClient interface { - Cheque(p frostfscontract.ChequePrm) error + Cheque(ctx context.Context, p frostfscontract.ChequePrm) error } // Processor of events produced by balance contract in the morphchain. 
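As the processor.go hunk above shows, callback interfaces such as AlphabetState now take a context even when the current implementation does not need it; the test doubles accept it as an unnamed parameter. A hedged sketch of that shape, with illustrative types only:

package main

import (
	"context"
	"fmt"
)

// AlphabetState mirrors the updated callback interface: ctx-first, result unchanged.
type AlphabetState interface {
	IsAlphabet(context.Context) bool
}

// fakeState shows how a test double satisfies the new signature without using ctx.
type fakeState struct{ isAlphabet bool }

func (s *fakeState) IsAlphabet(context.Context) bool { return s.isAlphabet }

func process(ctx context.Context, st AlphabetState) bool {
	if !st.IsAlphabet(ctx) {
		fmt.Println("non alphabet mode, ignore event")
		return true
	}
	fmt.Println("alphabet mode, handle event")
	return true
}

func main() {
	process(context.Background(), &fakeState{isAlphabet: true})
}
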
@@ -69,8 +67,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/balance: balance precision converter is not set") } - p.Log.Debug(context.Background(), logs.BalanceBalanceWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/balance: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go index 45cac513a..bb038a3cb 100644 --- a/pkg/innerring/processors/container/handlers.go +++ b/pkg/innerring/processors/container/handlers.go @@ -23,7 +23,7 @@ func (cp *Processor) handlePut(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_put", func() bool { - return cp.processContainerPut(put) + return cp.processContainerPut(ctx, put) }) if err != nil { // there system can be moved into controlled degradation stage @@ -41,7 +41,7 @@ func (cp *Processor) handleDelete(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(cp.pool, cp.metrics, "container_delete", func() bool { - return cp.processContainerDelete(del) + return cp.processContainerDelete(ctx, del) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index a2fe50fa8..f28e5372a 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -161,7 +161,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 2b9c5995c..16c450166 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -37,27 +37,27 @@ var errContainerAndOwnerNamespaceDontMatch = errors.New("container and owner nam // Process a new container from the user by checking the container sanity // and sending approve tx back to the morph. 
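In the processContainerPut hunk below, the pre-existing local variable named ctx (a *putContainerContext) is renamed to pctx so it does not shadow the new context.Context parameter. A tiny sketch of why the rename is needed; the names here are illustrative:

package main

import (
	"context"
	"fmt"
)

type putContainerContext struct{ name string }

func processPut(ctx context.Context, name string) {
	if err := ctx.Err(); err != nil {
		fmt.Println("request cancelled:", err)
		return
	}
	// The request-scoped helper state keeps a distinct name (pctx) so that
	// ctx continues to refer to the context.Context parameter.
	pctx := &putContainerContext{name: name}
	fmt.Println("approving container", pctx.name)
}

func main() {
	processPut(context.Background(), "example")
}
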
-func (cp *Processor) processContainerPut(put putEvent) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerPut) +func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerPut) return true } - ctx := &putContainerContext{ + pctx := &putContainerContext{ e: put, } - err := cp.checkPutContainer(ctx) + err := cp.checkPutContainer(pctx) if err != nil { - cp.log.Error(context.Background(), logs.ContainerPutContainerCheckFailed, + cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, zap.String("error", err.Error()), ) return false } - if err := cp.morphClient.NotarySignAndInvokeTX(ctx.e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(context.Background(), logs.ContainerCouldNotApprovePutContainer, + if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { + cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, zap.String("error", err.Error()), ) return false @@ -104,15 +104,15 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { // Process delete container operation from the user by checking container sanity // and sending approve tx back to morph. -func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { - if !cp.alphabetState.IsAlphabet() { - cp.log.Info(context.Background(), logs.ContainerNonAlphabetModeIgnoreContainerDelete) +func (cp *Processor) processContainerDelete(ctx context.Context, e containerEvent.Delete) bool { + if !cp.alphabetState.IsAlphabet(ctx) { + cp.log.Info(ctx, logs.ContainerNonAlphabetModeIgnoreContainerDelete) return true } err := cp.checkDeleteContainer(e) if err != nil { - cp.log.Error(context.Background(), logs.ContainerDeleteContainerCheckFailed, + cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, zap.String("error", err.Error()), ) @@ -120,7 +120,7 @@ func (cp *Processor) processContainerDelete(e containerEvent.Delete) bool { } if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { - cp.log.Error(context.Background(), logs.ContainerCouldNotApproveDeleteContainer, + cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, zap.String("error", err.Error()), ) diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 7a50ca773..58b90457c 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -6,7 +6,6 @@ import ( "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -16,13 +15,12 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( // AlphabetState is a callback interface for inner ring global state. 
AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ContClient interface { @@ -98,8 +96,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/container: FrostFSID client is not set") } - p.Log.Debug(context.Background(), logs.ContainerContainerWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/container: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/frostfs/handlers.go b/pkg/innerring/processors/frostfs/handlers.go index d11ad0f5c..936de2e77 100644 --- a/pkg/innerring/processors/frostfs/handlers.go +++ b/pkg/innerring/processors/frostfs/handlers.go @@ -24,7 +24,7 @@ func (np *Processor) handleDeposit(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_deposit", func() bool { - return np.processDeposit(deposit) + return np.processDeposit(ctx, deposit) }) if err != nil { // there system can be moved into controlled degradation stage @@ -44,7 +44,7 @@ func (np *Processor) handleWithdraw(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_withdraw", func() bool { - return np.processWithdraw(withdraw) + return np.processWithdraw(ctx, withdraw) }) if err != nil { // there system can be moved into controlled degradation stage @@ -62,7 +62,7 @@ func (np *Processor) handleCheque(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_cheque", func() bool { - return np.processCheque(cheque) + return np.processCheque(ctx, cheque) }) if err != nil { // there system can be moved into controlled degradation stage @@ -81,7 +81,7 @@ func (np *Processor) handleConfig(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "frostfs_config", func() bool { - return np.processConfig(cfg) + return np.processConfig(ctx, cfg) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/frostfs/handlers_test.go b/pkg/innerring/processors/frostfs/handlers_test.go index c1541ca40..72310f6f9 100644 --- a/pkg/innerring/processors/frostfs/handlers_test.go +++ b/pkg/innerring/processors/frostfs/handlers_test.go @@ -226,7 +226,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -242,17 +242,17 @@ type testBalaceClient struct { burn []balance.BurnPrm } -func (c *testBalaceClient) Mint(p balance.MintPrm) error { +func (c *testBalaceClient) Mint(_ context.Context, p balance.MintPrm) error { c.mint = append(c.mint, p) return nil } -func (c *testBalaceClient) Lock(p balance.LockPrm) error { +func (c *testBalaceClient) Lock(_ context.Context, p balance.LockPrm) error { c.lock = append(c.lock, p) return nil } -func (c *testBalaceClient) Burn(p balance.BurnPrm) error { +func (c *testBalaceClient) Burn(_ context.Context, p balance.BurnPrm) error { c.burn = append(c.burn, p) return nil } @@ -261,7 +261,7 @@ type testNetmapClient struct { config []nmClient.SetConfigPrm } -func (c *testNetmapClient) SetConfig(p nmClient.SetConfigPrm) error { +func (c *testNetmapClient) SetConfig(_ context.Context, p nmClient.SetConfigPrm) error { c.config = append(c.config, p) 
return nil } diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index 3bee6ed96..ee824ea31 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -17,9 +17,9 @@ const ( // Process deposit event by invoking a balance contract and sending native // gas in the sidechain. -func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreDeposit) +func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.Deposit) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreDeposit) return true } @@ -30,9 +30,9 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { prm.SetID(deposit.ID()) // send transferX to a balance contract - err := np.balanceClient.Mint(prm) + err := np.balanceClient.Mint(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToBalanceContract, zap.Error(err)) } curEpoch := np.epochState.EpochCounter() @@ -46,7 +46,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { val, ok := np.mintEmitCache.Get(receiver.String()) if ok && val+np.mintEmitThreshold >= curEpoch { - np.log.Warn(context.Background(), logs.FrostFSDoubleMintEmissionDeclined, + np.log.Warn(ctx, logs.FrostFSDoubleMintEmissionDeclined, zap.Stringer("receiver", receiver), zap.Uint64("last_emission", val), zap.Uint64("current_epoch", curEpoch)) @@ -58,12 +58,12 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { // before gas transfer check if the balance is greater than the threshold balance, err := np.morphClient.GasBalance() if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantGetGasBalanceOfTheNode, zap.Error(err)) return false } if balance < np.gasBalanceThreshold { - np.log.Warn(context.Background(), logs.FrostFSGasBalanceThresholdHasBeenReached, + np.log.Warn(ctx, logs.FrostFSGasBalanceThresholdHasBeenReached, zap.Int64("balance", balance), zap.Int64("threshold", np.gasBalanceThreshold)) @@ -72,7 +72,7 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferNativeGasToReceiver, + np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver, zap.String("error", err.Error())) return false @@ -84,16 +84,16 @@ func (np *Processor) processDeposit(deposit frostfsEvent.Deposit) bool { } // Process withdraw event by locking assets in the balance account. 
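processDeposit above guards against repeated gas emission to the same receiver by remembering the epoch of the last emission and declining when it falls within the configured threshold. A stripped-down sketch of that guard with a plain map instead of the LRU cache; the threshold and epoch values are made up:

package main

import "fmt"

// emissionGuard remembers, per receiver, the last epoch at which gas was emitted.
type emissionGuard struct {
	last      map[string]uint64
	threshold uint64
}

// allow reports whether gas may be emitted to receiver at epoch and records it.
func (g *emissionGuard) allow(receiver string, epoch uint64) bool {
	if prev, ok := g.last[receiver]; ok && prev+g.threshold >= epoch {
		return false // double mint emission declined
	}
	g.last[receiver] = epoch
	return true
}

func main() {
	g := &emissionGuard{last: map[string]uint64{}, threshold: 3}
	fmt.Println(g.allow("addr1", 10)) // true: first emission
	fmt.Println(g.allow("addr1", 12)) // false: 10+3 >= 12, still within threshold
	fmt.Println(g.allow("addr1", 14)) // true: 10+3 < 14
}
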
-func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreWithdraw) +func (np *Processor) processWithdraw(ctx context.Context, withdraw frostfsEvent.Withdraw) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreWithdraw) return true } // create lock account lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size]) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantCreateLockAccount, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantCreateLockAccount, zap.Error(err)) return false } @@ -107,9 +107,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { prm.SetAmount(np.converter.ToBalancePrecision(withdraw.Amount())) prm.SetDueEpoch(int64(curEpoch + lockAccountLifetime)) - err = np.balanceClient.Lock(prm) + err = np.balanceClient.Lock(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantLockAssetsForWithdraw, zap.Error(err)) return false } @@ -118,9 +118,9 @@ func (np *Processor) processWithdraw(withdraw frostfsEvent.Withdraw) bool { // Process cheque event by transferring assets from the lock account back to // the reserve account. -func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreCheque) +func (np *Processor) processCheque(ctx context.Context, cheque frostfsEvent.Cheque) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreCheque) return true } @@ -130,9 +130,9 @@ func (np *Processor) processCheque(cheque frostfsEvent.Cheque) bool { prm.SetAmount(np.converter.ToBalancePrecision(cheque.Amount())) prm.SetID(cheque.ID()) - err := np.balanceClient.Burn(prm) + err := np.balanceClient.Burn(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantTransferAssetsToFedContract, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/process_config.go b/pkg/innerring/processors/frostfs/process_config.go index 814dd40b4..dc579f6bb 100644 --- a/pkg/innerring/processors/frostfs/process_config.go +++ b/pkg/innerring/processors/frostfs/process_config.go @@ -11,9 +11,9 @@ import ( // Process config event by setting configuration value from the mainchain in // the sidechain. 
-func (np *Processor) processConfig(config frostfsEvent.Config) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.FrostFSNonAlphabetModeIgnoreConfig) +func (np *Processor) processConfig(ctx context.Context, config frostfsEvent.Config) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.FrostFSNonAlphabetModeIgnoreConfig) return true } @@ -24,9 +24,9 @@ func (np *Processor) processConfig(config frostfsEvent.Config) bool { prm.SetValue(config.Value()) prm.SetHash(config.TxHash()) - err := np.netmapClient.SetConfig(prm) + err := np.netmapClient.SetConfig(ctx, prm) if err != nil { - np.log.Error(context.Background(), logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) + np.log.Error(ctx, logs.FrostFSCantRelaySetConfigEvent, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index fdc31d82e..6c29d330d 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -6,7 +6,6 @@ import ( "fmt" "sync" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/balance" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -17,7 +16,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -28,7 +26,7 @@ type ( // AlphabetState is a callback interface for inner ring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // PrecisionConverter converts balance amount values. 
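The constructor hunks in this section drop the worker-pool-size debug log from New(); as the initialization.go hunks earlier show, the size is now read and logged once in the IR init code with the caller's ctx before being passed to the constructor. A compact sketch of that split; the config value and names are illustrative:

package main

import (
	"context"
	"log/slog"
)

type processor struct{ poolSize int }

// newProcessor no longer logs; it just stores the pool size it is given.
func newProcessor(poolSize int) *processor {
	return &processor{poolSize: poolSize}
}

// initProcessor mirrors the init-side change: read the size, log it with the
// caller's ctx, then construct the processor.
func initProcessor(ctx context.Context, log *slog.Logger, cfgPoolSize int) *processor {
	log.DebugContext(ctx, "worker pool", "size", cfgPoolSize)
	return newProcessor(cfgPoolSize)
}

func main() {
	_ = initProcessor(context.Background(), slog.Default(), 10)
}
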
@@ -37,13 +35,13 @@ type ( } BalanceClient interface { - Mint(p balance.MintPrm) error - Lock(p balance.LockPrm) error - Burn(p balance.BurnPrm) error + Mint(ctx context.Context, p balance.MintPrm) error + Lock(ctx context.Context, p balance.LockPrm) error + Burn(ctx context.Context, p balance.BurnPrm) error } NetmapClient interface { - SetConfig(p nmClient.SetConfigPrm) error + SetConfig(ctx context.Context, p nmClient.SetConfigPrm) error } MorphClient interface { @@ -111,8 +109,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/frostfs: balance precision converter is not set") } - p.Log.Debug(context.Background(), logs.FrostFSFrostfsWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 286935129..5a6126249 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -219,7 +219,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -251,12 +251,12 @@ func (c *testMorphClient) Committee() (res keys.PublicKeys, err error) { return c.commiteeKeys, nil } -func (c *testMorphClient) UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error { +func (c *testMorphClient) UpdateNeoFSAlphabetList(_ context.Context, prm client.UpdateAlphabetListPrm) error { c.alphabetUpdates = append(c.alphabetUpdates, prm) return nil } -func (c *testMorphClient) UpdateNotaryList(prm client.UpdateNotaryListPrm) error { +func (c *testMorphClient) UpdateNotaryList(_ context.Context, prm client.UpdateNotaryListPrm) error { c.notaryUpdates = append(c.notaryUpdates, prm) return nil } @@ -278,7 +278,7 @@ type testFrostFSClient struct { updates []frostfscontract.AlphabetUpdatePrm } -func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error { +func (c *testFrostFSClient) AlphabetUpdate(_ context.Context, p frostfscontract.AlphabetUpdatePrm) error { c.updates = append(c.updates, p) return nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index fdfdfa479..73d21a7d2 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -20,7 +20,7 @@ const ( ) func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint256) bool { - if !gp.alphabetState.IsAlphabet() { + if !gp.alphabetState.IsAlphabet(ctx) { gp.log.Info(ctx, logs.GovernanceNonAlphabetModeIgnoreAlphabetSync) return true } @@ -69,13 +69,13 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 } // 2. Update NeoFSAlphabet role in the sidechain. - gp.updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet, txHash) + gp.updateNeoFSAlphabetRoleInSidechain(ctx, sidechainAlphabet, newAlphabet, txHash) // 3. Update notary role in the sidechain. - gp.updateNotaryRoleInSidechain(newAlphabet, txHash) + gp.updateNotaryRoleInSidechain(ctx, newAlphabet, txHash) // 4. Update FrostFS contract in the mainnet. 
- gp.updateFrostFSContractInMainnet(newAlphabet) + gp.updateFrostFSContractInMainnet(ctx, newAlphabet) gp.log.Info(ctx, logs.GovernanceFinishedAlphabetListUpdate) @@ -94,24 +94,24 @@ func prettyKeys(keys keys.PublicKeys) string { return strings.TrimRight(sb.String(), delimiter) } -func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { +func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantFetchInnerRingListFromSideChain, + gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, zap.String("error", err.Error())) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, + gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) return } sort.Sort(newInnerRing) - gp.log.Info(context.Background(), logs.GovernanceUpdateOfTheInnerRingList, + gp.log.Info(ctx, logs.GovernanceUpdateOfTheInnerRingList, zap.String("before", prettyKeys(innerRing)), zap.String("after", prettyKeys(newInnerRing)), ) @@ -120,26 +120,26 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(sidechainAlphabet, newAl updPrm.SetList(newInnerRing) updPrm.SetHash(txHash) - if err = gp.morphClient.UpdateNeoFSAlphabetList(updPrm); err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, + if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { + gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, zap.String("error", err.Error())) } } -func (gp *Processor) updateNotaryRoleInSidechain(newAlphabet keys.PublicKeys, txHash util.Uint256) { +func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabet keys.PublicKeys, txHash util.Uint256) { updPrm := client.UpdateNotaryListPrm{} updPrm.SetList(newAlphabet) updPrm.SetHash(txHash) - err := gp.morphClient.UpdateNotaryList(updPrm) + err := gp.morphClient.UpdateNotaryList(ctx, updPrm) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, zap.String("error", err.Error())) } } -func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) { +func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlphabet keys.PublicKeys) { epoch := gp.epochState.EpochCounter() buf := make([]byte, 8) @@ -152,9 +152,9 @@ func (gp *Processor) updateFrostFSContractInMainnet(newAlphabet keys.PublicKeys) prm.SetID(id) prm.SetPubs(newAlphabet) - err := gp.frostfsClient.AlphabetUpdate(prm) + err := gp.frostfsClient.AlphabetUpdate(ctx, prm) if err != nil { - gp.log.Error(context.Background(), logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, + gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, zap.String("error", err.Error())) } } diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index eaadfdb4f..565f4c27d 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -26,7 +26,7 @@ 
const ProcessorPoolSize = 1 type ( // AlphabetState is a callback interface for innerring global state. AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } ) @@ -56,7 +56,7 @@ type ( } FrostFSClient interface { - AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm) error + AlphabetUpdate(ctx context.Context, p frostfscontract.AlphabetUpdatePrm) error } NetmapClient interface { @@ -70,8 +70,8 @@ type ( MorphClient interface { Committee() (res keys.PublicKeys, err error) - UpdateNeoFSAlphabetList(prm client.UpdateAlphabetListPrm) error - UpdateNotaryList(prm client.UpdateNotaryListPrm) error + UpdateNeoFSAlphabetList(ctx context.Context, prm client.UpdateAlphabetListPrm) error + UpdateNotaryList(ctx context.Context, prm client.UpdateNotaryListPrm) error } // Processor of events related to governance in the network. diff --git a/pkg/innerring/processors/netmap/handlers.go b/pkg/innerring/processors/netmap/handlers.go index 61547e0ba..4c7199a49 100644 --- a/pkg/innerring/processors/netmap/handlers.go +++ b/pkg/innerring/processors/netmap/handlers.go @@ -14,14 +14,14 @@ import ( func (np *Processor) HandleNewEpochTick(ctx context.Context, ev event.Event) { _ = ev.(timerEvent.NewEpochTick) - np.log.Info(context.Background(), logs.NetmapTick, zap.String("type", "epoch")) + np.log.Info(ctx, logs.NetmapTick, zap.String("type", "epoch")) // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_new_epoch_tick", func() bool { return np.processNewEpochTick(ctx) }) if err != nil { // there system can be moved into controlled degradation stage - np.log.Warn(context.Background(), logs.NetmapNetmapWorkerPoolDrained, + np.log.Warn(ctx, logs.NetmapNetmapWorkerPoolDrained, zap.Int("capacity", np.pool.Cap())) } } @@ -54,7 +54,7 @@ func (np *Processor) handleAddPeer(ctx context.Context, ev event.Event) { // send an event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_add_peer", func() bool { - return np.processAddPeer(newPeer) + return np.processAddPeer(ctx, newPeer) }) if err != nil { // there system can be moved into controlled degradation stage @@ -72,7 +72,7 @@ func (np *Processor) handleUpdateState(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_update_peer", func() bool { - return np.processUpdatePeer(updPeer) + return np.processUpdatePeer(ctx, updPeer) }) if err != nil { // there system can be moved into controlled degradation stage @@ -94,7 +94,7 @@ func (np *Processor) handleCleanupTick(ctx context.Context, ev event.Event) { // send event to the worker pool err := processors.SubmitEvent(np.pool, np.metrics, "netmap_cleanup_tick", func() bool { - return np.processNetmapCleanupTick(cleanup) + return np.processNetmapCleanupTick(ctx, cleanup) }) if err != nil { // there system can be moved into controlled degradation stage diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 1e8be4095..5a5adfb2d 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -341,7 +341,7 @@ type testAlphabetState struct { isAlphabet bool } -func (s *testAlphabetState) IsAlphabet() bool { +func (s *testAlphabetState) IsAlphabet(context.Context) bool { return s.isAlphabet } @@ -365,7 +365,7 @@ type testNetmapClient struct { invokedTxs []*transaction.Transaction } -func (c *testNetmapClient) MorphNotaryInvoke(contract 
util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { +func (c *testNetmapClient) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { c.notaryInvokes = append(c.notaryInvokes, notaryInvoke{ contract: contract, fee: fee, @@ -396,7 +396,7 @@ func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { return c.netmap, nil } -func (c *testNetmapClient) NewEpoch(epoch uint64) error { +func (c *testNetmapClient) NewEpoch(_ context.Context, epoch uint64) error { c.newEpochs = append(c.newEpochs, epoch) return nil } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 9529d3a0c..a43005ffb 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -9,9 +9,9 @@ import ( "go.uber.org/zap" ) -func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) +func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapCleanupTick) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewNetmapCleanupTick) return true } @@ -19,13 +19,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { err := np.netmapSnapshot.forEachRemoveCandidate(ev.epoch, func(s string) error { key, err := keys.NewPublicKeyFromString(s) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantDecodePublicKeyOfNetmapNode, + np.log.Warn(ctx, logs.NetmapCantDecodePublicKeyOfNetmapNode, zap.String("key", s)) return nil } - np.log.Info(context.Background(), logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) + np.log.Info(ctx, logs.NetmapVoteToRemoveNodeFromNetmap, zap.String("key", s)) // In notary environments we call UpdateStateIR method instead of UpdateState. // It differs from UpdateState only by name, so we can do this in the same form. @@ -33,6 +33,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { const methodUpdateStateNotary = "updateStateIR" err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, uint32(ev.epoch), @@ -41,13 +42,13 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) bool { int64(v2netmap.Offline), key.Bytes(), ) if err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdateState, zap.Error(err)) } return nil }) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCantIterateOnNetmapCleanerCache, + np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, zap.String("error", err.Error())) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index e401ef4f2..237c4e512 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -55,7 +55,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc // Process new epoch tick by invoking new epoch method in network map contract. 
func (np *Processor) processNewEpochTick(ctx context.Context) bool { - if !np.alphabetState.IsAlphabet() { + if !np.alphabetState.IsAlphabet(ctx) { np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewEpochTick) return true } @@ -63,7 +63,7 @@ func (np *Processor) processNewEpochTick(ctx context.Context) bool { nextEpoch := np.epochState.EpochCounter() + 1 np.log.Debug(ctx, logs.NetmapNextEpoch, zap.Uint64("value", nextEpoch)) - err := np.netmapClient.NewEpoch(nextEpoch) + err := np.netmapClient.NewEpoch(ctx, nextEpoch) if err != nil { np.log.Error(ctx, logs.NetmapCantInvokeNetmapNewEpoch, zap.Error(err)) return false diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 42d1b5ec6..72aa08f76 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -13,9 +13,9 @@ import ( // Process add peer notification by sanity check of new node // local epoch timer. -func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) +func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreNewPeerNotification) return true } @@ -23,7 +23,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { tx := ev.NotaryRequest().MainTransaction ok, err := np.netmapClient.MorphIsValidScript(tx.Script, tx.Signers) if err != nil || !ok { - np.log.Warn(context.Background(), logs.NetmapNonhaltNotaryTransaction, + np.log.Warn(ctx, logs.NetmapNonhaltNotaryTransaction, zap.String("method", "netmap.AddPeer"), zap.String("hash", tx.Hash().StringLE()), zap.Error(err)) @@ -34,14 +34,14 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { var nodeInfo netmap.NodeInfo if err := nodeInfo.Unmarshal(ev.Node()); err != nil { // it will be nice to have tx id at event structure to log it - np.log.Warn(context.Background(), logs.NetmapCantParseNetworkMapCandidate) + np.log.Warn(ctx, logs.NetmapCantParseNetworkMapCandidate) return false } // validate and update node info err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { - np.log.Warn(context.Background(), logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, + np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.String("error", err.Error()), ) @@ -64,7 +64,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // That is why we need to perform `addPeerIR` only in case when node is online, // because in scope of this method, contract set state `ONLINE` for the node. 
if updated && nodeInfo.Status().IsOnline() { - np.log.Info(context.Background(), logs.NetmapApprovingNetworkMapCandidate, + np.log.Info(ctx, logs.NetmapApprovingNetworkMapCandidate, zap.String("key", keyString)) prm := netmapclient.AddPeerPrm{} @@ -77,6 +77,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { // create new notary request with the original nonce err = np.netmapClient.MorphNotaryInvoke( + ctx, np.netmapClient.ContractAddress(), 0, ev.NotaryRequest().MainTransaction.Nonce, @@ -85,7 +86,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { nodeInfoBinary, ) if err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapAddPeer, zap.Error(err)) return false } } @@ -94,9 +95,9 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool { } // Process update peer notification by sending approval tx to the smart contract. -func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { - if !np.alphabetState.IsAlphabet() { - np.log.Info(context.Background(), logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) +func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.UpdatePeer) bool { + if !np.alphabetState.IsAlphabet(ctx) { + np.log.Info(ctx, logs.NetmapNonAlphabetModeIgnoreUpdatePeerNotification) return true } @@ -109,7 +110,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { if ev.Maintenance() { err = np.nodeStateSettings.MaintenanceModeAllowed() if err != nil { - np.log.Info(context.Background(), logs.NetmapPreventSwitchingNodeToMaintenanceState, + np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), ) @@ -118,7 +119,7 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) bool { } if err = np.netmapClient.MorphNotarySignAndInvokeTX(ev.NotaryRequest().MainTransaction); err != nil { - np.log.Error(context.Background(), logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) + np.log.Error(ctx, logs.NetmapCantInvokeNetmapUpdatePeer, zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index bbd60c1e1..b3d57e85b 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" @@ -17,7 +16,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/panjf2000/ants/v2" - "go.uber.org/zap" ) type ( @@ -36,7 +34,7 @@ type ( // AlphabetState is a callback interface for inner ring global state. 
AlphabetState interface { - IsAlphabet() bool + IsAlphabet(context.Context) bool } // NodeValidator wraps basic method of checking the correctness @@ -55,12 +53,12 @@ type ( } Client interface { - MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error + MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 EpochDuration() (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) NetMap() (*netmap.NetMap, error) - NewEpoch(epoch uint64) error + NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error } @@ -133,8 +131,6 @@ func New(p *Params) (*Processor, error) { return nil, errors.New("ir/netmap: node state settings is not set") } - p.Log.Debug(context.Background(), logs.NetmapNetmapWorkerPool, zap.Int("size", p.PoolSize)) - pool, err := ants.NewPool(p.PoolSize, ants.WithNonblocking(true)) if err != nil { return nil, fmt.Errorf("ir/netmap: can't create worker pool: %w", err) diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index e75fdaf40..9cd71ae48 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + netmapclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -18,13 +20,13 @@ type netmapClientWrapper struct { netmapClient *netmapclient.Client } -func (w *netmapClientWrapper) UpdatePeerState(p netmapclient.UpdatePeerPrm) error { - _, err := w.netmapClient.UpdatePeerState(p) +func (w *netmapClientWrapper) UpdatePeerState(ctx context.Context, p netmapclient.UpdatePeerPrm) error { + _, err := w.netmapClient.UpdatePeerState(ctx, p) return err } -func (w *netmapClientWrapper) MorphNotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { - _, err := w.netmapClient.Morph().NotaryInvoke(contract, fee, nonce, vub, method, args...) +func (w *netmapClientWrapper) MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error { + _, err := w.netmapClient.Morph().NotaryInvoke(ctx, contract, fee, nonce, vub, method, args...) 
return err } @@ -44,16 +46,16 @@ func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { return w.netmapClient.NetMap() } -func (w *netmapClientWrapper) NewEpoch(epoch uint64) error { - return w.netmapClient.NewEpoch(epoch) +func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { + return w.netmapClient.NewEpoch(ctx, epoch) } func (w *netmapClientWrapper) MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) { return w.netmapClient.Morph().IsValidScript(script, signers) } -func (w *netmapClientWrapper) AddPeer(p netmapclient.AddPeerPrm) error { - return w.netmapClient.AddPeer(p) +func (w *netmapClientWrapper) AddPeer(ctx context.Context, p netmapclient.AddPeerPrm) error { + return w.netmapClient.AddPeer(ctx, p) } func (w *netmapClientWrapper) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 85f332fb6..77c2af2ce 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -53,8 +53,8 @@ func (s *Server) IsActive(ctx context.Context) bool { } // IsAlphabet is a getter for a global alphabet flag state. -func (s *Server) IsAlphabet() bool { - return s.AlphabetIndex() >= 0 +func (s *Server) IsAlphabet(ctx context.Context) bool { + return s.AlphabetIndex(ctx) >= 0 } // InnerRingIndex is a getter for a global index of node in inner ring list. Negative @@ -83,10 +83,10 @@ func (s *Server) InnerRingSize(ctx context.Context) int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. -func (s *Server) AlphabetIndex() int { +func (s *Server) AlphabetIndex(ctx context.Context) int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error(context.Background(), logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) return -1 } @@ -127,7 +127,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V } s.contracts.alphabet.iterate(func(letter GlagoliticLetter, contract util.Uint160) { - _, err := s.morphClient.NotaryInvoke(contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) + _, err := s.morphClient.NotaryInvoke(ctx, contract, s.feeConfig.SideChainFee(), nonce, vubP, voteMethod, epoch, validators) if err != nil { s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), diff --git a/pkg/innerring/state_test.go b/pkg/innerring/state_test.go index 17ab995af..f60ca87c4 100644 --- a/pkg/innerring/state_test.go +++ b/pkg/innerring/state_test.go @@ -47,8 +47,8 @@ func TestServerState(t *testing.T) { require.Equal(t, healthStatus, srv.HealthStatus(), "invalid health status") require.True(t, srv.IsActive(context.Background()), "invalid IsActive result") - require.True(t, srv.IsAlphabet(), "invalid IsAlphabet result") + require.True(t, srv.IsAlphabet(context.Background()), "invalid IsAlphabet result") require.Equal(t, 0, srv.InnerRingIndex(context.Background()), "invalid IR index") require.Equal(t, 1, srv.InnerRingSize(context.Background()), "invalid IR index") - require.Equal(t, 0, srv.AlphabetIndex(), "invalid alphabet index") + require.Equal(t, 0, srv.AlphabetIndex(context.Background()), "invalid alphabet index") } diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go 
index 10cb6f368..95fdd844b 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza_test.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza_test.go @@ -72,7 +72,7 @@ func TestBlobovnicza(t *testing.T) { require.NoError(t, blz.Open(context.Background())) // initialize Blobovnicza - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) // try to read non-existent address testGet(t, blz, oidtest.Address(), nil, client.IsErrObjectNotFound) @@ -102,5 +102,5 @@ func TestBlobovnicza(t *testing.T) { return err == nil }, nil) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index a317279a4..d0e71a876 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -56,7 +56,7 @@ func (b *Blobovnicza) Open(ctx context.Context) error { // // If Blobovnicza is already initialized, no action is taken. // Blobovnicza must be open, otherwise an error will return. -func (b *Blobovnicza) Init() error { +func (b *Blobovnicza) Init(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -64,7 +64,7 @@ func (b *Blobovnicza) Init() error { return errors.New("blobovnicza is not open") } - b.log.Debug(context.Background(), logs.BlobovniczaInitializing, + b.log.Debug(ctx, logs.BlobovniczaInitializing, zap.Uint64("object size limit", b.objSizeLimit), zap.Uint64("storage size limit", b.fullSizeLimit), ) @@ -72,7 +72,7 @@ func (b *Blobovnicza) Init() error { size := b.dataSize.Load() items := b.itemsCount.Load() if size != 0 || items != 0 { - b.log.Debug(context.Background(), logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaAlreadyInitialized, zap.Uint64("size", size), zap.Uint64("items", items)) return nil } @@ -82,7 +82,7 @@ func (b *Blobovnicza) Init() error { // create size range bucket rangeStr := stringifyBounds(lower, upper) - b.log.Debug(context.Background(), logs.BlobovniczaCreatingBucketForSizeRange, + b.log.Debug(ctx, logs.BlobovniczaCreatingBucketForSizeRange, zap.String("range", rangeStr)) _, err := tx.CreateBucketIfNotExists(key) @@ -99,14 +99,14 @@ func (b *Blobovnicza) Init() error { } } - return b.initializeCounters() + return b.initializeCounters(ctx) } func (b *Blobovnicza) ObjectsCount() uint64 { return b.itemsCount.Load() } -func (b *Blobovnicza) initializeCounters() error { +func (b *Blobovnicza) initializeCounters(ctx context.Context) error { var size uint64 var items uint64 var sizeExists bool @@ -132,17 +132,17 @@ func (b *Blobovnicza) initializeCounters() error { return fmt.Errorf("can't determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) if err := b.boltDB.Update(func(tx *bbolt.Tx) error { if err := saveDataSize(tx, size); err != nil { return err } return saveItemsCount(tx, items) }); err != nil { - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) return fmt.Errorf("can't 
save blobovnicza's size and items count: %w", err) } - b.log.Debug(context.Background(), logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) + b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } b.dataSize.Store(size) @@ -155,7 +155,7 @@ func (b *Blobovnicza) initializeCounters() error { // Close releases all internal database resources. // // If blobovnicza is already closed, does nothing. -func (b *Blobovnicza) Close() error { +func (b *Blobovnicza) Close(ctx context.Context) error { b.controlMtx.Lock() defer b.controlMtx.Unlock() @@ -163,7 +163,7 @@ func (b *Blobovnicza) Close() error { return nil } - b.log.Debug(context.Background(), logs.BlobovniczaClosingBoltDB, + b.log.Debug(ctx, logs.BlobovniczaClosingBoltDB, zap.String("path", b.path), ) diff --git a/pkg/local_object_storage/blobovnicza/get_test.go b/pkg/local_object_storage/blobovnicza/get_test.go index 8c435af89..5a382c159 100644 --- a/pkg/local_object_storage/blobovnicza/get_test.go +++ b/pkg/local_object_storage/blobovnicza/get_test.go @@ -14,11 +14,11 @@ func TestBlobovnicza_Get(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") var blz *Blobovnicza - defer func() { require.NoError(t, blz.Close()) }() + defer func() { require.NoError(t, blz.Close(context.Background())) }() fnInit := func(szLimit uint64) { if blz != nil { - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } blz = New( @@ -27,7 +27,7 @@ func TestBlobovnicza_Get(t *testing.T) { ) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) } // initial distribution: [0:32K] (32K:64K] diff --git a/pkg/local_object_storage/blobovnicza/iterate_test.go b/pkg/local_object_storage/blobovnicza/iterate_test.go index bec23bb96..717274781 100644 --- a/pkg/local_object_storage/blobovnicza/iterate_test.go +++ b/pkg/local_object_storage/blobovnicza/iterate_test.go @@ -16,7 +16,7 @@ func TestBlobovniczaIterate(t *testing.T) { filename := filepath.Join(t.TempDir(), "blob") b := New(WithPath(filename)) require.NoError(t, b.Open(context.Background())) - require.NoError(t, b.Init()) + require.NoError(t, b.Init(context.Background())) data := [][]byte{{0, 1, 2, 3}, {5, 6, 7, 8}} addr := oidtest.Address() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/active.go b/pkg/local_object_storage/blobstor/blobovniczatree/active.go index 0ac15df82..dbaa7387a 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/active.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/active.go @@ -18,8 +18,8 @@ func (db *activeDB) Blobovnicza() *blobovnicza.Blobovnicza { return db.blz } -func (db *activeDB) Close() { - db.shDB.Close() +func (db *activeDB) Close(ctx context.Context) { + db.shDB.Close(ctx) } func (db *activeDB) SystemPath() string { @@ -73,12 +73,12 @@ func (m *activeDBManager) Open() { m.closed = false } -func (m *activeDBManager) Close() { +func (m *activeDBManager) Close(ctx context.Context) { m.levelToActiveDBGuard.Lock() defer m.levelToActiveDBGuard.Unlock() for _, db := range m.levelToActiveDB { - db.Close() + db.Close(ctx) } m.levelToActiveDB = make(map[string]*sharedDB) m.closed = true @@ -103,7 +103,7 @@ func (m *activeDBManager) getCurrentActiveIfOk(ctx context.Context, lvlPath stri } if blz.IsFull() { - db.Close() + db.Close(ctx) return nil, nil } @@ -168,10 +168,10 @@ func (m *activeDBManager) getNextSharedDB(ctx 
context.Context, lvlPath string) ( previous, updated := m.replace(lvlPath, next) if !updated && next != nil { - next.Close() // manager is closed, so don't hold active DB open + next.Close(ctx) // manager is closed, so don't hold active DB open } if updated && previous != nil { - previous.Close() + previous.Close(ctx) } return next, nil } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index c909113c7..d9e99d0d1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -167,7 +167,7 @@ func (b *Blobovniczas) Compressor() *compression.Config { } // SetReportErrorFunc implements common.Storage. -func (b *Blobovniczas) SetReportErrorFunc(f func(string, error)) { +func (b *Blobovniczas) SetReportErrorFunc(f func(context.Context, string, error)) { b.reportError = f } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go index e8016781a..04ff5120c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/cache.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/cache.go @@ -32,7 +32,7 @@ func newDBCache(parentCtx context.Context, size int, ch := cache.NewCache[string, *sharedDB](). WithTTL(ttl).WithLRU().WithMaxKeys(size). WithOnEvicted(func(_ string, db *sharedDB) { - db.Close() + db.Close(parentCtx) }) ctx, cancel := context.WithCancel(parentCtx) res := &dbCache{ @@ -138,7 +138,7 @@ func (c *dbCache) create(ctx context.Context, path string) *sharedDB { return value } if added := c.put(path, value); !added { - value.Close() + value.Close(ctx) } return value } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index cc8a52d03..ec9743b57 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -27,7 +27,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { require.NoError(t, st.Open(mode.ComponentReadWrite)) require.NoError(t, st.Init()) defer func() { - require.NoError(t, st.Close()) + require.NoError(t, st.Close(context.Background())) }() objGen := &testutil.SeqObjGenerator{ObjSize: 1} diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index 7c0a9edd6..c77df63bf 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { if err != nil { return err } - defer shBlz.Close() + defer shBlz.Close(egCtx) moveInfo, err := blz.ListMoveInfo(egCtx) if err != nil { @@ -80,9 +80,9 @@ func (b *Blobovniczas) openManagers() { } // Close implements common.Storage. 
-func (b *Blobovniczas) Close() error { +func (b *Blobovniczas) Close(ctx context.Context) error { b.dbCache.Close() // order important - b.activeDBManager.Close() + b.activeDBManager.Close(ctx) b.commondbManager.Close() return nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index f0a32ded1..b26323bd0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -51,7 +51,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj35, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width blz = NewBlobovniczaTree( @@ -89,7 +89,7 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { }) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) // change depth and width back blz = NewBlobovniczaTree( @@ -127,5 +127,5 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, err) require.EqualValues(t, obj52, gRes.Object) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/count.go b/pkg/local_object_storage/blobstor/blobovniczatree/count.go index 1137b9eb2..b83849c77 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/count.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/count.go @@ -26,7 +26,7 @@ func (b *Blobovniczas) ObjectsCount(ctx context.Context) (uint64, error) { if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) result += blz.ObjectsCount() return false, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index b26e44144..8c2d7aa67 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -66,7 +66,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) if res, err = b.deleteObject(ctx, blz, bPrm); err == nil { success = true @@ -114,7 +114,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz if err != nil { return common.DeleteRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.deleteObject(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 528dbfed7..63d2f21e1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -42,7 +42,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if err != nil { return common.ExistsRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) exists, err := blz.Exists(ctx, prm.Address) return common.ExistsRes{Exists: exists}, err diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index d6ffd8bce..5414140f0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go 
@@ -27,7 +27,7 @@ func TestExistsInvalidStorageID(t *testing.T) { WithBlobovniczaSize(1<<20)) require.NoError(t, b.Open(mode.ComponentReadWrite)) require.NoError(t, b.Init()) - defer func() { require.NoError(t, b.Close()) }() + defer func() { require.NoError(t, b.Close(context.Background())) }() obj := blobstortest.NewObject(1024) addr := object.AddressOf(obj) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index fc017f22d..b7ef8d8a5 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -53,7 +53,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G if err != nil { return res, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err = b.getObject(ctx, blz, bPrm) if err == nil { @@ -100,7 +100,7 @@ func (b *Blobovniczas) getObjectFromLevel(ctx context.Context, prm blobovnicza.G if err != nil { return common.GetRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObject(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 384544d7b..b24f1b881 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -52,7 +52,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) res, err := b.getObjectRange(ctx, blz, prm) if err == nil { @@ -108,7 +108,7 @@ func (b *Blobovniczas) getRangeFromLevel(ctx context.Context, prm common.GetRang if err != nil { return common.GetRangeRes{}, err } - defer shBlz.Close() + defer shBlz.Close(ctx) return b.getObjectRange(ctx, blz, prm) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index 049a61d72..b120c22f7 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -84,7 +84,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo } return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err) } - defer shBlz.Close() + defer shBlz.Close(ctx) err = f(p, blz) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 502202d68..b35e052cf 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -71,7 +71,7 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { if err := blz.Open(ctx); err != nil { return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err) } - if err := blz.Init(); err != nil { + if err := blz.Init(ctx); err != nil { return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err) } @@ -82,20 +82,20 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { return blz, nil } -func (b *sharedDB) Close() { +func (b *sharedDB) Close(ctx context.Context) { b.cond.L.Lock() defer b.cond.L.Unlock() if b.refCount == 0 { - b.log.Error(context.Background(), logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) + b.log.Error(ctx, 
logs.AttemtToCloseAlreadyClosedBlobovnicza, zap.String("id", b.path)) b.cond.Broadcast() return } if b.refCount == 1 { b.refCount = 0 - if err := b.blcza.Close(); err != nil { - b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) @@ -111,7 +111,7 @@ func (b *sharedDB) Close() { } } -func (b *sharedDB) CloseAndRemoveFile() error { +func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { b.cond.L.Lock() if b.refCount > 1 { b.cond.Wait() @@ -122,8 +122,8 @@ func (b *sharedDB) CloseAndRemoveFile() error { return errClosingClosedBlobovnicza } - if err := b.blcza.Close(); err != nil { - b.log.Error(context.Background(), logs.BlobovniczatreeCouldNotCloseBlobovnicza, + if err := b.blcza.Close(ctx); err != nil { + b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index b56251772..0e1b2022e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -1,6 +1,7 @@ package blobovniczatree import ( + "context" "io/fs" "time" @@ -20,7 +21,7 @@ type cfg struct { blzShallowWidth uint64 compression *compression.Config blzOpts []blobovnicza.Option - reportError func(string, error) // reportError is the function called when encountering disk errors. + reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics waitBeforeDropDB time.Duration blzInitWorkerCount int @@ -54,7 +55,7 @@ func initConfig(c *cfg) { openedCacheExpInterval: defaultOpenedCacheInterval, blzShallowDepth: defaultBlzShallowDepth, blzShallowWidth: defaultBlzShallowWidth, - reportError: func(string, error) {}, + reportError: func(context.Context, string, error) {}, metrics: &noopMetrics{}, waitBeforeDropDB: defaultWaitBeforeDropDB, blzInitWorkerCount: defaultBlzInitWorkerCount, diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 8dff436d3..1678e578c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -80,7 +80,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) active, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.String("error", err.Error()), @@ -95,14 +95,14 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false, nil } - defer active.Close() + defer active.Close(ctx) i.AllFull = false _, err = active.Blobovnicza().Put(ctx, i.PutPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, 
logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index e137bdd99..16ef2b180 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -186,7 +186,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil if err != nil { return false, err } - defer shDB.Close() + defer shDB.Close(ctx) fp := blz.FillPercent() // accepted fill percent defines as // |----|+++++++++++++++++|+++++++++++++++++|--------------- @@ -206,9 +206,9 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if shDBClosed { return } - shDB.Close() + shDB.Close(ctx) }() - dropTempFile, err := b.addRebuildTempFile(path) + dropTempFile, err := b.addRebuildTempFile(ctx, path) if err != nil { return 0, err } @@ -224,7 +224,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M return migratedObjects, err } -func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) { +func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) sysPath = sysPath + rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) @@ -233,7 +233,7 @@ func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) { } return func() { if err := os.Remove(sysPath); err != nil { - b.log.Warn(context.Background(), logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) + b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } }, nil } @@ -330,7 +330,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDb.CloseAndRemoveFile(); err != nil { + if err := shDb.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) @@ -370,7 +370,7 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co if err != nil { return true, err } - defer shDB.Close() + defer shDB.Close(ctx) incompletedMoves, err := blz.ListMoveInfo(ctx) if err != nil { @@ -403,7 +403,7 @@ func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blob if err != nil { return err } - defer targetDB.Close() + defer targetDB.Close(ctx) existsInSource := true var gPrm blobovnicza.GetPrm @@ -480,7 +480,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, target, err := i.B.activeDBManager.GetOpenedActiveDBForLevel(ctx, lvlPath) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, zap.Error(err)) } @@ -491,7 +491,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, i.B.log.Warn(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } - defer target.Close() + defer target.Close(ctx) i.AllFull = false @@ -503,7 +503,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, TargetStorageID: targetStorageID.Bytes(), }); err != nil { if !isLogical(err) { - 
i.B.reportError(logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutMoveInfoToSourceBlobovnicza, zap.String("path", i.SourceSysPath), zap.Error(err)) } @@ -519,7 +519,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, _, err = target.Blobovnicza().Put(ctx, putPrm) if err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", target.SystemPath()), zap.Error(err)) } @@ -535,7 +535,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, deletePrm.SetAddress(i.Address) if _, err = i.Source.Delete(ctx, deletePrm); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDeleteFromSource, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDeleteFromSource, zap.String("path", i.SourceSysPath), zap.Error(err)) } @@ -544,7 +544,7 @@ func (i *moveIterator) tryMoveToLvl(ctx context.Context, lvlPath string) (bool, if err = i.Source.DropMoveInfo(ctx, i.Address); err != nil { if !isLogical(err) { - i.B.reportError(logs.BlobovniczatreeCouldNotDropMoveInfo, err) + i.B.reportError(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, err) } else { i.B.log.Warn(ctx, logs.BlobovniczatreeCouldNotDropMoveInfo, zap.String("path", i.SourceSysPath), zap.Error(err)) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index bfea97afe..2f58624aa 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -36,7 +36,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -53,7 +53,7 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) @@ -66,7 +66,7 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -83,19 +83,19 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) blz = 
blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, true) } @@ -106,7 +106,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) obj := blobstortest.NewObject(1024) data, err := obj.Marshal() @@ -117,14 +117,14 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { TargetStorageID: []byte("0/0/0"), })) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm) require.NoError(t, err) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) var pPrm blobovnicza.PutPrm pPrm.SetAddress(object.AddressOf(obj)) @@ -132,7 +132,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { _, err = blz.Put(context.Background(), pPrm) require.NoError(t, err) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) testRebuildFailoverValidate(t, dir, obj, false) } @@ -170,11 +170,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.Equal(t, uint64(1), rRes.ObjectsMoved) require.Equal(t, uint64(0), rRes.FilesRemoved) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) moveInfo, err := blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -185,11 +185,11 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object _, err = blz.Get(context.Background(), gPrm) require.True(t, client.IsErrObjectNotFound(err)) - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db"))) require.NoError(t, blz.Open(context.Background())) - require.NoError(t, blz.Init()) + require.NoError(t, blz.Init(context.Background())) moveInfo, err = blz.ListMoveInfo(context.Background()) require.NoError(t, err) @@ -203,7 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.True(t, bytes.Equal([]byte("0/0/0"), metaStub.storageIDs[object.AddressOf(obj)])) } - require.NoError(t, blz.Close()) + require.NoError(t, blz.Close(context.Background())) _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild")) require.True(t, os.IsNotExist(err)) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index dff4e9024..aae72b5ff 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ 
b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -93,7 +93,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("no rebuild single db", func(t *testing.T) { @@ -145,7 +145,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("rebuild by fill percent", func(t *testing.T) { @@ -214,7 +214,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) t.Run("rebuild by overflow", func(t *testing.T) { @@ -251,7 +251,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), WithLogger(test.NewLogger(t)), @@ -284,7 +284,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) }) } @@ -318,7 +318,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { storageIDs := make(map[oid.Address][]byte) storageIDs[prm.Address] = res.StorageID - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), @@ -355,7 +355,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) } func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { @@ -399,7 +399,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } require.NoError(t, eg.Wait()) - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), @@ -444,7 +444,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta require.NoError(t, err) } - require.NoError(t, b.Close()) + require.NoError(t, b.Close(context.Background())) } type storageIDUpdateStub struct { diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index 41c6cf161..f850f48b4 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -1,6 +1,7 @@ package blobstor import ( + "context" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -139,7 +140,7 @@ func WithUncompressableContentTypes(values []string) Option { // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
-func (b *BlobStor) SetReportErrorFunc(f func(string, error)) { +func (b *BlobStor) SetReportErrorFunc(f func(context.Context, string, error)) { for i := range b.storage { b.storage[i].Storage.SetReportErrorFunc(f) } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index bed5e0eb9..6cc56fa3b 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -54,7 +54,7 @@ func TestCompression(t *testing.T) { WithCompressObjects(compress), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } @@ -91,20 +91,20 @@ func TestCompression(t *testing.T) { blobStor := newBlobStor(t, false) testPut(t, blobStor, 0) testGet(t, blobStor, 0) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, true) testGet(t, blobStor, 0) // get uncompressed object with compress enabled testPut(t, blobStor, 1) testGet(t, blobStor, 1) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) blobStor = newBlobStor(t, false) testGet(t, blobStor, 0) // get old uncompressed object testGet(t, blobStor, 1) // get compressed object with compression disabled testPut(t, blobStor, 2) testGet(t, blobStor, 2) - require.NoError(t, blobStor.Close()) + require.NoError(t, blobStor.Close(context.Background())) } func TestBlobstor_needsCompression(t *testing.T) { @@ -130,7 +130,7 @@ func TestBlobstor_needsCompression(t *testing.T) { }, })) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) return bs } @@ -192,7 +192,7 @@ func TestConcurrentPut(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testGet := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { res, err := b.Get(context.Background(), common.GetPrm{Address: object.AddressOf(obj)}) @@ -272,7 +272,7 @@ func TestConcurrentDelete(t *testing.T) { blobStor := New( WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) testPut := func(t *testing.T, b *BlobStor, obj *objectSDK.Object) { var prm common.PutPrm diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index 4f3a20993..6ecef48cd 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -12,7 +12,7 @@ import ( type Storage interface { Open(mode mode.ComponentMode) error Init() error - Close() error + Close(context.Context) error Type() string Path() string @@ -23,7 +23,7 @@ type Storage interface { // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
- SetReportErrorFunc(f func(string, error)) + SetReportErrorFunc(f func(context.Context, string, error)) SetParentID(parentID string) Get(context.Context, GetPrm) (GetRes, error) diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 43436b4eb..44685524f 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -50,8 +50,8 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag // If BlobStor is already initialized, no action is taken. // // Returns wrapped ErrInitBlobovniczas on blobovnicza tree's initializaiton failure. -func (b *BlobStor) Init() error { - b.log.Debug(context.Background(), logs.BlobstorInitializing) +func (b *BlobStor) Init(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorInitializing) if err := b.compression.Init(); err != nil { return err @@ -67,14 +67,14 @@ func (b *BlobStor) Init() error { } // Close releases all internal resources of BlobStor. -func (b *BlobStor) Close() error { - b.log.Debug(context.Background(), logs.BlobstorClosing) +func (b *BlobStor) Close(ctx context.Context) error { + b.log.Debug(ctx, logs.BlobstorClosing) var firstErr error for i := range b.storage { - err := b.storage[i].Storage.Close() + err := b.storage[i].Storage.Close(ctx) if err != nil { - b.log.Info(context.Background(), logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) + b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/exists_test.go b/pkg/local_object_storage/blobstor/exists_test.go index 783c198b2..7eb7d49bf 100644 --- a/pkg/local_object_storage/blobstor/exists_test.go +++ b/pkg/local_object_storage/blobstor/exists_test.go @@ -22,7 +22,7 @@ func TestExists(t *testing.T) { b := New(WithStorages(storages)) require.NoError(t, b.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, b.Init()) + require.NoError(t, b.Init(context.Background())) objects := []*objectSDK.Object{ testObject(smallSizeLimit / 2), diff --git a/pkg/local_object_storage/blobstor/fstree/control.go b/pkg/local_object_storage/blobstor/fstree/control.go index c21d79f09..2544729f7 100644 --- a/pkg/local_object_storage/blobstor/fstree/control.go +++ b/pkg/local_object_storage/blobstor/fstree/control.go @@ -1,6 +1,8 @@ package fstree import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" ) @@ -28,7 +30,7 @@ func (t *FSTree) Init() error { } // Close implements common.Storage. -func (t *FSTree) Close() error { +func (t *FSTree) Close(_ context.Context) error { t.metrics.Close() return nil } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 7f52762a7..53eb0395a 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -606,7 +606,7 @@ func (t *FSTree) Compressor() *compression.Config { } // SetReportErrorFunc implements common.Storage. -func (t *FSTree) SetReportErrorFunc(_ func(string, error)) { +func (t *FSTree) SetReportErrorFunc(_ func(context.Context, string, error)) { // Do nothing, FSTree can encounter only one error which is returned. 
} diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go index eb2126b6c..50dae46a7 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go @@ -52,7 +52,7 @@ func TestObjectCounter(t *testing.T) { require.Equal(t, uint64(0), size) defer func() { - require.NoError(t, fst.Close()) + require.NoError(t, fst.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go index 21c80b089..b8e88f84a 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/control.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/control.go @@ -19,7 +19,7 @@ func TestControl(t *testing.T, cons Constructor, minSize, maxSize uint64) { require.NoError(t, s.Init()) objects := prepare(t, 10, s, minSize, maxSize) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(mode.ComponentReadOnly)) for i := range objects { diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go index cf4e76513..3a163f6b1 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/delete.go @@ -15,7 +15,7 @@ func TestDelete(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 4, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go index 08465ed5e..f34fe5f97 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/exists.go @@ -14,7 +14,7 @@ func TestExists(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 1, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go index d1f709b0c..af0f4b45d 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get.go @@ -15,7 +15,7 @@ func TestGet(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 2, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go index fcbeddac7..13032048c 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/get_range.go @@ -17,7 +17,7 @@ func 
TestGetRange(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 1, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index 3a6c8b699..36b2c33f8 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -14,7 +14,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { s := cons(t) require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() objects := prepare(t, 10, s, minSize, maxSize) diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index 3c9457db2..ccfa510fe 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -33,9 +33,9 @@ func TestIterateObjects(t *testing.T) { require.NoError(t, blobStor.Open(context.Background(), mode.ReadWrite)) // initialize Blobstor - require.NoError(t, blobStor.Init()) + require.NoError(t, blobStor.Init(context.Background())) - defer blobStor.Close() + defer blobStor.Close(context.Background()) const objNum = 5 @@ -118,7 +118,7 @@ func TestIterate_IgnoreErrors(t *testing.T) { })} bs := New(bsOpts...) require.NoError(t, bs.Open(ctx, mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(ctx)) nopHandler := func(e common.IterationElement) error { return nil diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 83da52eb7..95a916662 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -1,6 +1,8 @@ package memstore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -10,11 +12,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error { return nil } -func (s *memstoreImpl) Init() error { return nil } -func (s *memstoreImpl) Close() error { return nil } -func (s *memstoreImpl) Type() string { return Type } -func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } -func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {} -func (s *memstoreImpl) SetParentID(string) {} +func (s *memstoreImpl) Init() error { return nil } +func (s *memstoreImpl) Close(context.Context) error { return nil } +func (s *memstoreImpl) Type() string { return Type } +func (s *memstoreImpl) Path() string { return s.rootPath } +func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } +func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} +func (s *memstoreImpl) SetParentID(string) {} diff --git 
a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go index dd130e5f9..f904d4232 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go @@ -16,7 +16,7 @@ func TestSimpleLifecycle(t *testing.T) { s := New( WithRootPath("memstore"), ) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() require.NoError(t, s.Open(mode.ComponentReadWrite)) require.NoError(t, s.Init()) diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index f081ff645..af19e398e 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -20,10 +20,10 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { return nil } - err := b.Close() + err := b.Close(ctx) if err == nil { if err = b.openBlobStor(ctx, m); err == nil { - err = b.Init() + err = b.Init(ctx) } } if err != nil { diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 1ac769e36..64e3c8da1 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -106,7 +106,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database var errG errgroup.Group @@ -161,7 +161,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) { gen := genEntry.create() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { @@ -200,7 +200,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) { objGen := tt.objGen() st := stEntry.open(b) - defer func() { require.NoError(b, st.Close()) }() + defer func() { require.NoError(b, st.Close(context.Background())) }() // Fill database for range tt.size { diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index bc0bed49d..fb1188751 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -1,6 +1,8 @@ package teststore import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -17,7 +19,7 @@ type cfg struct { Path func() string SetCompressor func(cc *compression.Config) Compressor func() *compression.Config - SetReportErrorFunc func(f func(string, error)) + SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) GetRange func(common.GetRangePrm) (common.GetRangeRes, error) @@ -51,7 +53,7 @@ func WithCompressor(f func() *compression.Config) Option { return func(c *cfg) { c.overrides.Compressor = f } } -func WithReportErrorFunc(f func(func(string, error))) Option { +func 
WithReportErrorFunc(f func(func(context.Context, string, error))) Option { return func(c *cfg) { c.overrides.SetReportErrorFunc = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index fea4a2d49..626ba0023 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -77,14 +77,14 @@ func (s *TestStore) Init() error { } } -func (s *TestStore) Close() error { +func (s *TestStore) Close(ctx context.Context) error { s.mu.RLock() defer s.mu.RUnlock() switch { case s.overrides.Close != nil: return s.overrides.Close() case s.st != nil: - return s.st.Close() + return s.st.Close(ctx) default: panic("unexpected storage call: Close()") } @@ -142,7 +142,7 @@ func (s *TestStore) Compressor() *compression.Config { } } -func (s *TestStore) SetReportErrorFunc(f func(string, error)) { +func (s *TestStore) SetReportErrorFunc(f func(context.Context, string, error)) { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index a52436175..24059a3f9 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -44,11 +44,11 @@ func (r ListContainersRes) Containers() []cid.ID { // ContainerSize returns the sum of estimation container sizes among all shards. // // Returns an error if executions are blocked (see BlockExecution). -func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.containerSize(prm) + res, err = e.containerSize(ctx, prm) return err }) @@ -56,12 +56,12 @@ func (e *StorageEngine) ContainerSize(prm ContainerSizePrm) (res ContainerSizeRe } // ContainerSize calls ContainerSize method on engine to calculate sum of estimation container sizes among all shards. 
-func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { +func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, error) { var prm ContainerSizePrm prm.SetContainerID(id) - res, err := e.ContainerSize(prm) + res, err := e.ContainerSize(ctx, prm) if err != nil { return 0, err } @@ -69,14 +69,14 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) { return res.Size(), nil } -func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) csRes, err := sh.Shard.ContainerSize(csPrm) if err != nil { - e.reportShardError(sh, "can't get container size", err, + e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) return false } @@ -121,7 +121,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { - e.reportShardError(sh, "can't get list of containers", err) + e.reportShardError(ctx, sh, "can't get list of containers", err) return false } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 2e957eb04..7164ff21f 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -97,7 +97,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e return false } else { if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check object existence", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not check object existence", err, zap.Stringer("address", prm.addr)) } return false } @@ -113,7 +113,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e _, err = sh.Inhume(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", prm.addr)) var target *apistatus.ObjectLocked locked.is = errors.As(err, &target) @@ -188,7 +188,7 @@ func (e *StorageEngine) deleteChunks( var objID oid.ID err := objID.ReadFromV2(chunk.ID) if err != nil { - e.reportShardError(sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not delete EC chunk", err, zap.Stringer("address", prm.addr)) } addr.SetObject(objID) inhumePrm.MarkAsGarbage(addr) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 6e30ee9de..029904046 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -99,24 +99,24 @@ func (e *StorageEngine) moveToDegraded(ctx context.Context, sh *shard.Shard, err if isMeta { err := sh.SetMode(ctx, mode.DegradedReadOnly) if err == nil { - log.Info(context.Background(), logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInDegradedModeDueToErrorThreshold) return } - log.Error(context.Background(), logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, + log.Error(ctx, 
logs.EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly, zap.Error(err)) } err := sh.SetMode(ctx, mode.ReadOnly) if err != nil { - log.Error(context.Background(), logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) + log.Error(ctx, logs.EngineFailedToMoveShardInReadonlyMode, zap.Error(err)) return } - log.Info(context.Background(), logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) + log.Info(ctx, logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold) } // reportShardErrorByID increases shard error counter and logs an error. -func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) { +func (e *StorageEngine) reportShardErrorByID(ctx context.Context, id string, msg string, err error) { e.mtx.RLock() sh, ok := e.shards[id] e.mtx.RUnlock() @@ -125,19 +125,20 @@ func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) { return } - e.reportShardError(sh, msg, err) + e.reportShardError(ctx, sh, msg, err) } // reportShardError checks that the amount of errors doesn't exceed the configured threshold. // If it does, shard is set to read-only mode. func (e *StorageEngine) reportShardError( + ctx context.Context, sh hashedShard, msg string, err error, fields ...zap.Field, ) { if isLogical(err) { - e.log.Warn(context.Background(), msg, + e.log.Warn(ctx, msg, zap.Stringer("shard_id", sh.ID()), zap.String("error", err.Error())) return @@ -147,7 +148,7 @@ func (e *StorageEngine) reportShardError( e.metrics.IncErrorCounter(sh.ID().String()) sid := sh.ID() - e.log.Warn(context.Background(), msg, append([]zap.Field{ + e.log.Warn(ctx, msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), zap.String("error", err.Error()), @@ -168,7 +169,7 @@ func (e *StorageEngine) reportShardError( default: // For background workers we can have a lot of such errors, // thus logging is done with DEBUG level. 
- e.log.Debug(context.Background(), logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, + e.log.Debug(ctx, logs.EngineModeChangeIsInProgressIgnoringSetmodeRequest, zap.Stringer("shard_id", sid), zap.Uint32("error_count", errCount)) } diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index d98101306..9d2b1c1b7 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -37,7 +37,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } if !client.IsErrObjectNotFound(err) { - e.reportShardError(sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address)) + e.reportShardError(ctx, sh, "could not check existence of object in shard", err, zap.Stringer("address", shPrm.Address)) } return false } diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index d6827e6c3..c7145889b 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -183,7 +183,7 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { i.ObjectExpired = true return true default: - i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d2e3cfd99..d6892f129 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -117,7 +117,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) outError = new(apistatus.ObjectNotFound) return true default: - e.reportShardError(sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) + e.reportShardError(ctx, sh, "could not head object from shard", err, zap.Stringer("address", prm.addr)) return false } } diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 1dc64c174..e89a8d048 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -151,7 +151,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh var siErr *objectSDK.SplitInfoError var ecErr *objectSDK.ECInfoError if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) { - e.reportShardError(sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) return } @@ -176,7 +176,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh return true } - e.reportShardError(sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) return false } @@ -202,7 +202,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { - e.reportShardError(h, "can't check object's lockers", err, zap.Stringer("address", addr), + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err return false @@ 
-232,7 +232,7 @@ func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid. e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { ld, err := h.Shard.GetLocked(ctx, addr) if err != nil { - e.reportShardError(h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err } @@ -274,7 +274,7 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - sh.HandleDeletedLocks(lockers) + sh.HandleDeletedLocks(ctx, lockers) select { case <-ctx.Done(): diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index bbab59bfa..5d43e59df 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -89,7 +89,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var objID oid.ID err = objID.ReadFromV2(chunk.ID) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return false } @@ -97,7 +97,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo } err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return false } @@ -109,7 +109,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo // do not lock it return true } - e.reportShardError(sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not check locked object for presence in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) return } @@ -122,7 +122,7 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo err := sh.Lock(ctx, idCnr, locker, []oid.ID{locked}) if err != nil { - e.reportShardError(sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) var errIrregular *apistatus.LockNonRegularObject diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 635f0e302..e080191ae 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -179,7 +179,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti return } - e.reportShardError(sh, "could not put object to shard", err, zap.Stringer("address", addr)) + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) return } diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 
c5c94eef7..0c9cea903 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -205,7 +205,7 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { return true // stop, return it back default: - i.Engine.reportShardError(sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) + i.Engine.reportShardError(ctx, sh, "could not get object from shard", err, zap.Stringer("address", i.Address)) return false } }) diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 0f1341f85..02149b4c8 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -74,7 +74,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) return false } @@ -116,7 +116,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { - e.reportShardError(sh, "could not select objects from shard", err) + e.reportShardError(ctx, sh, "could not select objects from shard", err) } else { for _, addr := range res.AddressList() { // save only unique values if _, ok := uniqueMap[addr.EncodeToString()]; !ok { diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 2b94103e9..898f685ec 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -351,7 +351,7 @@ func (e *StorageEngine) DetachShards(ctx context.Context, ids []*shard.ID) error return logicerr.New("ids must be non-empty") } - deletedShards, err := e.deleteShards(ids) + deletedShards, err := e.deleteShards(ctx, ids) if err != nil { return err } @@ -400,7 +400,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS // deleteShards deletes shards with specified ids from engine shard list // and releases all engine resources associated with shards. // Returns deleted shards or error if some shard could not be deleted. 
-func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { +func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]hashedShard, error) { ss := make([]hashedShard, 0, len(ids)) e.mtx.Lock() @@ -432,7 +432,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) { delete(e.shardPools, idStr) } - e.log.Info(context.Background(), logs.EngineShardHasBeenRemoved, + e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 6bb5e3a41..268b4adfa 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -37,7 +37,7 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, lm, err := lst[index].TreeMove(ctx, d, treeID, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeMove`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -71,7 +71,7 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip lm, err := lst[index].TreeAddByPath(ctx, d, treeID, attr, path, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeAddByPath`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -100,7 +100,7 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str err = lst[index].TreeApply(ctx, cnr, treeID, m, backgroundSync) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeApply`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -128,7 +128,7 @@ func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeI err = lst[index].TreeApplyBatch(ctx, cnr, treeID, m) if err != nil { if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't perform `TreeApplyBatch`", err, + e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, zap.Stringer("cid", cnr), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -160,7 +160,7 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetByPath`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -193,7 +193,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetMeta`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -225,7 +225,7 @@ func (e 
*StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetChildren`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -257,7 +257,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeSortedByFilename`", err, + e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -289,7 +289,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't perform `TreeGetOpLog`", err, + e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -319,7 +319,7 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri break } if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { - e.reportShardError(sh, "can't perform `TreeDrop`", err, + e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -349,7 +349,7 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, return nil, err } - e.reportShardError(sh, "can't perform `TreeList`", err, + e.reportShardError(ctx, sh, "can't perform `TreeList`", err, zap.Stringer("cid", cid), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -415,7 +415,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK err = lst[index].TreeUpdateLastSyncHeight(ctx, cid, treeID, height) if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { - e.reportShardError(lst[index], "can't update tree synchronization height", err, + e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) @@ -442,7 +442,7 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t break } if !errors.Is(err, pilorama.ErrTreeNotFound) { - e.reportShardError(sh, "can't read tree synchronization height", err, + e.reportShardError(ctx, sh, "can't read tree synchronization height", err, zap.Stringer("cid", cid), zap.String("tree", treeID), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/internal/storagetest/storage.go b/pkg/local_object_storage/internal/storagetest/storage.go index d8ac106dd..d46365296 100644 --- a/pkg/local_object_storage/internal/storagetest/storage.go +++ b/pkg/local_object_storage/internal/storagetest/storage.go @@ -12,8 +12,8 @@ import ( type Component interface { Open(context.Context, mode.Mode) error SetMode(context.Context, mode.Mode) error - Init() error - Close() error + Init(context.Context) error + Close(context.Context) error } // Constructor constructs storage component. 
@@ -59,18 +59,18 @@ func TestCloseAfterOpen(t *testing.T, cons Constructor) { // Use-case: irrecoverable error on some components, close everything. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("RO", func(t *testing.T) { // Use-case: irrecoverable error on some components, close everything. // Open in read-only must be done after the db is here. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) } @@ -79,9 +79,9 @@ func TestCloseTwice(t *testing.T, cons Constructor) { // Use-case: move to maintenance mode twice, first time failed. s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) - require.NoError(t, s.Close()) - require.NoError(t, s.Close()) // already closed, no-op + require.NoError(t, s.Init(context.Background())) + require.NoError(t, s.Close(context.Background())) + require.NoError(t, s.Close(context.Background())) // already closed, no-op } // TestSetMode checks that any mode transition can be done safely. @@ -94,20 +94,20 @@ func TestSetMode(t *testing.T, cons Constructor, m mode.Mode) { require.NoError(t, s.SetMode(context.Background(), m)) t.Run("after open in RO", func(t *testing.T) { - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) require.NoError(t, s.Open(context.Background(), mode.ReadOnly)) require.NoError(t, s.SetMode(context.Background(), m)) }) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) t.Run("after init", func(t *testing.T) { s := cons(t) // Use-case: notmal node operation. require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) + require.NoError(t, s.Init(context.Background())) require.NoError(t, s.SetMode(context.Background(), m)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) }) } @@ -115,8 +115,8 @@ func TestModeTransition(t *testing.T, cons Constructor, from, to mode.Mode) { // Use-case: normal node operation. 
s := cons(t) require.NoError(t, s.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, s.Init()) + require.NoError(t, s.Init(context.Background())) require.NoError(t, s.SetMode(context.Background(), from)) require.NoError(t, s.SetMode(context.Background(), to)) - require.NoError(t, s.Close()) + require.NoError(t, s.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 110be68ad..8d8d91dc7 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -18,7 +18,7 @@ func TestDB_Containers(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const N = 10 @@ -79,7 +79,7 @@ func TestDB_ContainersCount(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const R, T, SG, L = 10, 11, 12, 13 // amount of object per type @@ -116,7 +116,7 @@ func TestDB_ContainerSize(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const ( C = 3 diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 54bea4204..07fa7e9cf 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -57,7 +57,7 @@ func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) } - db.log.Debug(context.Background(), logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) + db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) if db.boltOptions == nil { opts := *bbolt.DefaultOptions @@ -78,9 +78,9 @@ func (db *DB) openBolt(ctx context.Context) error { db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize - db.log.Debug(context.Background(), logs.MetabaseOpenedBoltDBInstanceForMetabase) + db.log.Debug(ctx, logs.MetabaseOpenedBoltDBInstanceForMetabase) - db.log.Debug(context.Background(), logs.MetabaseCheckingMetabaseVersion) + db.log.Debug(ctx, logs.MetabaseCheckingMetabaseVersion) return db.boltDB.View(func(tx *bbolt.Tx) error { // The safest way to check if the metabase is fresh is to check if it has no buckets. // However, shard info can be present. So here we check that the number of buckets is @@ -109,7 +109,7 @@ func (db *DB) openBolt(ctx context.Context) error { // // Does nothing if metabase has already been initialized and filled. To roll back the database to its initial state, // use Reset. -func (db *DB) Init() error { +func (db *DB) Init(_ context.Context) error { return metaerr.Wrap(db.init(false)) } @@ -205,7 +205,7 @@ func (db *DB) SyncCounters() error { // Close closes boltDB instance // and reports metabase metric. 
-func (db *DB) Close() error { +func (db *DB) Close(context.Context) error { var err error if db.boltDB != nil { err = db.close() @@ -236,7 +236,7 @@ func (db *DB) Reload(ctx context.Context, opts ...Option) (bool, error) { defer db.modeMtx.Unlock() if db.mode.NoMetabase() || c.info.Path != "" && filepath.Clean(db.info.Path) != filepath.Clean(c.info.Path) { - if err := db.Close(); err != nil { + if err := db.Close(ctx); err != nil { return false, err } diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go index 2a64881cb..d26402675 100644 --- a/pkg/local_object_storage/metabase/control_test.go +++ b/pkg/local_object_storage/metabase/control_test.go @@ -15,7 +15,7 @@ import ( func TestReset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() err := db.Reset() require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index dccccd456..950385a29 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -22,7 +22,7 @@ func TestCounters(t *testing.T) { t.Run("defaults", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() c, err := db.ObjectCounters() require.NoError(t, err) require.Zero(t, c.Phy) @@ -37,7 +37,7 @@ func TestCounters(t *testing.T) { t.Run("put", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]*objectSDK.Object, 0, objCount) for range objCount { oo = append(oo, testutil.GenerateObject()) @@ -75,7 +75,7 @@ func TestCounters(t *testing.T) { t.Run("delete", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -120,7 +120,7 @@ func TestCounters(t *testing.T) { t.Run("inhume", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, false) exp := make(map[cid.ID]meta.ObjectCounters) @@ -185,7 +185,7 @@ func TestCounters(t *testing.T) { t.Run("put_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() parObj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) @@ -223,7 +223,7 @@ func TestCounters(t *testing.T) { t.Run("delete_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -265,7 +265,7 @@ func TestCounters(t *testing.T) { t.Run("inhume_split", func(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := putObjs(t, db, objCount, true) exp := make(map[cid.ID]meta.ObjectCounters) @@ -329,7 +329,7 @@ func 
TestCounters(t *testing.T) { func TestDoublePut(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj := testutil.GenerateObject() exp := make(map[cid.ID]meta.ObjectCounters) @@ -387,7 +387,7 @@ func TestCounters_Expired(t *testing.T) { es := &epochState{epoch} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() oo := make([]oid.Address, objCount) for i := range oo { diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go index c61d762bc..edaeb13c5 100644 --- a/pkg/local_object_storage/metabase/db_test.go +++ b/pkg/local_object_storage/metabase/db_test.go @@ -61,7 +61,7 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB { ) require.NoError(t, bdb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) return bdb } diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go index a25627990..9f1f91e14 100644 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ b/pkg/local_object_storage/metabase/delete_ec_test.go @@ -30,8 +30,8 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunk := oidtest.ID() @@ -194,8 +194,8 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunks := make([]oid.ID, chunksCount) diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go index cdfe2a203..0329e3a73 100644 --- a/pkg/local_object_storage/metabase/delete_meta_test.go +++ b/pkg/local_object_storage/metabase/delete_meta_test.go @@ -23,8 +23,8 @@ func TestPutDeleteIndexAttributes(t *testing.T) { }...) 
require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() obj1 := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index fe5f7833b..c0762a377 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -18,7 +18,7 @@ import ( func TestDB_Delete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() parent := testutil.GenerateObjectWithCID(cnr) @@ -65,7 +65,7 @@ func TestDB_Delete(t *testing.T) { func TestDeleteAllChildren(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -103,7 +103,7 @@ func TestDeleteAllChildren(t *testing.T) { func TestGraveOnlyDelete(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() @@ -116,7 +116,7 @@ func TestGraveOnlyDelete(t *testing.T) { func TestExpiredObject(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { // removing expired object should be error-free @@ -128,7 +128,7 @@ func TestExpiredObject(t *testing.T) { func TestDelete(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() for range 10 { @@ -170,7 +170,7 @@ func TestDelete(t *testing.T) { func TestDeleteDropsGCMarkIfObjectNotFound(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go index 1e4148eba..3045e17f1 100644 --- a/pkg/local_object_storage/metabase/exists_test.go +++ b/pkg/local_object_storage/metabase/exists_test.go @@ -1,6 +1,7 @@ package meta_test import ( + "context" "errors" "testing" @@ -18,7 +19,7 @@ const currEpoch = 1000 func TestDB_Exists(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("no object", func(t *testing.T) { nonExist := testutil.GenerateObject() diff --git a/pkg/local_object_storage/metabase/expired_test.go b/pkg/local_object_storage/metabase/expired_test.go index bb98745ee..495c1eee7 100644 --- a/pkg/local_object_storage/metabase/expired_test.go +++ b/pkg/local_object_storage/metabase/expired_test.go @@ -13,7 +13,7 @@ import ( func TestDB_SelectExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() 
containerID1 := cidtest.ID() diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index f0caaea70..c93d2c992 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -25,7 +25,7 @@ import ( func TestDB_Get(t *testing.T) { db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() @@ -219,7 +219,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { meta.WithMaxBatchSize(batchSize), meta.WithMaxBatchDelay(10*time.Millisecond), ) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() addrs := make([]oid.Address, 0, numOfObj) for range numOfObj { @@ -253,7 +253,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { }) }) - require.NoError(b, db.Close()) + require.NoError(b, db.Close(context.Background())) require.NoError(b, os.RemoveAll(b.Name())) db, addrs = prepareDb(1) diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index b9c6ce28c..99794e609 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -15,7 +15,7 @@ import ( func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var counter int var iterGravePRM meta.GraveyardIterationPrm @@ -42,7 +42,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) { func TestDB_Iterate_OffsetNotFound(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() obj1 := testutil.GenerateObject() obj2 := testutil.GenerateObject() @@ -113,7 +113,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) { func TestDB_IterateDeletedObjects(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 4 objects @@ -202,7 +202,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) { func TestDB_IterateOverGraveyard_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 4 objects @@ -303,7 +303,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { func TestDB_IterateOverGarbage_Offset(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // generate and put 4 objects obj1 := testutil.GenerateObject() @@ -395,7 +395,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { func TestDB_DropGraves(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() // generate and put 2 objects diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go index 32e412c79..180713287 100644 --- a/pkg/local_object_storage/metabase/inhume_ec_test.go +++ b/pkg/local_object_storage/metabase/inhume_ec_test.go @@ -25,8 +25,8 @@ func 
TestInhumeECObject(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - defer func() { require.NoError(t, db.Close()) }() + require.NoError(t, db.Init(context.Background())) + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ecChunk := oidtest.ID() diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index 277316f7b..786d10396 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -17,7 +17,7 @@ import ( func TestDB_Inhume(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw := testutil.GenerateObject() testutil.AddAttribute(raw, "foo", "bar") @@ -37,7 +37,7 @@ func TestDB_Inhume(t *testing.T) { func TestInhumeTombOnTomb(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() var ( err error @@ -107,7 +107,7 @@ func TestInhumeTombOnTomb(t *testing.T) { func TestInhumeLocked(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() locked := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 646dc196c..7eed32c55 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -18,7 +18,7 @@ import ( func TestDB_IterateExpired(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const epoch = 13 @@ -70,7 +70,7 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt func TestDB_IterateCoveredByTombstones(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() ts := oidtest.Address() diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 203802ec0..6f6463071 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -33,7 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ NoSync: true, })) // faster single-thread generation - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() obj := testutil.GenerateObject() for i := range 100_000 { // should be a multiple of all batch sizes @@ -71,7 +71,7 @@ func TestLisObjectsWithCursor(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const ( containers = 5 @@ -163,7 +163,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() const total = 5 @@ -225,7 +225,7 @@ func TestIterateOver(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { 
require.NoError(t, db.Close(context.Background())) }() const total uint64 = 5 for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} { diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 9601cb2be..341ff9ad1 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -21,7 +21,7 @@ func TestDB_Lock(t *testing.T) { cnr := cidtest.ID() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() t.Run("empty locked list", func(t *testing.T) { require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) }) @@ -187,7 +187,7 @@ func TestDB_Lock_Expired(t *testing.T) { es := &epochState{e: 123} db := newDB(t, meta.WithEpochState(es)) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // put an object addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124) @@ -209,7 +209,7 @@ func TestDB_IsLocked(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() // existing and locked objs diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index f99262be4..ce6ae1004 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -18,7 +18,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { } if !db.mode.NoMetabase() { - if err := db.Close(); err != nil { + if err := db.Close(ctx); err != nil { return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } @@ -28,7 +28,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { } else { err := db.openDB(ctx, m) if err == nil && !m.ReadOnly() { - err = db.Init() + err = db.Init(ctx) } if err != nil { return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) diff --git a/pkg/local_object_storage/metabase/mode_test.go b/pkg/local_object_storage/metabase/mode_test.go index 1b9f60055..28b42283f 100644 --- a/pkg/local_object_storage/metabase/mode_test.go +++ b/pkg/local_object_storage/metabase/mode_test.go @@ -25,13 +25,13 @@ func Test_Mode(t *testing.T) { require.NoError(t, bdb.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) require.NoError(t, bdb.Open(context.Background(), mode.Degraded)) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Init()) + require.NoError(t, bdb.Init(context.Background())) require.Nil(t, bdb.boltDB) - require.NoError(t, bdb.Close()) + require.NoError(t, bdb.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 914f5ef06..f37ed4cf2 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -46,7 +46,7 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(runtime.NumCPU())) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() 
// Ensure the benchmark is bound by CPU and not waiting batch-delay time. b.SetParallelism(1) @@ -68,7 +68,7 @@ func BenchmarkPut(b *testing.B) { db := newDB(b, meta.WithMaxBatchDelay(time.Millisecond*10), meta.WithMaxBatchSize(1)) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() var index atomic.Int64 index.Store(-1) objs := prepareObjects(b.N) @@ -84,7 +84,7 @@ func BenchmarkPut(b *testing.B) { func TestDB_PutBlobovniczaUpdate(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw1 := testutil.GenerateObject() storageID := []byte{1, 2, 3, 4} diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 993079dce..45faecc13 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -30,9 +30,9 @@ func TestResetDropsContainerBuckets(t *testing.T) { ) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() for idx := range 100 { var putPrm PutPrm diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 6f48607be..5cc998311 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -38,7 +38,7 @@ func testSelectUserAttributes(t *testing.T, index bool) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -200,7 +200,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -354,7 +354,7 @@ func TestDB_SelectInhume(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -385,7 +385,7 @@ func TestDB_SelectPayloadHash(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -456,7 +456,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -564,7 +564,7 @@ func TestDB_SelectObjectID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -680,7 +680,7 @@ func TestDB_SelectOwnerID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -786,7 +786,7 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() 
ecChunk1 := oidtest.ID() @@ -865,7 +865,7 @@ func TestDB_RawHead_SplitInfo(t *testing.T) { ) db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -906,7 +906,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde t.Run("first last, then linking", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, lastPart, nil)) require.NoError(t, metaPut(db, linking, nil)) @@ -930,7 +930,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde }) t.Run("first linking, then last", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, linking, nil)) require.NoError(t, metaPut(db, lastPart, nil)) @@ -954,7 +954,7 @@ func testGetRawSplitInfo(t *testing.T, cnr cidSDK.ID, ids *transformer.AccessIde }) t.Run("only last part", func(t *testing.T) { db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() require.NoError(t, metaPut(db, lastPart, nil)) @@ -984,7 +984,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) { ) db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -1052,7 +1052,7 @@ func TestDB_SelectSplitID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -1109,7 +1109,7 @@ func TestDB_SelectContainerID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() cnr := cidtest.ID() @@ -1157,7 +1157,7 @@ func TestDB_SelectContainerID(t *testing.T) { func BenchmarkSelect(b *testing.B) { const objCount = 1000 db := newDB(b) - defer func() { require.NoError(b, db.Close()) }() + defer func() { require.NoError(b, db.Close(context.Background())) }() cid := cidtest.ID() @@ -1199,7 +1199,7 @@ func TestExpiredObjects(t *testing.T) { t.Parallel() db := newDB(t, meta.WithEpochState(epochState{currEpoch})) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) { cidExp, _ := exp.ContainerID() diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go index a86e42bd2..fef680159 100644 --- a/pkg/local_object_storage/metabase/storage_id_test.go +++ b/pkg/local_object_storage/metabase/storage_id_test.go @@ -15,7 +15,7 @@ func TestDB_StorageID(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() raw1 := testutil.GenerateObject() raw2 := testutil.GenerateObject() @@ -79,7 +79,7 @@ func TestPutWritecacheDataRace(t *testing.T) { t.Parallel() db := newDB(t) - defer func() { require.NoError(t, db.Close()) }() + defer func() { require.NoError(t, db.Close(context.Background())) }() putStorageID := []byte{1, 2, 3} wcStorageID := []byte{1, 2, 3, 4, 5} diff --git 
a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index e2eee86b0..5444264be 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -34,12 +34,12 @@ func TestUpgradeV2ToV3(t *testing.T) { }() db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t))) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.ErrorIs(t, db.Init(), ErrOutdatedVersion) - require.NoError(t, db.Close()) + require.ErrorIs(t, db.Init(context.Background()), ErrOutdatedVersion) + require.NoError(t, db.Close(context.Background())) require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log)) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - require.NoError(t, db.Close()) + require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) fmt.Println() } @@ -87,7 +87,7 @@ func TestGenerateMetabaseFile(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) db.boltDB.AllocSize = allocSize db.boltDB.NoSync = true - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) containers := make([]cid.ID, containersCount) for i := range containers { containers[i] = cidtest.ID() @@ -218,5 +218,5 @@ func TestGenerateMetabaseFile(t *testing.T) { require.NoError(t, eg.Wait()) db.log.Info(ctx, "simple objects locked by locks generated") require.NoError(t, db.boltDB.Sync()) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) } diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go index 509e72479..b373fb32e 100644 --- a/pkg/local_object_storage/metabase/version_test.go +++ b/pkg/local_object_storage/metabase/version_test.go @@ -45,15 +45,15 @@ func TestVersion(t *testing.T) { t.Run("simple", func(t *testing.T) { db := newDB(t) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) t.Run("reopen", func(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) }) t.Run("old data", func(t *testing.T) { @@ -61,9 +61,9 @@ func TestVersion(t *testing.T) { require.NoError(t, db.SetShardID(context.Background(), []byte{1, 2, 3, 4}, mode.ReadWrite)) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) + require.NoError(t, db.Init(context.Background())) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) t.Run("invalid version", func(t *testing.T) { db := newDB(t) @@ -71,37 +71,37 @@ func TestVersion(t *testing.T) { require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { return updateVersion(tx, version+1) })) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.Error(t, db.Init()) - require.NoError(t, db.Close()) + require.Error(t, db.Init(context.Background())) + require.NoError(t, 
db.Close(context.Background())) t.Run("reset", func(t *testing.T) { require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.Reset()) check(t, db) - require.NoError(t, db.Close()) + require.NoError(t, db.Close(context.Background())) }) }) t.Run("incompleted upgrade", func(t *testing.T) { db := newDB(t) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, db.Init()) - require.NoError(t, db.Close()) + require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue) })) - require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade) - require.NoError(t, db.Close()) + require.ErrorIs(t, db.Init(context.Background()), ErrIncompletedUpgrade) + require.NoError(t, db.Close(context.Background())) require.NoError(t, db.Open(context.Background(), mode.ReadWrite)) require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error { return tx.Bucket(shardInfoBucket).Delete(upgradeKey) })) - require.NoError(t, db.Init()) - require.NoError(t, db.Close()) + require.NoError(t, db.Init(context.Background())) + require.NoError(t, db.Close(context.Background())) }) } diff --git a/pkg/local_object_storage/pilorama/bench_test.go b/pkg/local_object_storage/pilorama/bench_test.go index 22b951a41..3156751f2 100644 --- a/pkg/local_object_storage/pilorama/bench_test.go +++ b/pkg/local_object_storage/pilorama/bench_test.go @@ -28,8 +28,8 @@ func BenchmarkCreate(b *testing.B) { WithPath(filepath.Join(tmpDir, "test.db")), WithMaxBatchSize(runtime.GOMAXPROCS(0))) require.NoError(b, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(b, f.Init()) - defer func() { require.NoError(b, f.Close()) }() + require.NoError(b, f.Init(context.Background())) + defer func() { require.NoError(b, f.Close(context.Background())) }() b.Cleanup(func() { require.NoError(b, os.RemoveAll(tmpDir)) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 9ffcf1e83..c62d728b1 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -91,7 +91,7 @@ func NewBoltForest(opts ...Option) ForestStorage { return &b } -func (t *boltForest) SetMode(_ context.Context, m mode.Mode) error { +func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error { t.modeMtx.Lock() defer t.modeMtx.Unlock() @@ -99,10 +99,10 @@ func (t *boltForest) SetMode(_ context.Context, m mode.Mode) error { return nil } - err := t.Close() + err := t.Close(ctx) if err == nil && !m.NoMetabase() { if err = t.openBolt(m); err == nil { - err = t.Init() + err = t.Init(ctx) } } if err != nil { @@ -148,7 +148,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { return nil } -func (t *boltForest) Init() error { +func (t *boltForest) Init(context.Context) error { if t.mode.NoMetabase() || t.db.IsReadOnly() { return nil } @@ -162,7 +162,7 @@ func (t *boltForest) Init() error { }) } -func (t *boltForest) Close() error { +func (t *boltForest) Close(context.Context) error { var err error if t.db != nil { err = t.db.Close() diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 76da1c0c2..f31504e2b 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -120,7 +120,7 @@ func (f *memoryForest) 
TreeApplyBatch(ctx context.Context, cnr cid.ID, treeID st return nil } -func (f *memoryForest) Init() error { +func (f *memoryForest) Init(context.Context) error { return nil } @@ -132,7 +132,7 @@ func (f *memoryForest) SetMode(context.Context, mode.Mode) error { return nil } -func (f *memoryForest) Close() error { +func (f *memoryForest) Close(context.Context) error { return nil } func (f *memoryForest) SetParentID(string) {} diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index fbcc53fb3..de56fc82b 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -30,7 +30,7 @@ var providers = []struct { {"inmemory", func(t testing.TB, _ ...Option) ForestStorage { f := NewMemoryForest() require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) return f }}, {"bbolt", func(t testing.TB, opts ...Option) ForestStorage { @@ -40,7 +40,7 @@ var providers = []struct { WithMaxBatchSize(1), }, opts...)...) require.NoError(t, f.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) return f }}, } @@ -61,7 +61,7 @@ func TestForest_TreeMove(t *testing.T) { } func testForestTreeMove(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -125,7 +125,7 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) { } func testForestTreeGetChildren(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -247,7 +247,7 @@ func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) { } func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -302,7 +302,7 @@ func TestForest_TreeSortedIteration(t *testing.T) { } func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -361,7 +361,7 @@ func TestForest_TreeSortedFilename(t *testing.T) { } func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() const controlAttr = "control_attr" cid := cidtest.ID() @@ -453,7 +453,7 @@ func TestForest_TreeDrop(t *testing.T) { } func testForestTreeDrop(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() const cidsSize = 3 var cids [cidsSize]cidSDK.ID @@ -523,7 +523,7 @@ func TestForest_TreeAdd(t *testing.T) { } func testForestTreeAdd(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -571,7 +571,7 @@ func TestForest_TreeAddByPath(t *testing.T) { } func testForestTreeAddByPath(t *testing.T, s 
ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() d := CIDDescriptor{cid, 0, 1} @@ -709,7 +709,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio t.Run("add a child, then insert a parent removal", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}}) @@ -722,7 +722,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio }) t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}} testApply(t, s, 11, 10, meta) @@ -792,7 +792,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ t.Run("expected", func(t *testing.T) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() for i := range logs { require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false)) @@ -801,7 +801,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ }) s := constructor(t, WithMaxBatchSize(batchSize)) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false)) for range batchSize { @@ -842,7 +842,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op } s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() t.Run("empty log, no panic", func(t *testing.T) { _, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0) @@ -883,7 +883,7 @@ func TestForest_TreeExists(t *testing.T) { func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) { s := constructor(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) { actual, err := s.TreeExists(context.Background(), cid, treeID) @@ -942,7 +942,7 @@ func TestApplyTricky1(t *testing.T) { for i := range providers { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() for i := range ops { require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -1005,7 +1005,7 @@ func TestApplyTricky2(t *testing.T) { for i := range providers { t.Run(providers[i].name, func(t *testing.T) { s := providers[i].construct(t) - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() for i := range ops { require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -1115,7 +1115,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ treeID := "version" expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close()) }() 
+ defer func() { require.NoError(t, expected.Close(context.Background())) }() for i := range ops { require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -1145,7 +1145,7 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ wg.Wait() compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close()) + require.NoError(t, actual.Close(context.Background())) } } @@ -1163,7 +1163,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. treeID := "version" expected := constructor(t, WithNoSync(true)) - defer func() { require.NoError(t, expected.Close()) }() + defer func() { require.NoError(t, expected.Close(context.Background())) }() for i := range ops { require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) @@ -1179,7 +1179,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } compareForests(t, expected, actual, cid, treeID, nodeCount) - require.NoError(t, actual.Close()) + require.NoError(t, actual.Close(context.Background())) } } @@ -1197,7 +1197,7 @@ func BenchmarkApplySequential(b *testing.B) { b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { r := mrand.New(mrand.NewSource(time.Now().Unix())) s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close()) }() + defer func() { require.NoError(b, s.Close(context.Background())) }() benchmarkApply(b, s, func(opCount int) []Move { ops := make([]Move, opCount) @@ -1233,7 +1233,7 @@ func BenchmarkApplyReorderLast(b *testing.B) { b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) { r := mrand.New(mrand.NewSource(time.Now().Unix())) s := providers[i].construct(b, WithMaxBatchSize(bs)) - defer func() { require.NoError(b, s.Close()) }() + defer func() { require.NoError(b, s.Close(context.Background())) }() benchmarkApply(b, s, func(opCount int) []Move { ops := make([]Move, opCount) @@ -1290,7 +1290,7 @@ func TestTreeGetByPath(t *testing.T) { } func testTreeGetByPath(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cid := cidtest.ID() treeID := "version" @@ -1369,7 +1369,7 @@ func TestGetTrees(t *testing.T) { } func testTreeGetTrees(t *testing.T, s ForestStorage) { - defer func() { require.NoError(t, s.Close()) }() + defer func() { require.NoError(t, s.Close(context.Background())) }() cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()} d := CIDDescriptor{Position: 0, Size: 1} @@ -1415,7 +1415,7 @@ func TestTreeLastSyncHeight(t *testing.T) { } func testTreeLastSyncHeight(t *testing.T, f ForestStorage) { - defer func() { require.NoError(t, f.Close()) }() + defer func() { require.NoError(t, f.Close(context.Background())) }() cnr := cidtest.ID() treeID := "someTree" diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 9717b2401..1f7e742a2 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -62,9 +62,9 @@ type Forest interface { type ForestStorage interface { // DumpInfo returns information about the pilorama. 
DumpInfo() Info - Init() error + Init(context.Context) error Open(context.Context, mode.Mode) error - Close() error + Close(context.Context) error SetMode(context.Context, mode.Mode) error SetParentID(id string) Forest diff --git a/pkg/local_object_storage/pilorama/mode_test.go b/pkg/local_object_storage/pilorama/mode_test.go index 01d3da9f0..0c042aa56 100644 --- a/pkg/local_object_storage/pilorama/mode_test.go +++ b/pkg/local_object_storage/pilorama/mode_test.go @@ -19,13 +19,13 @@ func Test_Mode(t *testing.T) { require.NoError(t, f.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close()) + require.NoError(t, f.Close(context.Background())) require.NoError(t, f.Open(context.Background(), mode.Degraded)) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Init()) + require.NoError(t, f.Init(context.Background())) require.Nil(t, f.(*boltForest).db) - require.NoError(t, f.Close()) + require.NoError(t, f.Close(context.Background())) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 056737a9d..5a9e26155 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -91,8 +91,8 @@ func (s *Shard) Open(ctx context.Context) error { type metabaseSynchronizer Shard -func (x *metabaseSynchronizer) Init() error { - ctx, span := tracing.StartSpanFromContext(context.TODO(), "metabaseSynchronizer.Init") +func (x *metabaseSynchronizer) Init(ctx context.Context) error { + ctx, span := tracing.StartSpanFromContext(ctx, "metabaseSynchronizer.Init") defer span.End() return (*Shard)(x).refillMetabase(ctx) @@ -140,7 +140,7 @@ func (s *Shard) Init(ctx context.Context) error { func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { type initializer interface { - Init() error + Init(context.Context) error } var components []initializer @@ -170,7 +170,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { } for _, component := range components { - if err := component.Init(); err != nil { + if err := component.Init(ctx); err != nil { if component == s.metaBase { if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) { return fmt.Errorf("metabase initialization: %w", err) @@ -368,7 +368,7 @@ func (s *Shard) Close(ctx context.Context) error { if s.rb != nil { s.rb.Stop(ctx, s.log) } - var components []interface{ Close() error } + var components []interface{ Close(context.Context) error } if s.pilorama != nil { components = append(components, s.pilorama) @@ -384,7 +384,7 @@ func (s *Shard) Close(ctx context.Context) error { var lastErr error for _, component := range components { - if err := component.Close(); err != nil { + if err := component.Close(ctx); err != nil { lastErr = err s.log.Error(ctx, logs.ShardCouldNotCloseShardComponent, zap.Error(err)) } @@ -392,7 +392,7 @@ func (s *Shard) Close(ctx context.Context) error { // If Init/Open was unsuccessful gc can be nil. if s.gc != nil { - s.gc.stop() + s.gc.stop(ctx) } return lastErr @@ -437,7 +437,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { // config after the node was updated. 
err = s.refillMetabase(ctx) } else { - err = s.metaBase.Init() + err = s.metaBase.Init(ctx) } if err != nil { s.log.Error(ctx, logs.ShardCantInitializeMetabaseMoveToADegradedreadonlyMode, zap.Error(err)) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 6fabf7103..a987d3d14 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -213,7 +213,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) { } } -func (gc *gc) releaseResources() { +func (gc *gc) releaseResources(ctx context.Context) { if gc.workerPool != nil { gc.workerPool.Release() } @@ -222,7 +222,7 @@ func (gc *gc) releaseResources() { // because it is possible that we are close it earlier than stop writing. // It is ok to keep it opened. - gc.log.Debug(context.Background(), logs.ShardGCIsStopped) + gc.log.Debug(ctx, logs.ShardGCIsStopped) } func (gc *gc) tickRemover(ctx context.Context) { @@ -236,10 +236,10 @@ func (gc *gc) tickRemover(ctx context.Context) { case <-ctx.Done(): // Context canceled earlier than we start to close shards. // It make sense to stop collecting garbage by context too. - gc.releaseResources() + gc.releaseResources(ctx) return case <-gc.stopChannel: - gc.releaseResources() + gc.releaseResources(ctx) return case <-timer.C: startedAt := time.Now() @@ -258,12 +258,12 @@ func (gc *gc) tickRemover(ctx context.Context) { } } -func (gc *gc) stop() { +func (gc *gc) stop(ctx context.Context) { gc.onceStop.Do(func() { close(gc.stopChannel) }) - gc.log.Info(context.Background(), logs.ShardWaitingForGCWorkersToStop) + gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() } @@ -730,14 +730,14 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc } // HandleDeletedLocks unlocks all objects which were locked by lockers. 
-func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { +func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { if s.GetMode().NoMetabase() { return } _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { - s.log.Warn(context.Background(), logs.ShardFailureToUnlockObjects, + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.String("error", err.Error()), ) diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 39073a529..9998bbae2 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -61,8 +61,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { meta.WithEpochState(epochState{}), ), WithPiloramaOptions(pilorama.WithPath(filepath.Join(rootPath, "pilorama"))), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { sh.HandleExpiredLocks(ctx, epoch, a) diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 7da8b8c28..5caf3641f 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -53,8 +53,8 @@ func TestShard_Lock(t *testing.T) { meta.WithPath(filepath.Join(rootPath, "meta")), meta.WithEpochState(epochState{}), ), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), } diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 3a06fe8a7..1eb7f14d0 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -95,7 +95,7 @@ type cfg struct { metricsWriter MetricsWriter - reportErrorFunc func(selfID string, message string, err error) + reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider } @@ -105,7 +105,7 @@ func defaultCfg() *cfg { rmBatchSize: 100, log: logger.NewLoggerWrapper(zap.L()), gcCfg: defaultGCCfg(), - reportErrorFunc: func(string, string, error) {}, + reportErrorFunc: func(context.Context, string, string, error) {}, zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, @@ -130,8 +130,8 @@ func New(opts ...Option) *Shard { tsSource: c.tsSource, } - reportFunc := func(msg string, err error) { - s.reportErrorFunc(s.ID().String(), msg, err) + reportFunc := func(ctx context.Context, msg string, err error) { + s.reportErrorFunc(ctx, s.ID().String(), msg, err) } s.blobStor.SetReportErrorFunc(reportFunc) @@ -317,7 +317,7 @@ func WithGCMetrics(v GCMectrics) Option { // WithReportErrorFunc returns option to specify callback for handling storage-related errors // in the background workers. 
-func WithReportErrorFunc(f func(selfID string, message string, err error)) Option { +func WithReportErrorFunc(f func(ctx context.Context, selfID string, message string, err error)) Option { return func(c *cfg) { c.reportErrorFunc = f } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index 73ba2e82b..f9ee34488 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -89,8 +89,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard WithPiloramaOptions(pilorama.WithPath(filepath.Join(o.rootPath, "pilorama"))), WithWriteCache(enableWriteCache), WithWriteCacheOptions(o.wcOpts), - WithDeletedLockCallback(func(_ context.Context, addresses []oid.Address) { - sh.HandleDeletedLocks(addresses) + WithDeletedLockCallback(func(ctx context.Context, addresses []oid.Address) { + sh.HandleDeletedLocks(ctx, addresses) }), WithExpiredLocksCallback(func(ctx context.Context, epoch uint64, a []oid.Address) { sh.HandleExpiredLocks(ctx, epoch, a) diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go index 79ab7d9c6..fd85b4501 100644 --- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go +++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go @@ -43,12 +43,12 @@ func BenchmarkWriteAfterDelete(b *testing.B) { b.SetParallelism(parallel) benchmarkRunPar(b, cache, payloadSize) }) - require.NoError(b, cache.Close()) + require.NoError(b, cache.Close(context.Background())) } func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close()) }() + defer func() { require.NoError(b, cache.Close(context.Background())) }() ctx := context.Background() objGen := testutil.RandObjGenerator{ObjSize: size} @@ -71,7 +71,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) { benchmarkPutPrepare(b, cache) - defer func() { require.NoError(b, cache.Close()) }() + defer func() { require.NoError(b, cache.Close(context.Background())) }() benchmarkRunPar(b, cache, size) } @@ -100,7 +100,7 @@ func benchmarkRunPar(b *testing.B, cache writecache.Cache, size uint64) { func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) { require.NoError(b, cache.Open(context.Background(), mode.ReadWrite), "opening") - require.NoError(b, cache.Init(), "initializing") + require.NoError(b, cache.Init(context.Background()), "initializing") } type testMetabase struct{} diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index 098872e08..e829d013c 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -98,19 +98,19 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error { } // Init runs necessary services. 
-func (c *cache) Init() error { +func (c *cache) Init(ctx context.Context) error { c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode)) - if err := c.flushAndDropBBoltDB(context.Background()); err != nil { + if err := c.flushAndDropBBoltDB(ctx); err != nil { return fmt.Errorf("flush previous version write-cache database: %w", err) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.WithoutCancel(ctx)) // canceling performed by cache c.cancel.Store(cancel) c.runFlushLoop(ctx) return nil } // Close closes db connection and stops services. Executes ObjectCounters.FlushAndClose op. -func (c *cache) Close() error { +func (c *cache) Close(ctx context.Context) error { if cancelValue := c.cancel.Swap(dummyCanceler); cancelValue != nil { cancelValue.(context.CancelFunc)() } @@ -127,7 +127,7 @@ func (c *cache) Close() error { var err error if c.fsTree != nil { - err = c.fsTree.Close() + err = c.fsTree.Close(ctx) if err != nil { c.fsTree = nil } diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 123eb4abc..d9e34ceab 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -112,7 +112,7 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI }) if err != nil { if !client.IsErrObjectNotFound(err) { - c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err)) + c.reportFlushError(ctx, logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err)) } return } @@ -126,11 +126,11 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData))) } -func (c *cache) reportFlushError(msg string, addr string, err error) { +func (c *cache) reportFlushError(ctx context.Context, msg string, addr string, err error) { if c.reportError != nil { - c.reportError(msg, err) + c.reportError(ctx, msg, err) } else { - c.log.Error(context.Background(), msg, + c.log.Error(ctx, msg, zap.String("address", addr), zap.Error(err)) } @@ -145,7 +145,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error { var obj objectSDK.Object err := obj.Unmarshal(e.ObjectData) if err != nil { - c.reportFlushError(logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err)) + c.reportFlushError(ctx, logs.FSTreeCantUnmarshalObject, sAddr, metaerr.Wrap(err)) if ignoreErrors { return nil } @@ -183,7 +183,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b if err != nil { if !errors.Is(err, common.ErrNoSpace) && !errors.Is(err, common.ErrReadOnly) && !errors.Is(err, blobstor.ErrNoPlaceFound) { - c.reportFlushError(logs.FSTreeCantFushObjectBlobstor, + c.reportFlushError(ctx, logs.FSTreeCantFushObjectBlobstor, addr.EncodeToString(), err) } return err @@ -195,7 +195,7 @@ func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []b _, err = c.metabase.UpdateStorageID(ctx, updPrm) if err != nil { - c.reportFlushError(logs.FSTreeCantUpdateID, + c.reportFlushError(ctx, logs.FSTreeCantUpdateID, addr.EncodeToString(), err) } return err diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 92fb493e0..7fc84657c 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -38,9 +38,9 @@ func TestFlush(t *testing.T) { errCountOpt 
:= func() (Option, *atomic.Uint32) { cnt := &atomic.Uint32{} - return WithReportErrorFunc(func(msg string, err error) { + return WithReportErrorFunc(func(ctx context.Context, msg string, err error) { cnt.Add(1) - testlogger.Warn(context.Background(), msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) + testlogger.Warn(ctx, msg, zap.Uint32("error_count", cnt.Load()), zap.Error(err)) }), cnt } @@ -114,7 +114,7 @@ func runFlushTest[Option any]( ) { t.Run("no errors", func(t *testing.T) { wc, bs, mb := newCache(t, createCacheFn) - defer func() { require.NoError(t, wc.Close()) }() + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) require.NoError(t, bs.SetMode(context.Background(), mode.ReadWrite)) @@ -127,7 +127,7 @@ func runFlushTest[Option any]( t.Run("flush on moving to degraded mode", func(t *testing.T) { wc, bs, mb := newCache(t, createCacheFn) - defer func() { require.NoError(t, wc.Close()) }() + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) // Blobstor is read-only, so we expect en error from `flush` here. @@ -145,7 +145,7 @@ func runFlushTest[Option any]( t.Run(f.Desc, func(t *testing.T) { errCountOpt, errCount := errCountOption() wc, bs, mb := newCache(t, createCacheFn, errCountOpt) - defer func() { require.NoError(t, wc.Close()) }() + defer func() { require.NoError(t, wc.Close(context.Background())) }() objects := putObjects(t, wc) f.InjectFn(t, wc) @@ -173,7 +173,7 @@ func newCache[Option any]( meta.WithPath(filepath.Join(dir, "meta")), meta.WithEpochState(dummyEpoch{})) require.NoError(t, mb.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, mb.Init()) + require.NoError(t, mb.Init(context.Background())) bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{ { @@ -184,11 +184,11 @@ func newCache[Option any]( }, })) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, bs.Init()) + require.NoError(t, bs.Init(context.Background())) wc := createCacheFn(t, mb, bs, opts...) require.NoError(t, wc.Open(context.Background(), mode.ReadWrite)) - require.NoError(t, wc.Init()) + require.NoError(t, wc.Init(context.Background())) // First set mode for metabase and blobstor to prevent background flushes. 
require.NoError(t, mb.SetMode(context.Background(), mode.ReadOnly)) diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index db789d994..73d12fd33 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -82,7 +82,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { return nil } if !shrink { - if err := c.fsTree.Close(); err != nil { + if err := c.fsTree.Close(ctx); err != nil { return fmt.Errorf("can't close write-cache storage: %w", err) } return nil @@ -101,7 +101,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { return fmt.Errorf("failed to check write-cache items: %w", err) } } - if err := c.fsTree.Close(); err != nil { + if err := c.fsTree.Close(ctx); err != nil { return fmt.Errorf("can't close write-cache storage: %w", err) } if empty { diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go index 70cfe8382..4fbadbc64 100644 --- a/pkg/local_object_storage/writecache/mode_test.go +++ b/pkg/local_object_storage/writecache/mode_test.go @@ -18,13 +18,13 @@ func TestMode(t *testing.T) { require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly)) require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Init()) + require.NoError(t, wc.Init(context.Background())) require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Close()) + require.NoError(t, wc.Close(context.Background())) require.NoError(t, wc.Open(context.Background(), mode.Degraded)) require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Init()) + require.NoError(t, wc.Init(context.Background())) require.Nil(t, wc.(*cache).fsTree) - require.NoError(t, wc.Close()) + require.NoError(t, wc.Close(context.Background())) } diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index 25c1694a8..f2957fe98 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -1,6 +1,8 @@ package writecache import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -29,7 +31,7 @@ type options struct { // noSync is true iff FSTree allows unsynchronized writes. noSync bool // reportError is the function called when encountering disk errors in background workers. - reportError func(string, error) + reportError func(context.Context, string, error) // metrics is metrics implementation metrics Metrics // disableBackgroundFlush is for testing purposes only. @@ -108,7 +110,7 @@ func WithNoSync(noSync bool) Option { } // WithReportErrorFunc sets error reporting function. 
-func WithReportErrorFunc(f func(string, error)) Option { +func WithReportErrorFunc(f func(context.Context, string, error)) Option { return func(o *options) { o.reportError = f } diff --git a/pkg/local_object_storage/writecache/writecache.go b/pkg/local_object_storage/writecache/writecache.go index d07220b68..70b17eb8e 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -44,9 +44,9 @@ type Cache interface { Flush(context.Context, bool, bool) error Seal(context.Context, SealPrm) error - Init() error + Init(context.Context) error Open(ctx context.Context, mode mode.Mode) error - Close() error + Close(context.Context) error GetMetrics() Metrics } diff --git a/pkg/morph/client/balance/burn.go b/pkg/morph/client/balance/burn.go index 4befbef45..f4685b0ab 100644 --- a/pkg/morph/client/balance/burn.go +++ b/pkg/morph/client/balance/burn.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -30,12 +32,12 @@ func (b *BurnPrm) SetID(id []byte) { } // Burn destroys funds from the account. -func (c *Client) Burn(p BurnPrm) error { +func (c *Client) Burn(ctx context.Context, p BurnPrm) error { prm := client.InvokePrm{} prm.SetMethod(burnMethod) prm.SetArgs(p.to, p.amount, p.id) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/lock.go b/pkg/morph/client/balance/lock.go index a5b206799..83e8b0586 100644 --- a/pkg/morph/client/balance/lock.go +++ b/pkg/morph/client/balance/lock.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -42,12 +44,12 @@ func (l *LockPrm) SetDueEpoch(dueEpoch int64) { } // Lock locks fund on the user account. -func (c *Client) Lock(p LockPrm) error { +func (c *Client) Lock(ctx context.Context, p LockPrm) error { prm := client.InvokePrm{} prm.SetMethod(lockMethod) prm.SetArgs(p.id, p.user, p.lock, p.amount, p.dueEpoch) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/mint.go b/pkg/morph/client/balance/mint.go index 73448da31..082ade85e 100644 --- a/pkg/morph/client/balance/mint.go +++ b/pkg/morph/client/balance/mint.go @@ -1,6 +1,8 @@ package balance import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -30,12 +32,12 @@ func (m *MintPrm) SetID(id []byte) { } // Mint sends funds to the account. -func (c *Client) Mint(p MintPrm) error { +func (c *Client) Mint(ctx context.Context, p MintPrm) error { prm := client.InvokePrm{} prm.SetMethod(mintMethod) prm.SetArgs(p.to, p.amount, p.id) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 08fb05289..65a0b70a6 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -1,6 +1,7 @@ package balance import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -23,7 +24,7 @@ type TransferPrm struct { // with details p.Details through direct smart contract call. // // If TryNotary is provided, calls notary contract. 
-func (c *Client) TransferX(p TransferPrm) error { +func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { from, err := address.StringToUint160(p.From.EncodeToString()) if err != nil { return err @@ -39,7 +40,7 @@ func (c *Client) TransferX(p TransferPrm) error { prm.SetArgs(from, to, p.Amount, p.Details) prm.InvokePrmOptional = p.InvokePrmOptional - _, err = c.client.Invoke(prm) + _, err = c.client.Invoke(ctx, prm) if err != nil { return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err) } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index ef6a009e4..f61c6e9f9 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -180,7 +180,7 @@ func wrapFrostFSError(err error) error { // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. -func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { start := time.Now() success := false defer func() { @@ -199,7 +199,7 @@ func (c *Client) Invoke(contract util.Uint160, fee fixedn.Fixed8, method string, return 0, fmt.Errorf("could not invoke %s: %w", method, err) } - c.logger.Debug(context.Background(), logs.ClientNeoClientInvoke, + c.logger.Debug(ctx, logs.ClientNeoClientInvoke, zap.String("method", method), zap.Uint32("vub", vub), zap.Stringer("tx_hash", txHash.Reverse())) diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go index 20351b570..5696645b2 100644 --- a/pkg/morph/client/container/delete.go +++ b/pkg/morph/client/container/delete.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" @@ -12,7 +13,7 @@ import ( // along with signature and session token. // // Returns error if container ID is nil. -func Delete(c *Client, witness core.RemovalWitness) error { +func Delete(ctx context.Context, c *Client, witness core.RemovalWitness) error { binCnr := make([]byte, sha256.Size) witness.ContainerID.Encode(binCnr) @@ -26,7 +27,7 @@ func Delete(c *Client, witness core.RemovalWitness) error { prm.SetToken(tok.Marshal()) } - _, err := c.Delete(prm) + _, err := c.Delete(ctx, prm) return err } @@ -67,7 +68,7 @@ func (d *DeletePrm) SetKey(key []byte) { // the removal to interrupt. // // If TryNotary is provided, calls notary contract. -func (c *Client) Delete(p DeletePrm) (uint32, error) { +func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { if len(p.signature) == 0 && !p.IsControl() { return 0, errNilArgument } @@ -77,7 +78,7 @@ func (c *Client) Delete(p DeletePrm) (uint32, error) { prm.SetArgs(p.cnr, p.signature, p.key, p.token) prm.InvokePrmOptional = p.InvokePrmOptional - res, err := c.client.Invoke(prm) + res, err := c.client.Invoke(ctx, prm) if err != nil { return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err) } diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index 777ae2d4e..74d9f6da8 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -1,6 +1,7 @@ package container import ( + "context" "fmt" containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" @@ -14,7 +15,7 @@ import ( // along with sig.Key() and sig.Sign(). // // Returns error if container is nil. 
-func Put(c *Client, cnr containercore.Container) (*cid.ID, error) { +func Put(ctx context.Context, c *Client, cnr containercore.Container) (*cid.ID, error) { data := cnr.Value.Marshal() d := container.ReadDomain(cnr.Value) @@ -35,7 +36,7 @@ func Put(c *Client, cnr containercore.Container) (*cid.ID, error) { prm.SetKey(sigV2.GetKey()) prm.SetSignature(sigV2.GetSign()) - err := c.Put(prm) + err := c.Put(ctx, prm) if err != nil { return nil, err } @@ -95,7 +96,7 @@ func (p *PutPrm) SetZone(zone string) { // encountered that caused the saving to interrupt. // // If TryNotary is provided, calls notary contract. -func (c *Client) Put(p PutPrm) error { +func (c *Client) Put(ctx context.Context, p PutPrm) error { if len(p.sig) == 0 || len(p.key) == 0 { return errNilArgument } @@ -116,7 +117,7 @@ func (c *Client) Put(p PutPrm) error { prm.SetMethod(method) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { return fmt.Errorf("could not invoke method (%s): %w", method, err) } diff --git a/pkg/morph/client/frostfs/cheque.go b/pkg/morph/client/frostfs/cheque.go index 016b56f8f..d3eba7639 100644 --- a/pkg/morph/client/frostfs/cheque.go +++ b/pkg/morph/client/frostfs/cheque.go @@ -1,6 +1,8 @@ package frostfscontract import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/util" @@ -37,13 +39,13 @@ func (c *ChequePrm) SetLock(lock util.Uint160) { } // Cheque invokes `cheque` method of FrostFS contract. -func (x *Client) Cheque(p ChequePrm) error { +func (x *Client) Cheque(ctx context.Context, p ChequePrm) error { prm := client.InvokePrm{} prm.SetMethod(chequeMethod) prm.SetArgs(p.id, p.user, p.amount, p.lock) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := x.client.Invoke(prm) + _, err := x.client.Invoke(ctx, prm) return err } @@ -66,12 +68,12 @@ func (a *AlphabetUpdatePrm) SetPubs(pubs keys.PublicKeys) { } // AlphabetUpdate update list of alphabet nodes. -func (x *Client) AlphabetUpdate(p AlphabetUpdatePrm) error { +func (x *Client) AlphabetUpdate(ctx context.Context, p AlphabetUpdatePrm) error { prm := client.InvokePrm{} prm.SetMethod(alphabetUpdateMethod) prm.SetArgs(p.id, p.pubs) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := x.client.Invoke(prm) + _, err := x.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 2d19a8193..0a3c351db 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "errors" "fmt" @@ -155,13 +156,13 @@ func (s *SetConfigPrm) SetValue(value any) { } // SetConfig sets config field. -func (c *Client) SetConfig(p SetConfigPrm) error { +func (c *Client) SetConfig(ctx context.Context, p SetConfigPrm) error { prm := client.InvokePrm{} prm.SetMethod(setConfigMethod) prm.SetArgs(p.id, p.key, p.value) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index d6f8c56b2..c9dc7d2fc 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "crypto/elliptic" "fmt" @@ -23,7 +24,7 @@ func (u *UpdateIRPrm) SetKeys(keys keys.PublicKeys) { } // UpdateInnerRing updates inner ring keys. 
-func (c *Client) UpdateInnerRing(p UpdateIRPrm) error { +func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { args := make([][]byte, len(p.keys)) for i := range args { args[i] = p.keys[i].Bytes() @@ -34,7 +35,7 @@ func (c *Client) UpdateInnerRing(p UpdateIRPrm) error { prm.SetArgs(args) prm.InvokePrmOptional = p.InvokePrmOptional - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) return err } diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go index ded386c86..efcdfd7b6 100644 --- a/pkg/morph/client/netmap/new_epoch.go +++ b/pkg/morph/client/netmap/new_epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,12 +9,12 @@ import ( // NewEpoch updates FrostFS epoch number through // Netmap contract call. -func (c *Client) NewEpoch(epoch uint64) error { +func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error { prm := client.InvokePrm{} prm.SetMethod(newEpochMethod) prm.SetArgs(epoch) - _, err := c.client.Invoke(prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) } @@ -24,14 +25,14 @@ func (c *Client) NewEpoch(epoch uint64) error { // control notary transaction internally to ensure all // nodes produce the same transaction with high probability. // If vub > 0, vub will be used as valid until block value. -func (c *Client) NewEpochControl(epoch uint64, vub uint32) (uint32, error) { +func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) (uint32, error) { prm := client.InvokePrm{} prm.SetMethod(newEpochMethod) prm.SetArgs(epoch) prm.SetControlTX(true) prm.SetVUB(vub) - res, err := c.client.Invoke(prm) + res, err := c.client.Invoke(ctx, prm) if err != nil { return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) } diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go index 764bbc899..9617d018c 100644 --- a/pkg/morph/client/netmap/peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "errors" "fmt" @@ -24,7 +25,7 @@ func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) { // AddPeer registers peer in FrostFS network through // Netmap contract call. -func (c *Client) AddPeer(p AddPeerPrm) error { +func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error { method := addPeerMethod if c.client.WithNotary() && c.client.IsAlpha() { @@ -39,7 +40,7 @@ func (c *Client) AddPeer(p AddPeerPrm) error { prm.SetArgs(p.nodeInfo.Marshal()) prm.InvokePrmOptional = p.InvokePrmOptional - if _, err := c.client.Invoke(prm); err != nil { + if _, err := c.client.Invoke(ctx, prm); err != nil { return fmt.Errorf("could not invoke method (%s): %w", method, err) } return nil @@ -47,7 +48,7 @@ func (c *Client) AddPeer(p AddPeerPrm) error { // ForceRemovePeer marks the given peer as offline via a notary control transaction. // If vub > 0, vub will be used as valid until block value. 
-func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) { +func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, vub uint32) (uint32, error) { if !c.client.WithNotary() { return 0, errFailedToRemovePeerWithoutNotary } @@ -57,7 +58,7 @@ func (c *Client) ForceRemovePeer(nodeInfo netmap.NodeInfo, vub uint32) (uint32, prm.SetControlTX(true) prm.SetVUB(vub) - vub, err := c.UpdatePeerState(prm) + vub, err := c.UpdatePeerState(ctx, prm) if err != nil { return 0, fmt.Errorf("updating peer state: %v", err) } diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go index 7c3a4e8cd..971a55d33 100644 --- a/pkg/morph/client/netmap/update_state.go +++ b/pkg/morph/client/netmap/update_state.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" @@ -36,7 +37,7 @@ func (u *UpdatePeerPrm) SetMaintenance() { } // UpdatePeerState changes peer status through Netmap contract call. -func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) { +func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (uint32, error) { method := updateStateMethod if c.client.WithNotary() && c.client.IsAlpha() { @@ -55,7 +56,7 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) (uint32, error) { prm.SetArgs(int64(p.state), p.key) prm.InvokePrmOptional = p.InvokePrmOptional - res, err := c.client.Invoke(prm) + res, err := c.client.Invoke(ctx, prm) if err != nil { return 0, fmt.Errorf("could not invoke smart contract: %w", err) } diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 58c417fb1..65a5e77a6 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -141,7 +141,7 @@ func (c *Client) ProbeNotary() (res bool) { // use this function. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256, error) { +func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta uint32) (util.Uint256, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -164,7 +164,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256 } till := max(int64(bc+delta), currentTill) - res, _, err := c.depositNotary(amount, till) + res, _, err := c.depositNotary(ctx, amount, till) return res, err } @@ -173,7 +173,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (util.Uint256 // This allows to avoid ValidAfterDeposit failures. // // This function must be invoked with notary enabled otherwise it throws panic. 
-func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint32, error) { +func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) (util.Uint256, uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -186,10 +186,10 @@ func (c *Client) DepositEndlessNotary(amount fixedn.Fixed8) (util.Uint256, uint3 } // till value refers to a block height and it is uint32 value in neo-go - return c.depositNotary(amount, math.MaxUint32) + return c.depositNotary(ctx, amount, math.MaxUint32) } -func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { +func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, c.notary.notary, @@ -202,7 +202,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, // Transaction is already in mempool waiting to be processed. // This is an expected situation if we restart the service. - c.logger.Info(context.Background(), logs.ClientNotaryDepositHasAlreadyBeenMade, + c.logger.Info(ctx, logs.ClientNotaryDepositHasAlreadyBeenMade, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -210,7 +210,7 @@ func (c *Client) depositNotary(amount fixedn.Fixed8, till int64) (util.Uint256, return util.Uint256{}, 0, nil } - c.logger.Info(context.Background(), logs.ClientNotaryDepositInvoke, + c.logger.Info(ctx, logs.ClientNotaryDepositInvoke, zap.Int64("amount", int64(amount)), zap.Int64("expire_at", till), zap.Uint32("vub", vub), @@ -275,7 +275,7 @@ func (u *UpdateNotaryListPrm) SetHash(hash util.Uint256) { // committee multi signature. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error { +func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) error { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -293,6 +293,7 @@ func (c *Client) UpdateNotaryList(prm UpdateNotaryListPrm) error { } return c.notaryInvokeAsCommittee( + ctx, setDesignateMethod, nonce, vub, @@ -323,7 +324,7 @@ func (u *UpdateAlphabetListPrm) SetHash(hash util.Uint256) { // Requires committee multi signature. // // This function must be invoked with notary enabled otherwise it throws panic. -func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { +func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabetListPrm) error { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -341,6 +342,7 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { } return c.notaryInvokeAsCommittee( + ctx, setDesignateMethod, nonce, vub, @@ -356,7 +358,7 @@ func (c *Client) UpdateNeoFSAlphabetList(prm UpdateAlphabetListPrm) error { // Returns valid until block value. // // `nonce` and `vub` are used only if notary is enabled. -func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -365,10 +367,10 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui } if c.notary == nil { - return c.Invoke(contract, fee, method, args...) 
+ return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, true, contract, nonce, vub, method, args...) + return c.notaryInvoke(ctx, false, true, contract, nonce, vub, method, args...) } // NotaryInvokeNotAlpha does the same as NotaryInvoke but does not use client's @@ -376,7 +378,7 @@ func (c *Client) NotaryInvoke(contract util.Uint160, fee fixedn.Fixed8, nonce ui // not expected to be signed by the current node. // // Considered to be used by non-IR nodes. -func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() @@ -385,10 +387,10 @@ func (c *Client) NotaryInvokeNotAlpha(contract util.Uint160, fee fixedn.Fixed8, } if c.notary == nil { - return c.Invoke(contract, fee, method, args...) + return c.Invoke(ctx, contract, fee, method, args...) } - return c.notaryInvoke(false, false, contract, rand.Uint32(), vubP, method, args...) + return c.notaryInvoke(ctx, false, false, contract, rand.Uint32(), vubP, method, args...) } // NotarySignAndInvokeTX signs and sends notary request that was received from @@ -438,13 +440,13 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { return nil } -func (c *Client) notaryInvokeAsCommittee(method string, nonce, vub uint32, args ...any) error { +func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, nonce, vub uint32, args ...any) error { designate := c.GetDesignateHash() - _, err := c.notaryInvoke(true, true, designate, nonce, &vub, method, args...) + _, err := c.notaryInvoke(ctx, true, true, designate, nonce, &vub, method, args...) return err } -func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { start := time.Now() success := false defer func() { @@ -486,7 +488,7 @@ func (c *Client) notaryInvoke(committee, invokedByAlpha bool, contract util.Uint return 0, err } - c.logger.Debug(context.Background(), logs.ClientNotaryRequestInvoked, + c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked, zap.String("method", method), zap.Uint32("valid_until_block", untilActual), zap.String("tx_hash", mainH.StringLE()), diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index dfcf62b83..1e091936f 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -1,6 +1,7 @@ package client import ( + "context" "fmt" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -140,7 +141,7 @@ type InvokeRes struct { // // If fee for the operation executed using specified method is customized, then StaticClient uses it. // Otherwise, default fee is used. -func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { +func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) { var res InvokeRes var err error var vubP *uint32 @@ -169,7 +170,7 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvoke(s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) 
+ res.VUB, err = s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) return res, err } @@ -177,11 +178,12 @@ func (s StaticClient) Invoke(prm InvokePrm) (InvokeRes, error) { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvokeNotAlpha(s.scScriptHash, s.fee, vubP, prm.method, prm.args...) + res.VUB, err = s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) return res, err } res.VUB, err = s.client.Invoke( + ctx, s.scScriptHash, s.fee, prm.method, diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index bda83ba54..822335329 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -10,7 +10,7 @@ import ( type Handler func(context.Context, Event) // BlockHandler is a chain block processing function. -type BlockHandler func(*block.Block) +type BlockHandler func(context.Context, *block.Block) // NotificationHandlerInfo is a structure that groups // the parameters of the handler of particular diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index eeec46540..6e6184e77 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -291,18 +291,18 @@ loop: continue loop } - l.handleBlockEvent(b) + l.handleBlockEvent(ctx, b) } } } -func (l *listener) handleBlockEvent(b *block.Block) { +func (l *listener) handleBlockEvent(ctx context.Context, b *block.Block) { if err := l.pool.Submit(func() { for i := range l.blockHandlers { - l.blockHandlers[i](b) + l.blockHandlers[i](ctx, b) } }); err != nil { - l.log.Warn(context.Background(), logs.EventListenerWorkerPoolDrained, + l.log.Warn(ctx, logs.EventListenerWorkerPoolDrained, zap.Int("capacity", l.pool.Cap())) } } diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index 214daf694..c0f9722d7 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -34,7 +34,7 @@ func TestEventHandling(t *testing.T) { blockHandled := make(chan bool) handledBlocks := make([]*block.Block, 0) - l.RegisterBlockHandler(func(b *block.Block) { + l.RegisterBlockHandler(func(_ context.Context, b *block.Block) { handledBlocks = append(handledBlocks, b) blockHandled <- true }) @@ -137,7 +137,7 @@ func TestErrorPassing(t *testing.T) { WorkerPoolCapacity: 10, }) require.NoError(t, err, "failed to create listener") - l.RegisterBlockHandler(func(b *block.Block) {}) + l.RegisterBlockHandler(func(context.Context, *block.Block) {}) errCh := make(chan error) diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go index fa6252118..15dacd553 100644 --- a/pkg/network/transport/object/grpc/service.go +++ b/pkg/network/transport/object/grpc/service.go @@ -26,7 +26,7 @@ func New(c objectSvc.ServiceServer) *Server { // Patch opens internal Object patch stream and feeds it by the data read from gRPC stream. func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { - stream, err := s.srv.Patch() + stream, err := s.srv.Patch(gStream.Context()) if err != nil { return err } @@ -68,7 +68,7 @@ func (s *Server) Patch(gStream objectGRPC.ObjectService_PatchServer) error { // Put opens internal Object service Put stream and overtakes data from gRPC stream to it. 
func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error { - stream, err := s.srv.Put() + stream, err := s.srv.Put(gStream.Context()) if err != nil { return err } diff --git a/pkg/services/apemanager/audit.go b/pkg/services/apemanager/audit.go index b9bea07fb..61fb025b8 100644 --- a/pkg/services/apemanager/audit.go +++ b/pkg/services/apemanager/audit.go @@ -33,7 +33,7 @@ func (a *auditService) AddChain(ctx context.Context, req *apemanager.AddChainReq return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_AddChain_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), res.GetBody().GetChainID()), @@ -49,7 +49,7 @@ func (a *auditService) ListChains(ctx context.Context, req *apemanager.ListChain return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_ListChains_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), nil), @@ -65,7 +65,7 @@ func (a *auditService) RemoveChain(ctx context.Context, req *apemanager.RemoveCh return res, err } - audit.LogRequest(a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req, + audit.LogRequest(ctx, a.log, ape_grpc.APEManagerService_RemoveChain_FullMethodName, req, audit.TargetFromChainID(req.GetBody().GetTarget().GetTargetType().String(), req.GetBody().GetTarget().GetName(), req.GetBody().GetChainID()), diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go index 03d3dc13d..411eb4863 100644 --- a/pkg/services/container/audit.go +++ b/pkg/services/container/audit.go @@ -35,7 +35,7 @@ func (a *auditService) Delete(ctx context.Context, req *container.DeleteRequest) return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Delete_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Delete_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err @@ -47,7 +47,7 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Get_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Get_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err } @@ -58,7 +58,7 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_List_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_List_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) return res, err } @@ -69,7 +69,7 @@ func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*con if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, container_grpc.ContainerService_Put_FullMethodName, req, + audit.LogRequest(ctx, a.log, container_grpc.ContainerService_Put_FullMethodName, req, audit.TargetFromRef(res.GetBody().GetContainerID(), &cid.ID{}), err == nil) return res, err } diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index 
eb43eab70..211f469f3 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -35,9 +35,9 @@ type Reader interface { // Writer is an interface of container storage updater. type Writer interface { // Put stores specified container in the side chain. - Put(containercore.Container) (*cid.ID, error) + Put(context.Context, containercore.Container) (*cid.ID, error) // Delete removes specified container from the side chain. - Delete(containercore.RemovalWitness) error + Delete(context.Context, containercore.RemovalWitness) error } func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor { @@ -47,7 +47,7 @@ func NewExecutor(rdr Reader, wrt Writer) containerSvc.ServiceExecutor { } } -func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) { +func (s *morphExecutor) Put(ctx context.Context, tokV2 *sessionV2.Token, body *container.PutRequestBody) (*container.PutResponseBody, error) { sigV2 := body.GetSignature() if sigV2 == nil { // TODO(@cthulhu-rider): #468 use "const" error @@ -80,7 +80,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con } } - idCnr, err := s.wrt.Put(cnr) + idCnr, err := s.wrt.Put(ctx, cnr) if err != nil { return nil, err } @@ -94,7 +94,7 @@ func (s *morphExecutor) Put(_ context.Context, tokV2 *sessionV2.Token, body *con return res, nil } -func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) { +func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body *container.DeleteRequestBody) (*container.DeleteResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -124,7 +124,7 @@ func (s *morphExecutor) Delete(_ context.Context, tokV2 *sessionV2.Token, body * rmWitness.Signature = body.GetSignature() rmWitness.SessionToken = tok - err = s.wrt.Delete(rmWitness) + err = s.wrt.Delete(ctx, rmWitness) if err != nil { return nil, err } diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go index 87d307385..1f6fdb0be 100644 --- a/pkg/services/container/morph/executor_test.go +++ b/pkg/services/container/morph/executor_test.go @@ -24,11 +24,11 @@ type mock struct { containerSvcMorph.Reader } -func (m mock) Put(_ containerCore.Container) (*cid.ID, error) { +func (m mock) Put(_ context.Context, _ containerCore.Container) (*cid.ID, error) { return new(cid.ID), nil } -func (m mock) Delete(_ containerCore.RemovalWitness) error { +func (m mock) Delete(_ context.Context, _ containerCore.RemovalWitness) error { return nil } diff --git a/pkg/services/control/ir/server/audit.go b/pkg/services/control/ir/server/audit.go index e54fa9824..d9f65a2fc 100644 --- a/pkg/services/control/ir/server/audit.go +++ b/pkg/services/control/ir/server/audit.go @@ -36,7 +36,7 @@ func (a *auditService) HealthCheck(ctx context.Context, req *control.HealthCheck if !a.enabled.Load() { return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) + audit.LogRequestWithKey(ctx, a.log, control.ControlService_HealthCheck_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) return res, err } @@ -79,7 +79,7 @@ func (a *auditService) RemoveContainer(ctx context.Context, req *control.RemoveC } } - audit.LogRequestWithKey(a.log, 
control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil) + audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveContainer_FullMethodName, req.GetSignature().GetKey(), sb, err == nil) return res, err } @@ -90,7 +90,7 @@ func (a *auditService) RemoveNode(ctx context.Context, req *control.RemoveNodeRe return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(), + audit.LogRequestWithKey(ctx, a.log, control.ControlService_RemoveNode_FullMethodName, req.GetSignature().GetKey(), audit.TargetFromString(hex.EncodeToString(req.GetBody().GetKey())), err == nil) return res, err } @@ -102,7 +102,7 @@ func (a *auditService) TickEpoch(ctx context.Context, req *control.TickEpochRequ return res, err } - audit.LogRequestWithKey(a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(), + audit.LogRequestWithKey(ctx, a.log, control.ControlService_TickEpoch_FullMethodName, req.GetSignature().GetKey(), nil, err == nil) return res, err } diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index 63be22411..e2c385c6a 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -40,7 +40,7 @@ func (s *Server) HealthCheck(_ context.Context, req *control.HealthCheckRequest) // TickEpoch forces a new epoch. // // If request is not signed with a key from white list, permission error returns. -func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) { +func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) (*control.TickEpochResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -53,7 +53,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c return nil, fmt.Errorf("getting current epoch: %w", err) } - vub, err := s.netmapClient.NewEpochControl(epoch+1, req.GetBody().GetVub()) + vub, err := s.netmapClient.NewEpochControl(ctx, epoch+1, req.GetBody().GetVub()) if err != nil { return nil, fmt.Errorf("forcing new epoch: %w", err) } @@ -69,7 +69,7 @@ func (s *Server) TickEpoch(_ context.Context, req *control.TickEpochRequest) (*c // RemoveNode forces a node removal. // // If request is not signed with a key from white list, permission error returns. -func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) { +func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) (*control.RemoveNodeResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -95,7 +95,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( return nil, status.Error(codes.FailedPrecondition, "node is already offline") } - vub, err := s.netmapClient.ForceRemovePeer(nodeInfo, req.GetBody().GetVub()) + vub, err := s.netmapClient.ForceRemovePeer(ctx, nodeInfo, req.GetBody().GetVub()) if err != nil { return nil, fmt.Errorf("forcing node removal: %w", err) } @@ -109,7 +109,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) ( } // RemoveContainer forces a container removal. 
-func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) { +func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContainerRequest) (*control.RemoveContainerResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } @@ -124,7 +124,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer return nil, status.Error(codes.InvalidArgument, "failed to parse container ID: "+err.Error()) } var err error - vub, err = s.removeContainer(containerID, req.GetBody().GetVub()) + vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub()) if err != nil { return nil, err } @@ -144,7 +144,7 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer } for _, containerID := range cids { - vub, err = s.removeContainer(containerID, req.GetBody().GetVub()) + vub, err = s.removeContainer(ctx, containerID, req.GetBody().GetVub()) if err != nil { return nil, err } @@ -162,13 +162,13 @@ func (s *Server) RemoveContainer(_ context.Context, req *control.RemoveContainer return resp, nil } -func (s *Server) removeContainer(containerID cid.ID, vub uint32) (uint32, error) { +func (s *Server) removeContainer(ctx context.Context, containerID cid.ID, vub uint32) (uint32, error) { var prm container.DeletePrm prm.SetCID(containerID[:]) prm.SetControlTX(true) prm.SetVUB(vub) - vub, err := s.containerClient.Delete(prm) + vub, err := s.containerClient.Delete(ctx, prm) if err != nil { return 0, fmt.Errorf("forcing container removal: %w", err) } diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go index b6fdcb246..94aa1ff5b 100644 --- a/pkg/services/control/server/server.go +++ b/pkg/services/control/server/server.go @@ -1,6 +1,7 @@ package control import ( + "context" "crypto/ecdsa" "sync/atomic" @@ -45,11 +46,11 @@ type NodeState interface { // // If status is control.NetmapStatus_MAINTENANCE and maintenance is allowed // in the network settings, the node additionally starts local maintenance. - SetNetmapStatus(st control.NetmapStatus) error + SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error // ForceMaintenance works like SetNetmapStatus(control.NetmapStatus_MAINTENANCE) // but starts local maintenance regardless of the network settings. - ForceMaintenance() error + ForceMaintenance(ctx context.Context) error GetNetmapStatus() (control.NetmapStatus, uint64, error) } diff --git a/pkg/services/control/server/set_netmap_status.go b/pkg/services/control/server/set_netmap_status.go index 3fd69df12..529041dca 100644 --- a/pkg/services/control/server/set_netmap_status.go +++ b/pkg/services/control/server/set_netmap_status.go @@ -12,7 +12,7 @@ import ( // SetNetmapStatus sets node status in FrostFS network. // // If request is unsigned or signed by disallowed key, permission error returns. 
-func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { +func (s *Server) SetNetmapStatus(ctx context.Context, req *control.SetNetmapStatusRequest) (*control.SetNetmapStatusResponse, error) { // verify request if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) @@ -29,9 +29,9 @@ func (s *Server) SetNetmapStatus(_ context.Context, req *control.SetNetmapStatus "force_maintenance MUST be set for %s status only", control.NetmapStatus_MAINTENANCE) } - err = s.nodeState.ForceMaintenance() + err = s.nodeState.ForceMaintenance(ctx) } else { - err = s.nodeState.SetNetmapStatus(st) + err = s.nodeState.SetNetmapStatus(ctx, st) } if err != nil { diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index 93ad3dc46..db0f13ee7 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -193,7 +193,7 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectGet) + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) if err != nil { return err } @@ -203,8 +203,8 @@ func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) } -func (b Service) Put() (object.PutObjectStream, error) { - streamer, err := b.next.Put() +func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { + streamer, err := b.next.Put(ctx) return putStreamBasicChecker{ source: &b, @@ -212,8 +212,8 @@ func (b Service) Put() (object.PutObjectStream, error) { }, err } -func (b Service) Patch() (object.PatchObjectStream, error) { - streamer, err := b.next.Patch() +func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { + streamer, err := b.next.Patch(ctx) return &patchStreamBasicChecker{ source: &b, @@ -259,7 +259,7 @@ func (b Service) Head( src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHead) + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) if err != nil { return nil, err } @@ -299,7 +299,7 @@ func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStr src: request, } - reqInfo, err := b.findRequestInfo(req, id, acl.OpObjectSearch) + reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) if err != nil { return err } @@ -345,7 +345,7 @@ func (b Service) Delete( src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectDelete) + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) if err != nil { return nil, err } @@ -390,7 +390,7 @@ func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetOb src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectRange) + reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) if err != nil { return err } @@ -448,7 +448,7 @@ func (b Service) GetRangeHash( src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectHash) + reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) if err != nil { return nil, err } @@ -499,7 +499,7 @@ func (b Service) PutSingle(ctx context.Context, request *objectV2.PutSingleReque src: request, } - reqInfo, err := b.findRequestInfo(req, cnr, acl.OpObjectPut) + reqInfo, err := b.findRequestInfo(ctx, req, 
cnr, acl.OpObjectPut) if err != nil { return nil, err } @@ -564,7 +564,7 @@ func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRe src: request, } - reqInfo, err := p.source.findRequestInfo(req, cnr, acl.OpObjectPut) + reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) if err != nil { return err } @@ -651,7 +651,7 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa src: request, } - reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(req, cnr) + reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr) if err != nil { return err } @@ -668,7 +668,7 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa return p.next.CloseAndRecv(ctx) } -func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { +func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { cnr, err := b.containers.Get(idCnr) // fetch actual container if err != nil { return info, err @@ -697,7 +697,7 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in if err != nil { return info, err } - res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value) + res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) if err != nil { return info, err } @@ -726,7 +726,7 @@ func (b Service) findRequestInfo(req MetaWithToken, idCnr cid.ID, op acl.Op) (in } // findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. -func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { +func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { cnr, err := b.containers.Get(idCnr) // fetch actual container if err != nil { return info, err @@ -751,7 +751,7 @@ func (b Service) findRequestInfoWithoutACLOperationAssert(req MetaWithToken, idC if err != nil { return info, err } - res, err := b.c.Classify(ownerID, ownerKey, idCnr, cnr.Value) + res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) if err != nil { return info, err } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index 558c48da8..c6d152e0f 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -186,8 +186,8 @@ func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutR return p.next.CloseAndRecv(ctx) } -func (c *Service) Put() (objectSvc.PutObjectStream, error) { - streamer, err := c.next.Put() +func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { + streamer, err := c.next.Put(ctx) return &putStreamBasicChecker{ apeChecker: c.apeChecker, @@ -241,8 +241,8 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa return p.next.CloseAndRecv(ctx) } -func (c *Service) Patch() (objectSvc.PatchObjectStream, error) { - streamer, err := c.next.Patch() +func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error) { + streamer, err := c.next.Patch(ctx) return &patchStreamBasicChecker{ apeChecker: c.apeChecker, diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index b42084634..dde9f8fc0 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -37,7 +37,7 @@ func (a *auditService) Delete(ctx 
context.Context, req *object.DeleteRequest) (* if !a.enabled.Load() { return res, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Delete_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Delete_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return res, err } @@ -48,7 +48,7 @@ func (a *auditService) Get(req *object.GetRequest, stream GetObjectStream) error if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Get_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Get_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return err } @@ -59,7 +59,7 @@ func (a *auditService) GetRange(req *object.GetRangeRequest, stream GetObjectRan if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_GetRange_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return err } @@ -70,7 +70,7 @@ func (a *auditService) GetRangeHash(ctx context.Context, req *object.GetRangeHas if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_GetRangeHash_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return resp, err } @@ -81,19 +81,19 @@ func (a *auditService) Head(ctx context.Context, req *object.HeadRequest) (*obje if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Head_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Head_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetAddress(), &oid.Address{}), err == nil) return resp, err } // Put implements ServiceServer. 
-func (a *auditService) Put() (PutObjectStream, error) { - res, err := a.next.Put() +func (a *auditService) Put(ctx context.Context) (PutObjectStream, error) { + res, err := a.next.Put(ctx) if !a.enabled.Load() { return res, err } if err != nil { - audit.LogRequest(a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false) + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, nil, nil, false) return res, err } return &auditPutStream{ @@ -108,7 +108,7 @@ func (a *auditService) PutSingle(ctx context.Context, req *object.PutSingleReque if !a.enabled.Load() { return resp, err } - audit.LogRequest(a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req, + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_PutSingle_FullMethodName, req, audit.TargetFromContainerIDObjectID(req.GetBody().GetObject().GetHeader().GetContainerID(), req.GetBody().GetObject().GetObjectID()), err == nil) @@ -121,7 +121,7 @@ func (a *auditService) Search(req *object.SearchRequest, stream SearchStream) er if !a.enabled.Load() { return err } - audit.LogRequest(a.log, objectGRPC.ObjectService_Search_FullMethodName, req, + audit.LogRequest(stream.Context(), a.log, objectGRPC.ObjectService_Search_FullMethodName, req, audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil) return err } @@ -145,7 +145,7 @@ func (a *auditPutStream) CloseAndRecv(ctx context.Context) (*object.PutResponse, a.failed = true } a.objectID = resp.GetBody().GetObjectID() - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) return resp, err @@ -164,7 +164,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error a.failed = true } if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) } @@ -183,13 +183,13 @@ type auditPatchStream struct { nonFirstSend bool } -func (a *auditService) Patch() (PatchObjectStream, error) { - res, err := a.next.Patch() +func (a *auditService) Patch(ctx context.Context) (PatchObjectStream, error) { + res, err := a.next.Patch(ctx) if !a.enabled.Load() { return res, err } if err != nil { - audit.LogRequest(a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false) + audit.LogRequest(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, nil, nil, false) return res, err } return &auditPatchStream{ @@ -205,7 +205,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo a.failed = true } a.objectID = resp.GetBody().GetObjectID() - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, + audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) return resp, err @@ -225,7 +225,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e a.failed = true } if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here - audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, + audit.LogRequestWithKey(ctx, a.log, 
objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) } diff --git a/pkg/services/object/common.go b/pkg/services/object/common.go index 758156607..ef65e78bc 100644 --- a/pkg/services/object/common.go +++ b/pkg/services/object/common.go @@ -40,20 +40,20 @@ func (x *Common) Get(req *objectV2.GetRequest, stream GetObjectStream) error { return x.nextHandler.Get(req, stream) } -func (x *Common) Put() (PutObjectStream, error) { +func (x *Common) Put(ctx context.Context) (PutObjectStream, error) { if x.state.IsMaintenance() { return nil, new(apistatus.NodeUnderMaintenance) } - return x.nextHandler.Put() + return x.nextHandler.Put(ctx) } -func (x *Common) Patch() (PatchObjectStream, error) { +func (x *Common) Patch(ctx context.Context) (PatchObjectStream, error) { if x.state.IsMaintenance() { return nil, new(apistatus.NodeUnderMaintenance) } - return x.nextHandler.Patch() + return x.nextHandler.Patch(ctx) } func (x *Common) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 8b92d34ed..3b68efab4 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -89,7 +89,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) if err != nil { resErr.Store(err) - svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err) + svcutil.LogServiceError(ctx, n.cfg.Logger, "PUT", addr.Addresses(), err) return } @@ -97,7 +97,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. *item = true }); err != nil { wg.Done() - svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err) + svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err) return true } diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 64115b86b..fdaa569da 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -154,7 +154,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index err = e.Relay(ctx, info, c) }); poolErr != nil { close(completed) - svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr) + svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr) return poolErr } <-completed diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go index 8ab423c87..59dd7fd93 100644 --- a/pkg/services/object/get/assembleec.go +++ b/pkg/services/object/get/assembleec.go @@ -37,7 +37,7 @@ func (r *request) assembleEC(ctx context.Context) { r.log.Debug(ctx, logs.GetTryingToAssembleTheECObject) // initialize epoch number - ok := r.initEpoch() + ok := r.initEpoch(ctx) if !ok { return } diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index 2b84c5b32..0ee8aed53 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -21,7 +21,7 @@ func (r *request) executeOnContainer(ctx context.Context) { ) // initialize epoch number - ok := r.initEpoch() + ok := r.initEpoch(ctx) if !ok { return } @@ -50,7 +50,7 @@ func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool zap.Uint64("number", r.curProcEpoch), ) - traverser, ok := r.generateTraverser(r.address()) + traverser, ok := r.generateTraverser(ctx, r.address()) if !ok 
{ return true } diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go index b6a83fd0c..78ca5b5e3 100644 --- a/pkg/services/object/get/remote.go +++ b/pkg/services/object/get/remote.go @@ -20,7 +20,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool { r.log.Debug(ctx, logs.ProcessingNode, zap.String("node_key", hex.EncodeToString(info.PublicKey()))) - rs, ok := r.getRemoteStorage(info) + rs, ok := r.getRemoteStorage(ctx, info) if !ok { return true } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index bba767d2d..be0950c60 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -116,7 +116,7 @@ func (r *request) netmapLookupDepth() uint64 { return r.prm.common.NetmapLookupDepth() } -func (r *request) initEpoch() bool { +func (r *request) initEpoch(ctx context.Context) bool { r.curProcEpoch = r.netmapEpoch() if r.curProcEpoch > 0 { return true @@ -129,7 +129,7 @@ func (r *request) initEpoch() bool { r.status = statusUndefined r.err = err - r.log.Debug(context.Background(), logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) + r.log.Debug(ctx, logs.CouldNotGetCurrentEpochNumber, zap.Error(err)) return false case err == nil: @@ -138,7 +138,7 @@ func (r *request) initEpoch() bool { } } -func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, bool) { +func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { obj := addr.Object() t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) @@ -148,7 +148,7 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo r.status = statusUndefined r.err = err - r.log.Debug(context.Background(), logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) + r.log.Debug(ctx, logs.GetCouldNotGenerateContainerTraverser, zap.Error(err)) return nil, false case err == nil: @@ -156,13 +156,13 @@ func (r *request) generateTraverser(addr oid.Address) (*placement.Traverser, boo } } -func (r *request) getRemoteStorage(info clientcore.NodeInfo) (remoteStorage, bool) { +func (r *request) getRemoteStorage(ctx context.Context, info clientcore.NodeInfo) (remoteStorage, bool) { rs, err := r.remoteStorageConstructor.Get(info) if err != nil { r.status = statusUndefined r.err = err - r.log.Debug(context.Background(), logs.GetCouldNotConstructRemoteNodeClient) + r.log.Debug(ctx, logs.GetCouldNotConstructRemoteNodeClient) return nil, false } diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index 377350fdd..19748e938 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -64,11 +64,11 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er return } -func (m MetricCollector) Put() (PutObjectStream, error) { +func (m MetricCollector) Put(ctx context.Context) (PutObjectStream, error) { if m.enabled { t := time.Now() - stream, err := m.next.Put() + stream, err := m.next.Put(ctx) if err != nil { return nil, err } @@ -79,14 +79,14 @@ func (m MetricCollector) Put() (PutObjectStream, error) { start: t, }, nil } - return m.next.Put() + return m.next.Put(ctx) } -func (m MetricCollector) Patch() (PatchObjectStream, error) { +func (m MetricCollector) Patch(ctx context.Context) (PatchObjectStream, error) { if m.enabled { t := time.Now() - stream, err := m.next.Patch() + stream, err := m.next.Patch(ctx) if err != nil { return nil, err } @@ -97,7 +97,7 @@ 
func (m MetricCollector) Patch() (PatchObjectStream, error) { start: t, }, nil } - return m.next.Patch() + return m.next.Patch(ctx) } func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingleRequest) (*object.PutSingleResponse, error) { diff --git a/pkg/services/object/response.go b/pkg/services/object/response.go index 3787b4168..80c971e8f 100644 --- a/pkg/services/object/response.go +++ b/pkg/services/object/response.go @@ -80,8 +80,8 @@ func (s *putStreamResponser) CloseAndRecv(ctx context.Context) (*object.PutRespo return r, nil } -func (s *ResponseService) Put() (PutObjectStream, error) { - stream, err := s.svc.Put() +func (s *ResponseService) Put(ctx context.Context) (PutObjectStream, error) { + stream, err := s.svc.Put(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } @@ -109,8 +109,8 @@ func (s *patchStreamResponser) CloseAndRecv(ctx context.Context) (*object.PatchR return r, nil } -func (s *ResponseService) Patch() (PatchObjectStream, error) { - stream, err := s.svc.Patch() +func (s *ResponseService) Patch(ctx context.Context) (PatchObjectStream, error) { + stream, err := s.svc.Patch(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index bb5c720ff..e24da975d 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -23,7 +23,7 @@ func (exec *execCtx) execute(ctx context.Context) error { exec.log.Debug(ctx, logs.ServingRequest) err := exec.executeLocal(ctx) - exec.logResult(err) + exec.logResult(ctx, err) if exec.isLocal() { exec.log.Debug(ctx, logs.SearchReturnResultDirectly) @@ -31,15 +31,15 @@ func (exec *execCtx) execute(ctx context.Context) error { } err = exec.executeOnContainer(ctx) - exec.logResult(err) + exec.logResult(ctx, err) return err } -func (exec *execCtx) logResult(err error) { +func (exec *execCtx) logResult(ctx context.Context, err error) { switch { default: - exec.log.Debug(context.Background(), logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) case err == nil: - exec.log.Debug(context.Background(), logs.OperationFinishedSuccessfully) + exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) } } diff --git a/pkg/services/object/server.go b/pkg/services/object/server.go index c570e9d8e..e65293977 100644 --- a/pkg/services/object/server.go +++ b/pkg/services/object/server.go @@ -41,8 +41,8 @@ type PatchObjectStream interface { // serving v2 Object service. 
type ServiceServer interface { Get(*object.GetRequest, GetObjectStream) error - Put() (PutObjectStream, error) - Patch() (PatchObjectStream, error) + Put(context.Context) (PutObjectStream, error) + Patch(context.Context) (PatchObjectStream, error) Head(context.Context, *object.HeadRequest) (*object.HeadResponse, error) Search(*object.SearchRequest, SearchStream) error Delete(context.Context, *object.DeleteRequest) (*object.DeleteResponse, error) diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 2c5e794e9..2b44227a5 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -103,8 +103,8 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes return resp, s.sigSvc.SignResponse(resp, err) } -func (s *SignService) Put() (PutObjectStream, error) { - stream, err := s.svc.Put() +func (s *SignService) Put(ctx context.Context) (PutObjectStream, error) { + stream, err := s.svc.Put(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } @@ -139,8 +139,8 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc return resp, s.sigSvc.SignResponse(resp, err) } -func (s *SignService) Patch() (PatchObjectStream, error) { - stream, err := s.svc.Patch() +func (s *SignService) Patch(ctx context.Context) (PatchObjectStream, error) { + stream, err := s.svc.Patch(ctx) if err != nil { return nil, fmt.Errorf("could not create Put object streamer: %w", err) } diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index 1438a0ea2..0b3676edb 100644 --- a/pkg/services/object/transport_splitter.go +++ b/pkg/services/object/transport_splitter.go @@ -87,12 +87,12 @@ func (c *TransportSplitter) Get(req *object.GetRequest, stream GetObjectStream) }) } -func (c TransportSplitter) Put() (PutObjectStream, error) { - return c.next.Put() +func (c TransportSplitter) Put(ctx context.Context) (PutObjectStream, error) { + return c.next.Put(ctx) } -func (c TransportSplitter) Patch() (PatchObjectStream, error) { - return c.next.Patch() +func (c TransportSplitter) Patch(ctx context.Context) (PatchObjectStream, error) { + return c.next.Patch(ctx) } func (c TransportSplitter) Head(ctx context.Context, request *object.HeadRequest) (*object.HeadResponse, error) { diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index 5075344a4..a9f875d8d 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -10,8 +10,8 @@ import ( ) // LogServiceError writes error message of object service to provided logger. -func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, err error) { - l.Error(context.Background(), logs.UtilObjectServiceError, +func LogServiceError(ctx context.Context, l *logger.Logger, req string, node network.AddressGroup, err error) { + l.Error(ctx, logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), zap.String("error", err.Error()), @@ -19,8 +19,8 @@ func LogServiceError(l *logger.Logger, req string, node network.AddressGroup, er } // LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
-func LogWorkerPoolError(l *logger.Logger, req string, err error) { - l.Error(context.Background(), logs.UtilCouldNotPushTaskToWorkerPool, +func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { + l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, zap.String("request", req), zap.String("error", err.Error()), ) diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index 6a9706b9e..a4e36c2dc 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -63,7 +63,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr ) } else { if ts != nil { - return g.handleTS(addrStr, ts, epoch) + return g.handleTS(ctx, addrStr, ts, epoch) } } @@ -72,12 +72,12 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr return false } -func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool { +func (g *ExpirationChecker) handleTS(ctx context.Context, addr string, ts *objectSDK.Object, reqEpoch uint64) bool { for _, atr := range ts.Attributes() { if atr.Key() == objectV2.SysAttributeExpEpoch { epoch, err := strconv.ParseUint(atr.Value(), 10, 64) if err != nil { - g.log.Warn(context.Background(), + g.log.Warn(ctx, logs.TombstoneExpirationParseFailure, zap.Error(err), ) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index c82680a1e..2e5e54dfd 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -122,7 +122,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe shortage-- } else if nodes[i].Status().IsMaintenance() { - shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) + shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) } else { if status := checkedNodes.processStatus(nodes[i]); status.Processed() { if status == nodeHoldsObject { @@ -149,7 +149,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe checkedNodes.submitReplicaCandidate(nodes[i]) continue } else if client.IsErrNodeUnderMaintenance(err) { - shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies) + shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) } else { p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", addr), @@ -173,12 +173,12 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // prevent spam with new replicas. // However, additional copies should not be removed in this case, // because we can remove the only copy this way. 
-func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { +func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { checkedNodes.submitReplicaHolder(node) shortage-- uncheckedCopies++ - p.log.Debug(context.Background(), logs.PolicerConsiderNodeUnderMaintenanceAsOK, + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(node)), ) return shortage, uncheckedCopies diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go index 95bdda34b..e7a13827e 100644 --- a/pkg/services/tree/getsubtree_test.go +++ b/pkg/services/tree/getsubtree_test.go @@ -131,7 +131,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) { t.Run("boltdb forest", func(t *testing.T) { p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))) require.NoError(t, p.Open(context.Background(), 0o644)) - require.NoError(t, p.Init()) + require.NoError(t, p.Init(context.Background())) testGetSubTreeOrderAsc(t, p) }) } diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index e2249c9fb..c48a312fb 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -388,7 +388,7 @@ func (s *Service) syncLoop(ctx context.Context) { break } - newMap, cnrsToSync := s.containersToSync(cnrs) + newMap, cnrsToSync := s.containersToSync(ctx, cnrs) s.syncContainers(ctx, cnrsToSync) @@ -475,14 +475,14 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID } } -func (s *Service) containersToSync(cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) { +func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid.ID]struct{}, []cid.ID) { newMap := make(map[cid.ID]struct{}, len(s.cnrMap)) cnrsToSync := make([]cid.ID, 0, len(cnrs)) for _, cnr := range cnrs { _, pos, err := s.getContainerNodes(cnr) if err != nil { - s.log.Error(context.Background(), logs.TreeCouldNotCalculateContainerNodes, + s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), zap.Error(err)) continue diff --git a/pkg/util/http/calls.go b/pkg/util/http/calls.go index a9877e007..8569ec734 100644 --- a/pkg/util/http/calls.go +++ b/pkg/util/http/calls.go @@ -32,8 +32,8 @@ func (x *Server) Serve() error { // // Once Shutdown has been called on a server, it may not be reused; // future calls to Serve method will have no effect. 
-func (x *Server) Shutdown() error { - ctx, cancel := context.WithTimeout(context.Background(), x.shutdownTimeout) +func (x *Server) Shutdown(ctx context.Context) error { + ctx, cancel := context.WithTimeout(context.WithoutCancel(ctx), x.shutdownTimeout) err := x.srv.Shutdown(ctx) diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go index 6f6b233cf..8c4ea41ad 100644 --- a/scripts/populate-metabase/main.go +++ b/scripts/populate-metabase/main.go @@ -91,15 +91,15 @@ func populate() (err error) { return fmt.Errorf("couldn't open the metabase: %w", err) } defer func() { - if errOnClose := db.Close(); errOnClose != nil { + if errOnClose := db.Close(ctx); errOnClose != nil { err = errors.Join( err, - fmt.Errorf("couldn't close the metabase: %w", db.Close()), + fmt.Errorf("couldn't close the metabase: %w", db.Close(ctx)), ) } }() - if err = db.Init(); err != nil { + if err = db.Init(ctx); err != nil { return fmt.Errorf("couldn't init the metabase: %w", err) } From 612b34d5708c23f888f75f6b6b1e5e87efdeede1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 7 Nov 2024 14:37:42 +0300 Subject: [PATCH 178/591] [#1437] logger: Add caller skip to log original caller position Signed-off-by: Dmitrii Stepanov --- pkg/util/logger/logger.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index b3a1b9b94..19d3f1ed1 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -131,6 +131,7 @@ func newConsoleLogger(prm *Prm) (*Logger, error) { lZap, err := c.Build( zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), + zap.AddCallerSkip(1), ) if err != nil { return nil, err @@ -167,7 +168,7 @@ func newJournaldLogger(prm *Prm) (*Logger, error) { zapjournald.SyslogPid(), }) - lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))) + lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) l := &Logger{z: lZap, lvl: lvl} prm._log = l @@ -190,6 +191,6 @@ func (l *Logger) With(fields ...zap.Field) *Logger { func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ - z: z, + z: z.WithOptions(zap.AddCallerSkip(1)), } } From c6066d6ee4da0e1ac193ad986ca2b8c470f070f7 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 12 Nov 2024 17:15:23 +0300 Subject: [PATCH 179/591] [#1491] engine/test: Use more suitable testing utils here and there Use `setShardsNum` instead of `setInitializedShards` wherever possible. Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/delete_test.go | 6 +----- pkg/local_object_storage/engine/engine_test.go | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 0904c9820..e095e4bbd 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -49,11 +49,7 @@ func TestDeleteBigObject(t *testing.T) { link.SetSplitID(splitID) link.SetChildren(childIDs...) 
- s1 := testNewShard(t) - s2 := testNewShard(t) - s3 := testNewShard(t) - - e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine + e := testNewEngine(t).setShardsNum(t, 3).engine e.log = test.NewLogger(t) defer e.Close(context.Background()) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 88c523b76..44bda2cbc 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -43,12 +43,7 @@ func BenchmarkExists(b *testing.B) { } func benchmarkExists(b *testing.B, shardNum int) { - shards := make([]*shard.Shard, shardNum) - for i := range shardNum { - shards[i] = testNewShard(b) - } - - e := testNewEngine(b).setInitializedShards(b, shards...).engine + e := testNewEngine(b).setShardsNum(b, shardNum).engine defer func() { require.NoError(b, e.Close(context.Background())) }() addr := oidtest.Address() From 7ef36749d0c17d7c4f1da0e684f3db7e7ab69e61 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 13 Nov 2024 11:59:42 +0300 Subject: [PATCH 180/591] [#1491] engine/test: Move `BenchmarkExists` to `exists_test.go` Move `BenchmarkExists` from `engine_test.go` to `exists_test.go` for better organization and clarity. Signed-off-by: Aleksey Savchuk --- .../engine/engine_test.go | 44 ---------------- .../engine/exists_test.go | 51 +++++++++++++++++++ 2 files changed, 51 insertions(+), 44 deletions(-) create mode 100644 pkg/local_object_storage/engine/exists_test.go diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 44bda2cbc..bac35917c 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -10,17 +10,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/hrw" - "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" ) @@ -30,44 +24,6 @@ func (s epochState) CurrentEpoch() uint64 { return 0 } -func BenchmarkExists(b *testing.B) { - b.Run("2 shards", func(b *testing.B) { - benchmarkExists(b, 2) - }) - b.Run("4 shards", func(b *testing.B) { - benchmarkExists(b, 4) - }) - b.Run("8 shards", func(b *testing.B) { - benchmarkExists(b, 8) - }) -} - -func benchmarkExists(b *testing.B, shardNum int) { - e := testNewEngine(b).setShardsNum(b, shardNum).engine - defer func() { require.NoError(b, e.Close(context.Background())) }() - - addr := oidtest.Address() - for range 100 { - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - err := Put(context.Background(), e, obj, false) - if err != nil { - b.Fatal(err) - } - } - - 
b.ReportAllocs() - b.ResetTimer() - for range b.N { - var shPrm shard.ExistsPrm - shPrm.Address = addr - shPrm.ParentAddress = oid.Address{} - ok, _, err := e.exists(context.Background(), shPrm) - if err != nil || ok { - b.Fatalf("%t %v", ok, err) - } - } -} - type testEngineWrapper struct { engine *StorageEngine shardIDs []*shard.ID diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go new file mode 100644 index 000000000..e2e5ff13e --- /dev/null +++ b/pkg/local_object_storage/engine/exists_test.go @@ -0,0 +1,51 @@ +package engine + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + "github.com/stretchr/testify/require" +) + +func BenchmarkExists(b *testing.B) { + b.Run("2 shards", func(b *testing.B) { + benchmarkExists(b, 2) + }) + b.Run("4 shards", func(b *testing.B) { + benchmarkExists(b, 4) + }) + b.Run("8 shards", func(b *testing.B) { + benchmarkExists(b, 8) + }) +} + +func benchmarkExists(b *testing.B, shardNum int) { + e := testNewEngine(b).setShardsNum(b, shardNum).engine + defer func() { require.NoError(b, e.Close(context.Background())) }() + + addr := oidtest.Address() + for range 100 { + obj := testutil.GenerateObjectWithCID(cidtest.ID()) + err := Put(context.Background(), e, obj, false) + if err != nil { + b.Fatal(err) + } + } + + b.ReportAllocs() + b.ResetTimer() + for range b.N { + var shPrm shard.ExistsPrm + shPrm.Address = addr + shPrm.ParentAddress = oid.Address{} + ok, _, err := e.exists(context.Background(), shPrm) + if err != nil || ok { + b.Fatalf("%t %v", ok, err) + } + } +} From 7fc6101bec418f6a5540a1ecf3626be8251d2696 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 13 Nov 2024 13:30:16 +0300 Subject: [PATCH 181/591] [#1491] engine/test: Rework engine test utils - Remove `testNewShard` and `setInitializedShards` because they violated the default engine workflow. The correct workflow is: first use `New()`, followed by `Open()`, and then `Init()`. As a result, adding new logic to `(*StorageEngine).Init` caused several tests to fail with a panic when attempting to access uninitialized resources. Now, all engines created with the test utils must be initialized manually. The new helper method `prepare` can be used for that purpose. - Additionally, `setInitializedShards` hardcoded the shard worker pool size, which prevented it from being configured in tests and benchmarks. This has been fixed as well. - Ensure engine initialization is done wherever it was missing. - Refactor `setShardsNumOpts`, `setShardsNumAdditionalOpts`, and `setShardsNum`. Make them all depend on `setShardsNumOpts`. 
Signed-off-by: Aleksey Savchuk --- .../engine/control_test.go | 8 +- .../engine/delete_test.go | 16 +-- .../engine/engine_test.go | 129 +++++++----------- pkg/local_object_storage/engine/error_test.go | 4 +- .../engine/evacuate_test.go | 5 +- .../engine/exists_test.go | 2 +- pkg/local_object_storage/engine/head_test.go | 8 +- .../engine/inhume_test.go | 13 +- pkg/local_object_storage/engine/list_test.go | 5 +- pkg/local_object_storage/engine/lock_test.go | 17 +-- .../engine/shards_test.go | 4 +- 11 files changed, 88 insertions(+), 123 deletions(-) diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index 83babeca3..c9efc312c 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -164,7 +164,7 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O } func TestExecBlocks(t *testing.T) { - e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many + e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many // put some object obj := testutil.GenerateObjectWithCID(cidtest.ID()) @@ -302,7 +302,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str meta.WithEpochState(epochState{}), ), } - }) + }). + prepare(t) e, ids := te.engine, te.shardIDs for _, id := range ids { @@ -312,8 +313,5 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str require.Equal(t, num, len(e.shards)) require.Equal(t, num, len(e.shardPools)) - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - return e, currShards } diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index e095e4bbd..0dd2e94bb 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -49,9 +48,8 @@ func TestDeleteBigObject(t *testing.T) { link.SetSplitID(splitID) link.SetChildren(childIDs...) - e := testNewEngine(t).setShardsNum(t, 3).engine - e.log = test.NewLogger(t) - defer e.Close(context.Background()) + e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() for i := range children { require.NoError(t, Put(context.Background(), e, children[i], false)) @@ -115,11 +113,13 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { link.SetSplitID(splitID) link.SetChildren(childIDs...) 
- s1 := testNewShard(t, shard.WithDisabledGC()) + te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{shard.WithDisabledGC()} + }).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() - e := testNewEngine(t).setInitializedShards(t, s1).engine - e.log = test.NewLogger(t) - defer e.Close(context.Background()) + s1 := te.shards[0] for i := range children { require.NoError(t, Put(context.Background(), e, children[i], false)) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index bac35917c..a7cb90bae 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -3,7 +3,6 @@ package engine import ( "context" "path/filepath" - "sync/atomic" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" @@ -26,68 +25,77 @@ func (s epochState) CurrentEpoch() uint64 { type testEngineWrapper struct { engine *StorageEngine + shards []*shard.Shard shardIDs []*shard.ID } func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper { - engine := New(WithLogger(test.NewLogger(t))) - for _, opt := range opts { - opt(engine.cfg) - } - return &testEngineWrapper{ - engine: engine, - } -} - -func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper { - for _, s := range shards { - pool, err := ants.NewPool(10, ants.WithNonblocking(true)) - require.NoError(t, err) - - te.engine.shards[s.ID().String()] = hashedShard{ - shardWrapper: shardWrapper{ - errorCount: new(atomic.Uint32), - Shard: s, - }, - hash: hrw.StringHash(s.ID().String()), - } - te.engine.shardPools[s.ID().String()] = pool - te.shardIDs = append(te.shardIDs, s.ID()) - } - return te + opts = append(testGetDefaultEngineOptions(t), opts...) + return &testEngineWrapper{engine: New(opts...)} } func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper { - shards := make([]*shard.Shard, 0, num) - - for range num { - shards = append(shards, testNewShard(t)) - } - - return te.setInitializedShards(t, shards...) + return te.setShardsNumOpts(t, num, func(_ int) []shard.Option { + return testGetDefaultShardOptions(t) + }) } -func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { +func (te *testEngineWrapper) setShardsNumOpts( + t testing.TB, num int, shardOpts func(id int) []shard.Option, +) *testEngineWrapper { + te.shards = make([]*shard.Shard, num) + te.shardIDs = make([]*shard.ID, num) for i := range num { - opts := shardOpts(i) - id, err := te.engine.AddShard(context.Background(), opts...) + shard, err := te.engine.createShard(context.Background(), shardOpts(i)) require.NoError(t, err) - te.shardIDs = append(te.shardIDs, id) + require.NoError(t, te.engine.addShard(shard)) + te.shards[i] = shard + te.shardIDs[i] = shard.ID() } + require.Len(t, te.engine.shards, num) + require.Len(t, te.engine.shardPools, num) return te } -func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := range num { - defaultOpts := testDefaultShardOptions(t) - opts := append(defaultOpts, shardOpts(i)...) - id, err := te.engine.AddShard(context.Background(), opts...) 
- require.NoError(t, err) - te.shardIDs = append(te.shardIDs, id) - } +func (te *testEngineWrapper) setShardsNumAdditionalOpts( + t testing.TB, num int, shardOpts func(id int) []shard.Option, +) *testEngineWrapper { + return te.setShardsNumOpts(t, num, func(id int) []shard.Option { + return append(testGetDefaultShardOptions(t), shardOpts(id)...) + }) +} + +// prepare calls Open and Init on the created engine. +func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper { + require.NoError(t, te.engine.Open(context.Background())) + require.NoError(t, te.engine.Init(context.Background())) return te } +func testGetDefaultEngineOptions(t testing.TB) []Option { + return []Option{ + WithLogger(test.NewLogger(t)), + } +} + +func testGetDefaultShardOptions(t testing.TB) []shard.Option { + return []shard.Option{ + shard.WithLogger(test.NewLogger(t)), + shard.WithBlobStorOptions( + blobstor.WithStorages( + newStorages(t, t.TempDir(), 1<<20)), + blobstor.WithLogger(test.NewLogger(t)), + ), + shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), + shard.WithMetaBaseOptions( + meta.WithPath(filepath.Join(t.TempDir(), "metabase")), + meta.WithPermissions(0o700), + meta.WithEpochState(epochState{}), + meta.WithLogger(test.NewLogger(t)), + ), + } +} + func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage { return []blobstor.SubStorage{ { @@ -137,34 +145,3 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes }, }, smallFileStorage, largeFileStorage } - -func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard { - sid, err := generateShardID() - require.NoError(t, err) - - shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...) - s := shard.New(append(shardOpts, opts...)...) - - require.NoError(t, s.Open(context.Background())) - require.NoError(t, s.Init(context.Background())) - - return s -} - -func testDefaultShardOptions(t testing.TB) []shard.Option { - return []shard.Option{ - shard.WithLogger(test.NewLogger(t)), - shard.WithBlobStorOptions( - blobstor.WithStorages( - newStorages(t, t.TempDir(), 1<<20)), - blobstor.WithLogger(test.NewLogger(t)), - ), - shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - meta.WithLogger(test.NewLogger(t)), - ), - } -} diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 57c423764..d68a7e826 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -67,10 +67,8 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))), pilorama.WithPerm(0o700)), } - }) + }).prepare(t) e := te.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) for i, id := range te.shardIDs { testShards[i].id = id diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 54eacc3f2..beab8384e 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -75,10 +75,9 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng pilorama.WithPerm(0o700), ), } - }) + }). 
+ prepare(t) e, ids := te.engine, te.shardIDs - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) objects := make([]*objectSDK.Object, 0, objPerShard*len(ids)) treeID := "version" diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go index e2e5ff13e..1b51c10dc 100644 --- a/pkg/local_object_storage/engine/exists_test.go +++ b/pkg/local_object_storage/engine/exists_test.go @@ -25,7 +25,7 @@ func BenchmarkExists(b *testing.B) { } func benchmarkExists(b *testing.B, shardNum int) { - e := testNewEngine(b).setShardsNum(b, shardNum).engine + e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine defer func() { require.NoError(b, e.Close(context.Background())) }() addr := oidtest.Address() diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go index 5afc50f07..f9db81f16 100644 --- a/pkg/local_object_storage/engine/head_test.go +++ b/pkg/local_object_storage/engine/head_test.go @@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) { link.SetSplitID(splitID) t.Run("virtual object split in different shards", func(t *testing.T) { - s1 := testNewShard(t) - s2 := testNewShard(t) + te := testNewEngine(t).setShardsNum(t, 2).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() - e := testNewEngine(t).setInitializedShards(t, s1, s2).engine - defer e.Close(context.Background()) + s1, s2 := te.shards[0], te.shards[1] var putPrmLeft shard.PutPrm putPrmLeft.SetObject(child) diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index b4fbbd810..6980afb07 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -37,8 +37,8 @@ func TestStorageEngine_Inhume(t *testing.T) { t.Run("delete small object", func(t *testing.T) { t.Parallel() - e := testNewEngine(t).setShardsNum(t, 1).engine - defer e.Close(context.Background()) + e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() err := Put(context.Background(), e, parent, false) require.NoError(t, err) @@ -56,11 +56,12 @@ func TestStorageEngine_Inhume(t *testing.T) { t.Run("delete big object", func(t *testing.T) { t.Parallel() - s1 := testNewShard(t) - s2 := testNewShard(t) - e := testNewEngine(t).setInitializedShards(t, s1, s2).engine - defer e.Close(context.Background()) + te := testNewEngine(t).setShardsNum(t, 2).prepare(t) + e := te.engine + defer func() { require.NoError(t, e.Close(context.Background())) }() + + s1, s2 := te.shards[0], te.shards[1] var putChild shard.PutPrm putChild.SetObject(child) diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index d683b5475..6cfa546f8 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ b/pkg/local_object_storage/engine/list_test.go @@ -68,10 +68,7 @@ func TestListWithCursor(t *testing.T) { meta.WithEpochState(epochState{}), ), } - }).engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - + }).prepare(t).engine defer func() { require.NoError(t, e.Close(context.Background())) }() diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 7e15c76f5..feca9cb69 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ 
b/pkg/local_object_storage/engine/lock_test.go @@ -57,11 +57,9 @@ func TestLockUserScenario(t *testing.T) { }), shard.WithTombstoneSource(tss{lockerExpiresAfter}), } - }) + }). + prepare(t) e := testEngine.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - defer func() { require.NoError(t, e.Close(context.Background())) }() lockerID := oidtest.ID() @@ -162,11 +160,9 @@ func TestLockExpiration(t *testing.T) { return pool }), } - }) + }). + prepare(t) e := testEngine.engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) - defer func() { require.NoError(t, e.Close(context.Background())) }() const lockerExpiresAfter = 13 @@ -243,9 +239,8 @@ func TestLockForceRemoval(t *testing.T) { }), shard.WithDeletedLockCallback(e.processDeletedLocks), } - }).engine - require.NoError(t, e.Open(context.Background())) - require.NoError(t, e.Init(context.Background())) + }). + prepare(t).engine defer func() { require.NoError(t, e.Close(context.Background())) }() cnr := cidtest.ID() diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 207491bd4..0bbc7563c 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -13,7 +13,7 @@ import ( func TestRemoveShard(t *testing.T) { const numOfShards = 6 - te := testNewEngine(t).setShardsNum(t, numOfShards) + te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() @@ -51,7 +51,7 @@ func TestDisableShards(t *testing.T) { const numOfShards = 2 - te := testNewEngine(t).setShardsNum(t, numOfShards) + te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t) e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() From f7cb6b4d87ec9869d371e569b49cdf418d26a756 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 13 Nov 2024 16:26:40 +0300 Subject: [PATCH 182/591] [#1482] Makefile: Update golangci-lint Signed-off-by: Alexander Chuprov --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c54601c73..ecac760e9 100755 --- a/Makefile +++ b/Makefile @@ -8,8 +8,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.61.0 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.7 +LINT_VERSION ?= 1.62.0 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 From 6dc0dc66919a19e08b6542cbdb5569b78f432121 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 18 Oct 2024 13:45:06 +0300 Subject: [PATCH 183/591] [#1493] shard: Take mode mutex in HandleExpiredTombstones() Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/shard/gc.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index a987d3d14..57c21459c 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -627,7 +627,10 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid // // Does not modify tss. 
func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.TombstonedObject) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } From 1e6f132b4e59bfc7c341e44a4612de6b2924a555 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 18 Oct 2024 13:57:16 +0300 Subject: [PATCH 184/591] [#1493] metabase: Pass InhumePrm by value Unify with the other code, no functional changes. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/inhume.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 8d1e18729..915de5262 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -217,7 +217,7 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes garbageBKT := tx.Bucket(garbageBucketName) graveyardBKT := tx.Bucket(graveyardBucketName) - bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, &prm) + bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm) if err != nil { return err } @@ -354,7 +354,7 @@ func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { // 1. tombstone address if Inhume was called with // a Tombstone // 2. zeroValue if Inhume was called with a GC mark -func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm *InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) { +func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Bucket, prm InhumePrm) (targetBucket *bbolt.Bucket, value []byte, err error) { if prm.tomb != nil { targetBucket = graveyardBKT tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize)) From 44df67492f8f933c2f923e7660d9a58949e21a57 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 18 Oct 2024 14:08:57 +0300 Subject: [PATCH 185/591] [#1493] metabase: Split inhumeTx() into 2 functions No functional changes. 
Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/inhume.go | 138 +++++++++++--------- 1 file changed, 73 insertions(+), 65 deletions(-) diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 915de5262..5ac0c0be5 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -224,78 +224,86 @@ func (db *DB) inhumeTx(tx *bbolt.Tx, epoch uint64, prm InhumePrm, res *InhumeRes buf := make([]byte, addressKeySize) for i := range prm.target { - id := prm.target[i].Object() - cnr := prm.target[i].Container() - - // prevent locked objects to be inhumed - if !prm.forceRemoval && objectLocked(tx, cnr, id) { - return new(apistatus.ObjectLocked) - } - - var lockWasChecked bool - - // prevent lock objects to be inhumed - // if `Inhume` was called not with the - // `WithForceGCMark` option - if !prm.forceRemoval { - if isLockObject(tx, cnr, id) { - return ErrLockObjectRemoval - } - - lockWasChecked = true - } - - obj, err := db.get(tx, prm.target[i], buf, false, true, epoch) - targetKey := addressKey(prm.target[i], buf) - var ecErr *objectSDK.ECInfoError - if err == nil { - err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res) - if err != nil { - return err - } - } else if errors.As(err, &ecErr) { - err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value) - if err != nil { - return err - } - } - - if prm.tomb != nil { - var isTomb bool - isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey) - if err != nil { - return err - } - - if isTomb { - continue - } - } - - // consider checking if target is already in graveyard? - err = bkt.Put(targetKey, value) - if err != nil { + if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, prm.target[i], buf, epoch, prm, res); err != nil { return err } - - if prm.lockObjectHandling { - // do not perform lock check if - // it was already called - if lockWasChecked { - // inhumed object is not of - // the LOCK type - continue - } - - if isLockObject(tx, cnr, id) { - res.deletedLockObj = append(res.deletedLockObj, prm.target[i]) - } - } } return db.applyInhumeResToCounters(tx, res) } +func (db *DB) inhumeTxSingle(bkt *bbolt.Bucket, value []byte, graveyardBKT, garbageBKT *bbolt.Bucket, addr oid.Address, buf []byte, epoch uint64, prm InhumePrm, res *InhumeRes) error { + id := addr.Object() + cnr := addr.Container() + tx := bkt.Tx() + + // prevent locked objects to be inhumed + if !prm.forceRemoval && objectLocked(tx, cnr, id) { + return new(apistatus.ObjectLocked) + } + + var lockWasChecked bool + + // prevent lock objects to be inhumed + // if `Inhume` was called not with the + // `WithForceGCMark` option + if !prm.forceRemoval { + if isLockObject(tx, cnr, id) { + return ErrLockObjectRemoval + } + + lockWasChecked = true + } + + obj, err := db.get(tx, addr, buf, false, true, epoch) + targetKey := addressKey(addr, buf) + var ecErr *objectSDK.ECInfoError + if err == nil { + err = db.updateDeleteInfo(tx, garbageBKT, graveyardBKT, targetKey, cnr, obj, res) + if err != nil { + return err + } + } else if errors.As(err, &ecErr) { + err = db.inhumeECInfo(tx, epoch, prm.tomb, res, garbageBKT, graveyardBKT, ecErr.ECInfo(), cnr, bkt, value) + if err != nil { + return err + } + } + + if prm.tomb != nil { + var isTomb bool + isTomb, err = db.markAsGC(graveyardBKT, garbageBKT, targetKey) + if err != nil { + return err + } + + if isTomb { + return nil + } + } + + // 
consider checking if target is already in graveyard? + err = bkt.Put(targetKey, value) + if err != nil { + return err + } + + if prm.lockObjectHandling { + // do not perform lock check if + // it was already called + if lockWasChecked { + // inhumed object is not of + // the LOCK type + return nil + } + + if isLockObject(tx, cnr, id) { + res.deletedLockObj = append(res.deletedLockObj, addr) + } + } + return nil +} + func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *InhumeRes, garbageBKT *bbolt.Bucket, graveyardBKT *bbolt.Bucket, ecInfo *objectSDK.ECInfo, cnr cid.ID, targetBucket *bbolt.Bucket, value []byte, From d77a218f7c1a449369eb6d63e00ae1906984aed4 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 13 Nov 2024 15:56:16 +0300 Subject: [PATCH 186/591] [#1493] metabase: Merge Inhume() and DropGraves() for tombstones DropGraves() is only used to drop gravemarks after a tombstone removal. Thus, it makes sense to do Inhume() and DropGraves() in one transaction. It has less overhead and no unexpected problems in case of sudden power failure. Signed-off-by: Evgenii Stratonikov --- .../metabase/delete_ec_test.go | 20 +--------- .../metabase/graveyard.go | 39 ++++++++++++------- .../metabase/graveyard_test.go | 26 ++++++++++--- pkg/local_object_storage/shard/gc.go | 21 +--------- 4 files changed, 50 insertions(+), 56 deletions(-) diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go index 9f1f91e14..884da23ff 100644 --- a/pkg/local_object_storage/metabase/delete_ec_test.go +++ b/pkg/local_object_storage/metabase/delete_ec_test.go @@ -130,17 +130,9 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) { require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) require.Equal(t, 2, len(tombstonedObjects)) - var tombstones []oid.Address - for _, tss := range tombstonedObjects { - tombstones = append(tombstones, tss.tomb) - } - inhumePrm.SetAddresses(tombstones...) - inhumePrm.SetGCMark() - _, err = db.Inhume(context.Background(), inhumePrm) + _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) require.NoError(t, err) - require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects)) - // GC finds tombstone as garbage and deletes it garbageAddresses = nil @@ -374,17 +366,9 @@ func testDeleteECObjectWithSplit(t *testing.T, chunksCount int, withLinking bool require.NoError(t, db.IterateOverGraveyard(context.Background(), graveyardIterationPrm)) require.True(t, len(tombstonedObjects) == parentCount+chunksCount) - var tombstones []oid.Address - for _, tss := range tombstonedObjects { - tombstones = append(tombstones, tss.tomb) - } - inhumePrm.SetAddresses(tombstones...) 
- inhumePrm.SetGCMark() - _, err = db.Inhume(context.Background(), inhumePrm) + _, err = db.InhumeTombstones(context.Background(), tombstonedObjects) require.NoError(t, err) - require.NoError(t, db.DropGraves(context.Background(), tombstonedObjects)) - // GC finds tombstone as garbage and deletes it garbageAddresses = nil diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go index 31f95d6ed..b0db952b2 100644 --- a/pkg/local_object_storage/metabase/graveyard.go +++ b/pkg/local_object_storage/metabase/graveyard.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.etcd.io/bbolt" ) @@ -255,46 +256,58 @@ func graveFromKV(k, v []byte) (res TombstonedObject, err error) { return } -// DropGraves deletes tombstoned objects from the +// InhumeTombstones deletes tombstoned objects from the // graveyard bucket. // // Returns any error appeared during deletion process. -func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error { +func (db *DB) InhumeTombstones(ctx context.Context, tss []TombstonedObject) (InhumeRes, error) { var ( startedAt = time.Now() success = false ) defer func() { - db.metrics.AddMethodDuration("DropGraves", time.Since(startedAt), success) + db.metrics.AddMethodDuration("InhumeTombstones", time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.DropGraves") + _, span := tracing.StartSpanFromContext(ctx, "metabase.InhumeTombstones") defer span.End() db.modeMtx.RLock() defer db.modeMtx.RUnlock() if db.mode.NoMetabase() { - return ErrDegradedMode + return InhumeRes{}, ErrDegradedMode } else if db.mode.ReadOnly() { - return ErrReadOnlyMode + return InhumeRes{}, ErrReadOnlyMode } buf := make([]byte, addressKeySize) + prm := InhumePrm{forceRemoval: true} + currEpoch := db.epochState.CurrentEpoch() - return db.boltDB.Batch(func(tx *bbolt.Tx) error { - bkt := tx.Bucket(graveyardBucketName) - if bkt == nil { - return nil + var res InhumeRes + + err := db.boltDB.Batch(func(tx *bbolt.Tx) error { + res = InhumeRes{inhumedByCnrID: make(map[cid.ID]ObjectCounters)} + + garbageBKT := tx.Bucket(garbageBucketName) + graveyardBKT := tx.Bucket(graveyardBucketName) + + bkt, value, err := db.getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT, prm) + if err != nil { + return err } - for _, ts := range tss { - err := bkt.Delete(addressKey(ts.Address(), buf)) - if err != nil { + for i := range tss { + if err := db.inhumeTxSingle(bkt, value, graveyardBKT, garbageBKT, tss[i].Tombstone(), buf, currEpoch, prm, &res); err != nil { + return err + } + if err := graveyardBKT.Delete(addressKey(tss[i].Address(), buf)); err != nil { return err } } return nil }) + return res, err } diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index 99794e609..ebadecc04 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -7,7 +7,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" @@ -393,7 +395,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { require.False(t, iWasCalled) } -func TestDB_DropGraves(t *testing.T) { +func TestDB_InhumeTombstones(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close(context.Background())) }() @@ -410,9 +412,20 @@ func TestDB_DropGraves(t *testing.T) { err = putBig(db, obj2) require.NoError(t, err) - // inhume with tombstone - addrTombstone := oidtest.Address() - addrTombstone.SetContainer(cnr) + id1, _ := obj1.ID() + id2, _ := obj2.ID() + ts := objectSDK.NewTombstone() + ts.SetMembers([]oid.ID{id1, id2}) + objTs := objectSDK.New() + objTs.SetContainerID(cnr) + objTs.SetType(objectSDK.TypeTombstone) + + data, _ := ts.Marshal() + objTs.SetPayload(data) + require.NoError(t, objectSDK.CalculateAndSetID(objTs)) + require.NoError(t, putBig(db, objTs)) + + addrTombstone := object.AddressOf(objTs) var inhumePrm meta.InhumePrm inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) @@ -435,8 +448,11 @@ func TestDB_DropGraves(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, counter) - err = db.DropGraves(context.Background(), buriedTS) + res, err := db.InhumeTombstones(context.Background(), buriedTS) require.NoError(t, err) + require.EqualValues(t, 1, res.LogicInhumed()) + require.EqualValues(t, 0, res.UserInhumed()) + require.EqualValues(t, map[cid.ID]meta.ObjectCounters{cnr: {Logic: 1}}, res.InhumedByCnrID()) counter = 0 iterGravePRM.SetHandler(func(_ meta.TombstonedObject) error { diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 57c21459c..c212f8c36 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -634,19 +634,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston return } - // Mark tombstones as garbage. - var pInhume meta.InhumePrm - - tsAddrs := make([]oid.Address, 0, len(tss)) - for _, ts := range tss { - tsAddrs = append(tsAddrs, ts.Tombstone()) - } - - pInhume.SetGCMark() - pInhume.SetAddresses(tsAddrs...) - - // inhume tombstones - res, err := s.metaBase.Inhume(ctx, pInhume) + res, err := s.metaBase.InhumeTombstones(ctx, tss) if err != nil { s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.String("error", err.Error()), @@ -666,13 +654,6 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston s.addToContainerSize(delInfo.CID.EncodeToString(), -int64(delInfo.Size)) i++ } - - // drop just processed expired tombstones - // from graveyard - err = s.metaBase.DropGraves(ctx, tss) - if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotDropExpiredGraveRecords, zap.Error(err)) - } } // HandleExpiredLocks unlocks all objects which were locked by lockers. 
From 69c63006da57d182618eb1ef498e3aaac9888783 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 14 Nov 2024 09:52:35 +0300 Subject: [PATCH 187/591] [#1496] morph: Move tx waiter to morph package Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/morph.go | 49 ++++---------------------------- pkg/morph/client/waiter.go | 57 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 44 deletions(-) create mode 100644 pkg/morph/client/waiter.go diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index ca9f4fe3e..4e33ad6e2 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -17,11 +17,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/neorpc/result" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "go.uber.org/zap" ) @@ -164,48 +160,13 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error return c.cfgMorph.client.DepositEndlessNotary(ctx, depositAmount) } -var ( - errNotaryDepositFail = errors.New("notary deposit tx has faulted") - errNotaryDepositTimeout = errors.New("notary deposit tx has not appeared in the network") -) - -type waiterClient struct { - c *client.Client -} - -func (w *waiterClient) Context() context.Context { - return context.Background() -} - -func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { - return w.c.GetApplicationLog(hash, trig) -} - -func (w *waiterClient) GetBlockCount() (uint32, error) { - return w.c.BlockCount() -} - -func (w *waiterClient) GetVersion() (*result.Version, error) { - return w.c.GetVersion() -} - func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - w, err := waiter.NewPollingBased(&waiterClient{c: c.cfgMorph.client}) - if err != nil { - return fmt.Errorf("could not create notary deposit waiter: %w", err) + if err := c.cfgMorph.client.WaitTxHalt(ctx, client.WaitParams{Hash: tx, ValidUntilBlock: vub}); err != nil { + return err } - res, err := w.WaitAny(ctx, vub, tx) - if err != nil { - if errors.Is(err, waiter.ErrTxNotAccepted) { - return errNotaryDepositTimeout - } - return fmt.Errorf("could not wait for notary deposit persists in chain: %w", err) - } - if res.Execution.VMState.HasFlag(vmstate.Halt) { - c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) - return nil - } - return errNotaryDepositFail + + c.log.Info(ctx, logs.ClientNotaryDepositTransactionWasSuccessfullyPersisted) + return nil } func listenMorphNotifications(ctx context.Context, c *cfg) { diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go new file mode 100644 index 000000000..8211deac4 --- /dev/null +++ b/pkg/morph/client/waiter.go @@ -0,0 +1,57 @@ +package client + +import ( + "context" + "fmt" + + "github.com/nspcc-dev/neo-go/pkg/neorpc/result" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/waiter" + "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" +) + +// WaitParams represents transaction to wait for. 
+type WaitParams struct { + Hash util.Uint256 + ValidUntilBlock uint32 +} + +type waiterClient struct { + c *Client +} + +func (w *waiterClient) Context() context.Context { + return context.Background() +} + +func (w *waiterClient) GetApplicationLog(hash util.Uint256, trig *trigger.Type) (*result.ApplicationLog, error) { + return w.c.GetApplicationLog(hash, trig) +} + +func (w *waiterClient) GetBlockCount() (uint32, error) { + return w.c.BlockCount() +} + +func (w *waiterClient) GetVersion() (*result.Version, error) { + return w.c.GetVersion() +} + +// WaitTxHalt waits until transaction with the specified hash persists on the blockchain. +// It also checks execution result to finish in HALT state. +func (c *Client) WaitTxHalt(ctx context.Context, p WaitParams) error { + w, err := waiter.NewPollingBased(&waiterClient{c: c}) + if err != nil { + return fmt.Errorf("create tx waiter: %w", err) + } + + res, err := w.WaitAny(ctx, p.ValidUntilBlock, p.Hash) + if err != nil { + return fmt.Errorf("wait until tx persists: %w", err) + } + + if res.VMState.HasFlag(vmstate.Halt) { + return nil + } + return wrapFrostFSError(¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}) +} From b65874d1c35fe194a20d25bf19c8d82c633a04b6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 14 Nov 2024 10:01:59 +0300 Subject: [PATCH 188/591] [#1496] morph: Return `InvokeRes` from all invoke*() methods Signed-off-by: Evgenii Stratonikov --- .../processors/alphabet/handlers_test.go | 5 +++-- .../processors/alphabet/processor.go | 3 ++- pkg/morph/client/client.go | 8 +++---- pkg/morph/client/notary.go | 22 +++++++++---------- pkg/morph/client/static.go | 14 +++++------- 5 files changed, 25 insertions(+), 27 deletions(-) diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index 036b8055c..ac3e2a14d 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/alphabet" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/timers" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -247,7 +248,7 @@ type testMorphClient struct { batchTransferedGas []batchTransferGas } -func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) { c.invokedMethods = append(c.invokedMethods, invokedMethod{ contract: contract, @@ -255,7 +256,7 @@ func (c *testMorphClient) Invoke(_ context.Context, contract util.Uint160, fee f method: method, args: args, }) - return 0, nil + return client.InvokeRes{}, nil } func (c *testMorphClient) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error { diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index bf74834ed..3992e00f3 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -8,6 +8,7 @@ import ( "time" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/metrics" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -39,7 +40,7 @@ type ( } morphClient interface { - Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) + Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (client.InvokeRes, error) TransferGas(receiver util.Uint160, amount fixedn.Fixed8) error BatchTransferGas(receivers []util.Uint160, amount fixedn.Fixed8) error } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index f61c6e9f9..a0c29141b 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -180,7 +180,7 @@ func wrapFrostFSError(err error) error { // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. -func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (uint32, error) { +func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, method string, args ...any) (InvokeRes, error) { start := time.Now() success := false defer func() { @@ -191,12 +191,12 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...) if err != nil { - return 0, fmt.Errorf("could not invoke %s: %w", method, err) + return InvokeRes{}, fmt.Errorf("could not invoke %s: %w", method, err) } c.logger.Debug(ctx, logs.ClientNeoClientInvoke, @@ -205,7 +205,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F zap.Stringer("tx_hash", txHash.Reverse())) success = true - return vub, nil + return InvokeRes{Hash: txHash, VUB: vub}, nil } // TestInvokeIterator invokes contract method returning an iterator and executes cb on each element. diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 65a5e77a6..71232cb33 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -358,12 +358,12 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet // Returns valid until block value. // // `nonce` and `vub` are used only if notary is enabled. -func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } if c.notary == nil { @@ -378,12 +378,12 @@ func (c *Client) NotaryInvoke(ctx context.Context, contract util.Uint160, fee fi // not expected to be signed by the current node. // // Considered to be used by non-IR nodes. 
-func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (uint32, error) { +func (c *Client) NotaryInvokeNotAlpha(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, vubP *uint32, method string, args ...any) (InvokeRes, error) { c.switchLock.RLock() defer c.switchLock.RUnlock() if c.inactive { - return 0, ErrConnectionLost + return InvokeRes{}, ErrConnectionLost } if c.notary == nil { @@ -446,7 +446,7 @@ func (c *Client) notaryInvokeAsCommittee(ctx context.Context, method string, non return err } -func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (uint32, error) { +func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha bool, contract util.Uint160, nonce uint32, vub *uint32, method string, args ...any) (InvokeRes, error) { start := time.Now() success := false defer func() { @@ -455,22 +455,22 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo alphabetList, err := c.notary.alphabetSource() if err != nil { - return 0, err + return InvokeRes{}, err } until, err := c.getUntilValue(vub) if err != nil { - return 0, err + return InvokeRes{}, err } cosigners, err := c.notaryCosigners(invokedByAlpha, alphabetList, committee) if err != nil { - return 0, err + return InvokeRes{}, err } nAct, err := notary.NewActor(c.client, cosigners, c.acc) if err != nil { - return 0, err + return InvokeRes{}, err } mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { @@ -485,7 +485,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo }, args...)) if err != nil && !alreadyOnChainError(err) { - return 0, err + return InvokeRes{}, err } c.logger.Debug(ctx, logs.ClientNotaryRequestInvoked, @@ -495,7 +495,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo zap.String("fallback_hash", fbH.StringLE())) success = true - return until, nil + return InvokeRes{Hash: mainH, VUB: until}, nil } func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabetList keys.PublicKeys) ([]actor.SignerAccount, error) { diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index 1e091936f..be4c09182 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -129,7 +129,8 @@ func (i *InvokePrmOptional) SetVUB(v uint32) { } type InvokeRes struct { - VUB uint32 + Hash util.Uint256 + VUB uint32 } // Invoke calls Invoke method of Client with static internal script hash and fee. @@ -142,8 +143,6 @@ type InvokeRes struct { // If fee for the operation executed using specified method is customized, then StaticClient uses it. // Otherwise, default fee is used. func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, error) { - var res InvokeRes - var err error var vubP *uint32 if s.tryNotary { if s.alpha { @@ -170,26 +169,23 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) - return res, err + return s.client.NotaryInvoke(ctx, s.scScriptHash, s.fee, nonce, vubP, prm.method, prm.args...) 
} if prm.vub > 0 { vubP = &prm.vub } - res.VUB, err = s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) - return res, err + return s.client.NotaryInvokeNotAlpha(ctx, s.scScriptHash, s.fee, vubP, prm.method, prm.args...) } - res.VUB, err = s.client.Invoke( + return s.client.Invoke( ctx, s.scScriptHash, s.fee, prm.method, prm.args..., ) - return res, err } // TestInvokePrm groups parameters of the TestInvoke operation. From acd5babd86ec98f227a30573497af0cdcd53df87 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 14 Nov 2024 10:04:02 +0300 Subject: [PATCH 189/591] [#1496] morph: Merge `InvokeRes` and `WaitParams` Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/morph.go | 2 +- pkg/morph/client/waiter.go | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 4e33ad6e2..67d2d1c06 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -161,7 +161,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error } func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - if err := c.cfgMorph.client.WaitTxHalt(ctx, client.WaitParams{Hash: tx, ValidUntilBlock: vub}); err != nil { + if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil { return err } diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 8211deac4..962ec1bc2 100644 --- a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -11,12 +11,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) -// WaitParams represents transaction to wait for. -type WaitParams struct { - Hash util.Uint256 - ValidUntilBlock uint32 -} - type waiterClient struct { c *Client } @@ -39,13 +33,13 @@ func (w *waiterClient) GetVersion() (*result.Version, error) { // WaitTxHalt waits until transaction with the specified hash persists on the blockchain. // It also checks execution result to finish in HALT state. -func (c *Client) WaitTxHalt(ctx context.Context, p WaitParams) error { +func (c *Client) WaitTxHalt(ctx context.Context, p InvokeRes) error { w, err := waiter.NewPollingBased(&waiterClient{c: c}) if err != nil { return fmt.Errorf("create tx waiter: %w", err) } - res, err := w.WaitAny(ctx, p.ValidUntilBlock, p.Hash) + res, err := w.WaitAny(ctx, p.VUB, p.Hash) if err != nil { return fmt.Errorf("wait until tx persists: %w", err) } From d82f0d192691f33eb63e2d71beac433a4232b163 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 14 Nov 2024 15:09:42 +0300 Subject: [PATCH 190/591] [#1496] node/control: Await until SetNetmapStatus() persists Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/netmap.go | 7 +++++-- pkg/morph/client/netmap/peer.go | 4 ++-- pkg/morph/client/netmap/update_state.go | 9 ++------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 6df947954..9127d1123 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -421,8 +421,11 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient. 
prm.SetKey(c.key.PublicKey().Bytes()) stateSetter(&prm) - _, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) - return err + res, err := c.cfgNetmap.wrapper.UpdatePeerState(ctx, prm) + if err != nil { + return err + } + return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res) } type netInfo struct { diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go index 9617d018c..949e8cb63 100644 --- a/pkg/morph/client/netmap/peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -58,9 +58,9 @@ func (c *Client) ForceRemovePeer(ctx context.Context, nodeInfo netmap.NodeInfo, prm.SetControlTX(true) prm.SetVUB(vub) - vub, err := c.UpdatePeerState(ctx, prm) + res, err := c.UpdatePeerState(ctx, prm) if err != nil { return 0, fmt.Errorf("updating peer state: %v", err) } - return vub, nil + return res.VUB, nil } diff --git a/pkg/morph/client/netmap/update_state.go b/pkg/morph/client/netmap/update_state.go index 971a55d33..f9f639c19 100644 --- a/pkg/morph/client/netmap/update_state.go +++ b/pkg/morph/client/netmap/update_state.go @@ -2,7 +2,6 @@ package netmap import ( "context" - "fmt" "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -37,7 +36,7 @@ func (u *UpdatePeerPrm) SetMaintenance() { } // UpdatePeerState changes peer status through Netmap contract call. -func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (uint32, error) { +func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (client.InvokeRes, error) { method := updateStateMethod if c.client.WithNotary() && c.client.IsAlpha() { @@ -56,9 +55,5 @@ func (c *Client) UpdatePeerState(ctx context.Context, p UpdatePeerPrm) (uint32, prm.SetArgs(int64(p.state), p.key) prm.InvokePrmOptional = p.InvokePrmOptional - res, err := c.client.Invoke(ctx, prm) - if err != nil { - return 0, fmt.Errorf("could not invoke smart contract: %w", err) - } - return res.VUB, nil + return c.client.Invoke(ctx, prm) } From 9f4ce600ac98bd34bb097b73d45b148ec4f75b26 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 19 Nov 2024 16:25:16 +0300 Subject: [PATCH 191/591] [#1505] adm: Allow to manage additional keys in frostfsid Signed-off-by: Evgenii Stratonikov --- .../morph/frostfsid/additional_keys.go | 83 +++++++++++++++++++ .../internal/modules/morph/frostfsid/root.go | 2 + 2 files changed, 85 insertions(+) create mode 100644 cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go new file mode 100644 index 000000000..4046e85e3 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/additional_keys.go @@ -0,0 +1,83 @@ +package frostfsid + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + frostfsidAddSubjectKeyCmd = &cobra.Command{ + Use: "add-subject-key", + Short: "Add a public key to the subject in frostfsid contract", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidAddSubjectKey, + } + frostfsidRemoveSubjectKeyCmd = &cobra.Command{ + Use: "remove-subject-key", + 
Short: "Remove a public key from the subject in frostfsid contract", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidRemoveSubjectKey, + } +) + +func initFrostfsIDAddSubjectKeyCmd() { + Cmd.AddCommand(frostfsidAddSubjectKeyCmd) + + ff := frostfsidAddSubjectKeyCmd.Flags() + ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + + ff.String(subjectAddressFlag, "", "Subject address") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) + + ff.String(subjectKeyFlag, "", "Public key to add") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) +} + +func initFrostfsIDRemoveSubjectKeyCmd() { + Cmd.AddCommand(frostfsidRemoveSubjectKeyCmd) + + ff := frostfsidRemoveSubjectKeyCmd.Flags() + ff.StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + ff.String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + + ff.String(subjectAddressFlag, "", "Subject address") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectAddressFlag) + + ff.String(subjectKeyFlag, "", "Public key to remove") + _ = frostfsidAddSubjectKeyCmd.MarkFlagRequired(subjectKeyFlag) +} + +func frostfsidAddSubjectKey(cmd *cobra.Command, _ []string) { + addr := getFrostfsIDSubjectAddress(cmd) + pub := getFrostfsIDSubjectKey(cmd) + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + ffsid.addCall(ffsid.roCli.AddSubjectKeyCall(addr, pub)) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "add subject key: %w", err) +} + +func frostfsidRemoveSubjectKey(cmd *cobra.Command, _ []string) { + addr := getFrostfsIDSubjectAddress(cmd) + pub := getFrostfsIDSubjectKey(cmd) + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + ffsid.addCall(ffsid.roCli.RemoveSubjectKeyCall(addr, pub)) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "remove subject key: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 850474794..6ffcaa487 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,4 +12,6 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() + initFrostfsIDAddSubjectKeyCmd() + initFrostfsIDRemoveSubjectKeyCmd() } From e2cb0640f1236ea1874daeb18472ed77da30df90 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 12:19:13 +0300 Subject: [PATCH 192/591] [#1501] util: Move eACL-to-APE converter to `pkg/util` * `ConvertEACLToAPE` is useful method which couldn't be imported out of frostfs-node so far as it has been in `internal` * Since `ConvertEACLToAPE` and related structures and unit-tests are placed in `pkg/util` Signed-off-by: Airat Arifullin --- cmd/frostfs-cli/modules/util/convert_eacl.go | 2 +- {internal => pkg/util}/ape/converter.go | 0 {internal => pkg/util}/ape/converter_test.go | 0 3 files changed, 1 insertion(+), 1 deletion(-) rename {internal => pkg/util}/ape/converter.go (100%) rename {internal => 
pkg/util}/ape/converter_test.go (100%) diff --git a/cmd/frostfs-cli/modules/util/convert_eacl.go b/cmd/frostfs-cli/modules/util/convert_eacl.go index d588ba35d..caa6dfcfe 100644 --- a/cmd/frostfs-cli/modules/util/convert_eacl.go +++ b/cmd/frostfs-cli/modules/util/convert_eacl.go @@ -6,7 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" + apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) diff --git a/internal/ape/converter.go b/pkg/util/ape/converter.go similarity index 100% rename from internal/ape/converter.go rename to pkg/util/ape/converter.go diff --git a/internal/ape/converter_test.go b/pkg/util/ape/converter_test.go similarity index 100% rename from internal/ape/converter_test.go rename to pkg/util/ape/converter_test.go From ae31ef36029e0f8d6e7934faa5ccfd9a32d2a42a Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 12:37:55 +0300 Subject: [PATCH 193/591] [#1501] cli: Move `PrintHumanReadableAPEChain` to a common package * Both `frostfs-cli` and `frostfs-adm` APE-related subcommands use `PrintHumanReadableAPEChain` to print a parsed APE-chain. So, it's more correct to have it in a common package over `frostfs-cli` and `frostfs-adm` folders. Signed-off-by: Airat Arifullin --- .../internal/modules/morph/ape/ape.go | 4 +- .../internal/modules/morph/ape/ape_util.go | 3 +- .../modules/ape_manager/add_chain.go | 3 +- .../modules/ape_manager/list_chain.go | 4 +- .../modules/bearer/generate_override.go | 3 +- cmd/frostfs-cli/modules/control/add_rule.go | 3 +- cmd/frostfs-cli/modules/control/get_rule.go | 4 +- cmd/frostfs-cli/modules/control/list_rules.go | 4 +- cmd/frostfs-cli/modules/util/ape.go | 34 --------------- cmd/internal/common/ape/commands.go | 41 +++++++++++++++++++ 10 files changed, 57 insertions(+), 46 deletions(-) create mode 100644 cmd/internal/common/ape/commands.go diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go index 077e03737..fb363f903 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go @@ -5,8 +5,8 @@ import ( "encoding/json" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/spf13/cobra" @@ -210,7 +210,7 @@ func listRuleChains(cmd *cobra.Command, _ []string) { prettyJSONFormat(cmd, chains) } else { for _, c := range chains { - parseutil.PrintHumanReadableAPEChain(cmd, c) + apeCmd.PrintHumanReadableAPEChain(cmd, c) } } } diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index f4373c535..df358ff69 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -8,6 +8,7 @@ import ( 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" @@ -90,7 +91,7 @@ func parseChain(cmd *cobra.Command) *apechain.Chain { chain.ID = parseChainID(cmd) cmd.Println("Parsed chain:") - parseutil.PrintHumanReadableAPEChain(cmd, chain) + apeCmd.PrintHumanReadableAPEChain(cmd, chain) return chain } diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go index a85f3c93e..d9cfc304c 100644 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -95,7 +96,7 @@ func parseChain(cmd *cobra.Command) apeSDK.Chain { } cmd.Println("Parsed chain:") - util.PrintHumanReadableAPEChain(cmd, chain) + apeCmd.PrintHumanReadableAPEChain(cmd, chain) serialized := chain.Bytes() return apeSDK.Chain{ diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go index a5dd44614..9955c8c03 100644 --- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/list_chain.go @@ -4,8 +4,8 @@ import ( internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" @@ -35,7 +35,7 @@ func list(cmd *cobra.Command, _ []string) { for _, respChain := range resp.Chains { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(respChain.Raw)) - apeutil.PrintHumanReadableAPEChain(cmd, &chain) + apeCmd.PrintHumanReadableAPEChain(cmd, &chain) } } diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 482c0027e..31dbdbdbd 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -109,7 +110,7 @@ func parseChain(cmd *cobra.Command) *apechain.Chain { chain.ID = parseChainID(cmd) cmd.Println("Parsed chain:") - parseutil.PrintHumanReadableAPEChain(cmd, chain) + apeCmd.PrintHumanReadableAPEChain(cmd, chain) return chain } diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go index c648377bd..88291f0ac 100644 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ b/cmd/frostfs-cli/modules/control/add_rule.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" @@ -55,7 +56,7 @@ func parseChain(cmd *cobra.Command) *apechain.Chain { } cmd.Println("Parsed chain:") - util.PrintHumanReadableAPEChain(cmd, chain) + apeCmd.PrintHumanReadableAPEChain(cmd, chain) return chain } diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go index 4b4d6eef5..9bbbb02f7 100644 --- a/cmd/frostfs-cli/modules/control/get_rule.go +++ b/cmd/frostfs-cli/modules/control/get_rule.go @@ -4,8 +4,8 @@ import ( "encoding/hex" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" @@ -56,7 +56,7 @@ func getRule(cmd *cobra.Command, _ []string) { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain())) - util.PrintHumanReadableAPEChain(cmd, &chain) + apeCmd.PrintHumanReadableAPEChain(cmd, &chain) } func initControGetRuleCmd() { diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go index 7162df5e0..a7b9f9ef5 100644 --- a/cmd/frostfs-cli/modules/control/list_rules.go +++ b/cmd/frostfs-cli/modules/control/list_rules.go @@ -6,8 +6,8 @@ import ( "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -117,7 +117,7 @@ func listRules(cmd *cobra.Command, _ []string) { for _, c := range chains { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: 
%w", chain.DecodeBytes(c)) - util.PrintHumanReadableAPEChain(cmd, &chain) + apeCmd.PrintHumanReadableAPEChain(cmd, &chain) } } diff --git a/cmd/frostfs-cli/modules/util/ape.go b/cmd/frostfs-cli/modules/util/ape.go index 73c368510..0e963c0a3 100644 --- a/cmd/frostfs-cli/modules/util/ape.go +++ b/cmd/frostfs-cli/modules/util/ape.go @@ -4,13 +4,11 @@ import ( "errors" "fmt" "os" - "strconv" "strings" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" "github.com/flynn-archive/go-shlex" - "github.com/spf13/cobra" ) var ( @@ -27,38 +25,6 @@ var ( errFailedToParseAllAny = errors.New("any/all is not parsed") ) -// PrintHumanReadableAPEChain print APE chain rules. -func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { - cmd.Println("Chain ID: " + string(chain.ID)) - cmd.Printf(" HEX: %x\n", chain.ID) - cmd.Println("Rules:") - for _, rule := range chain.Rules { - cmd.Println("\n\tStatus: " + rule.Status.String()) - cmd.Println("\tAny: " + strconv.FormatBool(rule.Any)) - cmd.Println("\tConditions:") - for _, c := range rule.Condition { - var ot string - switch c.Kind { - case apechain.KindResource: - ot = "Resource" - case apechain.KindRequest: - ot = "Request" - default: - panic("unknown object type") - } - cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value)) - } - cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted)) - for _, name := range rule.Actions.Names { - cmd.Println("\t\t" + name) - } - cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted)) - for _, name := range rule.Resources.Names { - cmd.Println("\t\t" + name) - } - } -} - func ParseAPEChainBinaryOrJSON(chain *apechain.Chain, path string) error { data, err := os.ReadFile(path) if err != nil { diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go new file mode 100644 index 000000000..a8f50b29e --- /dev/null +++ b/cmd/internal/common/ape/commands.go @@ -0,0 +1,41 @@ +package ape + +import ( + "fmt" + "strconv" + + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "github.com/spf13/cobra" +) + +// PrintHumanReadableAPEChain print APE chain rules. 
+func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { + cmd.Println("Chain ID: " + string(chain.ID)) + cmd.Printf(" HEX: %x\n", chain.ID) + cmd.Println("Rules:") + for _, rule := range chain.Rules { + cmd.Println("\n\tStatus: " + rule.Status.String()) + cmd.Println("\tAny: " + strconv.FormatBool(rule.Any)) + cmd.Println("\tConditions:") + for _, c := range rule.Condition { + var ot string + switch c.Kind { + case apechain.KindResource: + ot = "Resource" + case apechain.KindRequest: + ot = "Request" + default: + panic("unknown object type") + } + cmd.Println(fmt.Sprintf("\t\t%s %s %s %s", ot, c.Key, c.Op, c.Value)) + } + cmd.Println("\tActions:\tInverted:" + strconv.FormatBool(rule.Actions.Inverted)) + for _, name := range rule.Actions.Names { + cmd.Println("\t\t" + name) + } + cmd.Println("\tResources:\tInverted:" + strconv.FormatBool(rule.Resources.Inverted)) + for _, name := range rule.Resources.Names { + cmd.Println("\t\t" + name) + } + } +} From ffe9906266044ca3d8a1f2baf6256490a787cada Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 12:54:02 +0300 Subject: [PATCH 194/591] [#1501] cli: Move APE-chain parser methods to `pkg/util` Signed-off-by: Airat Arifullin --- cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go | 2 +- cmd/frostfs-cli/modules/ape_manager/add_chain.go | 6 +++--- cmd/frostfs-cli/modules/bearer/generate_override.go | 2 +- cmd/frostfs-cli/modules/control/add_rule.go | 6 +++--- .../modules/util/ape.go => pkg/util/ape/parser.go | 2 +- .../modules/util/ape_test.go => pkg/util/ape/parser_test.go | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) rename cmd/frostfs-cli/modules/util/ape.go => pkg/util/ape/parser.go (99%) rename cmd/frostfs-cli/modules/util/ape_test.go => pkg/util/ape/parser_test.go (99%) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index df358ff69..1a70dd4a2 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -6,9 +6,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" + parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go index d9cfc304c..7a49dee68 100644 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go @@ -7,9 +7,9 @@ import ( internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" + parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -88,9 +88,9 @@ func parseChain(cmd *cobra.Command) apeSDK.Chain { chain.ID = apechain.ID(chainIDRaw) if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules)) + commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath)) + commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) } else { commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed")) } diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 31dbdbdbd..c3df0294e 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -6,9 +6,9 @@ import ( "os" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" + parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go index 88291f0ac..25960f439 100644 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ b/cmd/frostfs-cli/modules/control/add_rule.go @@ -5,10 +5,10 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/util" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" @@ -48,9 +48,9 @@ func parseChain(cmd *cobra.Command) *apechain.Chain { chain.ID = apechain.ID(chainIDRaw) if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", util.ParseAPEChain(chain, rules)) + commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", util.ParseAPEChainBinaryOrJSON(chain, encPath)) + commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) } else { commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed")) } diff --git 
a/cmd/frostfs-cli/modules/util/ape.go b/pkg/util/ape/parser.go similarity index 99% rename from cmd/frostfs-cli/modules/util/ape.go rename to pkg/util/ape/parser.go index 0e963c0a3..b4a31fd8d 100644 --- a/cmd/frostfs-cli/modules/util/ape.go +++ b/pkg/util/ape/parser.go @@ -1,4 +1,4 @@ -package util +package ape import ( "errors" diff --git a/cmd/frostfs-cli/modules/util/ape_test.go b/pkg/util/ape/parser_test.go similarity index 99% rename from cmd/frostfs-cli/modules/util/ape_test.go rename to pkg/util/ape/parser_test.go index b275803df..21649fd24 100644 --- a/cmd/frostfs-cli/modules/util/ape_test.go +++ b/pkg/util/ape/parser_test.go @@ -1,4 +1,4 @@ -package util +package ape import ( "fmt" From 7a7ee71a4d076325306e1bae754bdb8c40f5f026 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 13:12:22 +0300 Subject: [PATCH 195/591] [#1501] cmd: Introduce common APE-chain parser commands * Introduce common parsing commands to use them in `frostfs-cli` and `frostfs-adm` APE-related subcommands * Introduce common flags for these parsing commands Signed-off-by: Airat Arifullin --- cmd/internal/common/ape/commands.go | 125 ++++++++++++++++++++++++++++ cmd/internal/common/ape/flags.go | 19 +++++ 2 files changed, 144 insertions(+) create mode 100644 cmd/internal/common/ape/flags.go diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go index a8f50b29e..354747330 100644 --- a/cmd/internal/common/ape/commands.go +++ b/cmd/internal/common/ape/commands.go @@ -1,13 +1,43 @@ package ape import ( + "encoding/hex" + "errors" "fmt" "strconv" + "strings" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" + "github.com/nspcc-dev/neo-go/cli/input" "github.com/spf13/cobra" ) +const ( + defaultNamespace = "root" + namespaceTarget = "namespace" + containerTarget = "container" + userTarget = "user" + groupTarget = "group" + + Ingress = "ingress" + S3 = "s3" +) + +var mChainName = map[string]apechain.Name{ + Ingress: apechain.Ingress, + S3: apechain.S3, +} + +var ( + errSettingDefaultValueWasDeclined = errors.New("setting default value was declined") + errUnknownTargetType = errors.New("unknown target type") + errUnsupportedChainName = errors.New("unsupported chain name") +) + // PrintHumanReadableAPEChain print APE chain rules. func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { cmd.Println("Chain ID: " + string(chain.ID)) @@ -39,3 +69,98 @@ func PrintHumanReadableAPEChain(cmd *cobra.Command, chain *apechain.Chain) { } } } + +// ParseTarget handles target parsing of an APE chain. +func ParseTarget(cmd *cobra.Command) engine.Target { + typ := ParseTargetType(cmd) + name, _ := cmd.Flags().GetString(TargetNameFlag) + switch typ { + case engine.Namespace: + if name == "" { + ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. 
Confirm to use %s namespace (n|Y)> ", defaultNamespace)) + commonCmd.ExitOnErr(cmd, "read line error: %w", err) + ln = strings.ToLower(ln) + if len(ln) > 0 && (ln[0] == 'n') { + commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined) + } + name = defaultNamespace + } + return engine.NamespaceTarget(name) + case engine.Container: + var cnr cid.ID + commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) + return engine.ContainerTarget(name) + case engine.User: + return engine.UserTarget(name) + case engine.Group: + return engine.GroupTarget(name) + default: + commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + } + return engine.Target{} +} + +// ParseTargetType handles target type parsing of an APE chain. +func ParseTargetType(cmd *cobra.Command) engine.TargetType { + typ, _ := cmd.Flags().GetString(TargetTypeFlag) + switch typ { + case namespaceTarget: + return engine.Namespace + case containerTarget: + return engine.Container + case userTarget: + return engine.User + case groupTarget: + return engine.Group + } + commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType) + return engine.TargetType(0) +} + +// ParseChainID handles the parsing of APE-chain identifier. +// For some subcommands, chain ID is optional as an input parameter and should be generated by +// the service instead. +func ParseChainID(cmd *cobra.Command) (id apechain.ID) { + chainID, _ := cmd.Flags().GetString(ChainIDFlag) + id = apechain.ID(chainID) + + hexEncoded, _ := cmd.Flags().GetBool(ChainIDHexFlag) + if !hexEncoded { + return + } + + chainIDRaw, err := hex.DecodeString(chainID) + commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) + id = apechain.ID(chainIDRaw) + return +} + +// ParseChain parses an APE chain which can be provided either as a rule statement +// or loaded from a binary/JSON file path. +func ParseChain(cmd *cobra.Command) *apechain.Chain { + chain := new(apechain.Chain) + chain.ID = ParseChainID(cmd) + + if rules, _ := cmd.Flags().GetStringArray(RuleFlag); len(rules) > 0 { + commonCmd.ExitOnErr(cmd, "parser error: %w", apeutil.ParseAPEChain(chain, rules)) + } else if encPath, _ := cmd.Flags().GetString(PathFlag); encPath != "" { + commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", apeutil.ParseAPEChainBinaryOrJSON(chain, encPath)) + } else { + commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed")) + } + + cmd.Println("Parsed chain:") + PrintHumanReadableAPEChain(cmd, chain) + + return chain +} + +// ParseChainName parses chain name: the place in the request lifecycle where policy is applied. 
+func ParseChainName(cmd *cobra.Command) apechain.Name { + chainName, _ := cmd.Flags().GetString(ChainNameFlag) + apeChainName, ok := mChainName[strings.ToLower(chainName)] + if !ok { + commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName) + } + return apeChainName +} diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go new file mode 100644 index 000000000..c5e2a3a99 --- /dev/null +++ b/cmd/internal/common/ape/flags.go @@ -0,0 +1,19 @@ +package ape + +const ( + RuleFlag = "rule" + RuleFlagDesc = "Rule statement" + PathFlag = "path" + PathFlagDesc = "Path to encoded chain in JSON or binary format" + TargetNameFlag = "target-name" + TargetNameFlagDesc = "Resource name in APE resource name format" + TargetTypeFlag = "target-type" + TargetTypeFlagDesc = "Resource type(container/namespace)" + ChainIDFlag = "chain-id" + ChainIDFlagDesc = "Chain id" + ChainIDHexFlag = "chain-id-hex" + ChainIDHexFlagDesc = "Flag to parse chain ID as hex" + ChainNameFlag = "chain-name" + ChainNameFlagDesc = "Chain name(ingress|s3)" + AllFlag = "all" +) From daff77b2737cdf2c5257cac608794e87c67ebb60 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 13:33:47 +0300 Subject: [PATCH 196/591] [#1501] cli: Refactor local override managing subcommands * Refactor local override managing subcommands * Use `cmd/internal/common/ape` parser commands within local override subcommands * Use flag names from `cmd/internal/common/ape` Signed-off-by: Airat Arifullin --- cmd/frostfs-cli/modules/control/add_rule.go | 57 ++----------- cmd/frostfs-cli/modules/control/get_rule.go | 18 ++-- cmd/frostfs-cli/modules/control/list_rules.go | 82 +++++-------------- .../modules/control/list_targets.go | 20 ++--- .../modules/control/remove_rule.go | 27 +++--- 5 files changed, 54 insertions(+), 150 deletions(-) diff --git a/cmd/frostfs-cli/modules/control/add_rule.go b/cmd/frostfs-cli/modules/control/add_rule.go index 25960f439..42f229ad9 100644 --- a/cmd/frostfs-cli/modules/control/add_rule.go +++ b/cmd/frostfs-cli/modules/control/add_rule.go @@ -1,24 +1,14 @@ package control import ( - "encoding/hex" - "errors" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) -const ( - ruleFlag = "rule" - pathFlag = "path" -) - var addRuleCmd = &cobra.Command{ Use: "add-rule", Short: "Add local override", @@ -32,41 +22,12 @@ control add-rule --endpoint ... -w ... --address ... --chain-id ChainID --cid .. 
Run: addRule, } -func parseChain(cmd *cobra.Command) *apechain.Chain { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - - chainIDRaw := []byte(chainID) - - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - - chain := new(apechain.Chain) - chain.ID = apechain.ID(chainIDRaw) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error", errors.New("rule is not passed")) - } - - cmd.Println("Parsed chain:") - apeCmd.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} - func addRule(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) target := parseTarget(cmd) - parsed := parseChain(cmd) + parsed := apeCmd.ParseChain(cmd) req := &control.AddChainLocalOverrideRequest{ Body: &control.AddChainLocalOverrideRequest_Body{ @@ -95,13 +56,13 @@ func initControlAddRuleCmd() { initControlFlags(addRuleCmd) ff := addRuleCmd.Flags() - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = addRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") + ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") + ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addRuleCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") - addRuleCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag) + addRuleCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) } diff --git a/cmd/frostfs-cli/modules/control/get_rule.go b/cmd/frostfs-cli/modules/control/get_rule.go index 9bbbb02f7..4da903a9a 100644 --- a/cmd/frostfs-cli/modules/control/get_rule.go +++ b/cmd/frostfs-cli/modules/control/get_rule.go @@ -5,7 +5,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" + apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" @@ -24,8 +24,8 @@ func getRule(cmd *cobra.Command, _ []string) { target := parseTarget(cmd) - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) + chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) + hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) if hexEncoded { chainIDBytes, err := hex.DecodeString(chainID) @@ -56,16 +56,16 @@ func getRule(cmd *cobra.Command, _ 
[]string) { var chain apechain.Chain commonCmd.ExitOnErr(cmd, "decode error: %w", chain.DecodeBytes(resp.GetBody().GetChain())) - apeCmd.PrintHumanReadableAPEChain(cmd, &chain) + apecmd.PrintHumanReadableAPEChain(cmd, &chain) } func initControGetRuleCmd() { initControlFlags(getRuleCmd) ff := getRuleCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = getRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) + ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) + _ = getRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) + ff.String(apecmd.ChainIDFlag, "", "Chain id") + ff.Bool(apecmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") } diff --git a/cmd/frostfs-cli/modules/control/list_rules.go b/cmd/frostfs-cli/modules/control/list_rules.go index a7b9f9ef5..a6c65d083 100644 --- a/cmd/frostfs-cli/modules/control/list_rules.go +++ b/cmd/frostfs-cli/modules/control/list_rules.go @@ -1,7 +1,6 @@ package control import ( - "errors" "fmt" "strings" @@ -10,9 +9,8 @@ import ( apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/nspcc-dev/neo-go/cli/input" + policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/spf13/cobra" ) @@ -23,65 +21,25 @@ var listRulesCmd = &cobra.Command{ Run: listRules, } -const ( - defaultNamespace = "root" - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" -) - -const ( - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - targetTypeDesc = "Resource type(container/namespace)" -) - -var ( - errSettingDefaultValueWasDeclined = errors.New("setting default value was declined") - errUnknownTargetType = errors.New("unknown target type") -) +var engineToControlSvcType = map[policyengine.TargetType]control.ChainTarget_TargetType{ + policyengine.Namespace: control.ChainTarget_NAMESPACE, + policyengine.Container: control.ChainTarget_CONTAINER, + policyengine.User: control.ChainTarget_USER, + policyengine.Group: control.ChainTarget_GROUP, +} func parseTarget(cmd *cobra.Command) *control.ChainTarget { - typ, _ := cmd.Flags().GetString(targetTypeFlag) - name, _ := cmd.Flags().GetString(targetNameFlag) - switch typ { - case namespaceTarget: - if name == "" { - ln, err := input.ReadLine(fmt.Sprintf("Target name is not set. 
Confirm to use %s namespace (n|Y)> ", defaultNamespace)) - commonCmd.ExitOnErr(cmd, "read line error: %w", err) - ln = strings.ToLower(ln) - if len(ln) > 0 && (ln[0] == 'n') { - commonCmd.ExitOnErr(cmd, "read namespace error: %w", errSettingDefaultValueWasDeclined) - } - name = defaultNamespace - } - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_NAMESPACE, - } - case containerTarget: - var cnr cid.ID - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_CONTAINER, - } - case userTarget: - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_USER, - } - case groupTarget: - return &control.ChainTarget{ - Name: name, - Type: control.ChainTarget_GROUP, - } - default: - commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + target := apeCmd.ParseTarget(cmd) + + typ, ok := engineToControlSvcType[target.Type] + if !ok { + commonCmd.ExitOnErr(cmd, "%w", fmt.Errorf("unknown type '%c", target.Type)) + } + + return &control.ChainTarget{ + Name: target.Name, + Type: typ, } - return nil } func listRules(cmd *cobra.Command, _ []string) { @@ -125,7 +83,7 @@ func initControlListRulesCmd() { initControlFlags(listRulesCmd) ff := listRulesCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = listRulesCmd.MarkFlagRequired(targetTypeFlag) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = listRulesCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 7c401eb17..8bd2dc9cd 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -2,26 +2,20 @@ package control import ( "bytes" - "crypto/sha256" "fmt" "strconv" "text/tabwriter" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/spf13/cobra" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -const ( - chainNameFlag = "chain-name" - chainNameFlagUsage = "Chain name(ingress|s3)" -) - var listTargetsCmd = &cobra.Command{ Use: "list-targets", Short: "List local targets", @@ -32,15 +26,11 @@ var listTargetsCmd = &cobra.Command{ func listTargets(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) - var cnr cid.ID - chainName, _ := cmd.Flags().GetString(chainNameFlag) - - rawCID := make([]byte, sha256.Size) - cnr.Encode(rawCID) + chainName := apeCmd.ParseChainName(cmd) req := &control.ListTargetsLocalOverridesRequest{ Body: &control.ListTargetsLocalOverridesRequest_Body{ - ChainName: chainName, + ChainName: string(chainName), }, } @@ -82,7 +72,7 @@ func initControlListTargetsCmd() { initControlFlags(listTargetsCmd) ff := listTargetsCmd.Flags() - ff.String(chainNameFlag, "", chainNameFlagUsage) + ff.String(apeCmd.ChainNameFlag, "", apeCmd.ChainNameFlagDesc) - _ = cobra.MarkFlagRequired(ff, chainNameFlag) + _ = cobra.MarkFlagRequired(ff, apeCmd.ChainNameFlag) } diff --git 
a/cmd/frostfs-cli/modules/control/remove_rule.go b/cmd/frostfs-cli/modules/control/remove_rule.go index a996156a5..036317bcb 100644 --- a/cmd/frostfs-cli/modules/control/remove_rule.go +++ b/cmd/frostfs-cli/modules/control/remove_rule.go @@ -6,17 +6,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apecmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" ) -const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - allFlag = "all" -) - var ( errEmptyChainID = errors.New("chain id cannot be empty") @@ -30,8 +25,8 @@ var ( func removeRule(cmd *cobra.Command, _ []string) { pk := key.Get(cmd) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - removeAll, _ := cmd.Flags().GetBool(allFlag) + hexEncoded, _ := cmd.Flags().GetBool(apecmd.ChainIDHexFlag) + removeAll, _ := cmd.Flags().GetBool(apecmd.AllFlag) if removeAll { req := &control.RemoveChainLocalOverridesByTargetRequest{ Body: &control.RemoveChainLocalOverridesByTargetRequest_Body{ @@ -52,7 +47,7 @@ func removeRule(cmd *cobra.Command, _ []string) { return } - chainID, _ := cmd.Flags().GetString(chainIDFlag) + chainID, _ := cmd.Flags().GetString(apecmd.ChainIDFlag) if chainID == "" { commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID) } @@ -92,11 +87,11 @@ func initControlRemoveRuleCmd() { initControlFlags(removeRuleCmd) ff := removeRuleCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = removeRuleCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") - ff.Bool(allFlag, false, "Remove all chains") - removeRuleCmd.MarkFlagsMutuallyExclusive(allFlag, chainIDFlag) + ff.String(apecmd.TargetNameFlag, "", apecmd.TargetNameFlagDesc) + ff.String(apecmd.TargetTypeFlag, "", apecmd.TargetTypeFlagDesc) + _ = removeRuleCmd.MarkFlagRequired(apecmd.TargetTypeFlag) + ff.String(apecmd.ChainIDFlag, "", apecmd.ChainIDFlagDesc) + ff.Bool(apecmd.ChainIDHexFlag, false, apecmd.ChainIDHexFlagDesc) + ff.Bool(apecmd.AllFlag, false, "Remove all chains") + removeRuleCmd.MarkFlagsMutuallyExclusive(apecmd.AllFlag, apecmd.ChainIDFlag) } From 3b1364e4cf9276b944b9a48610a11989e74aa75b Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 14:01:27 +0300 Subject: [PATCH 197/591] [#1501] cli: Refactor ape-manager subcommands * Refactor ape-manager subcommands * Use `cmd/internal/common/ape` parser commands within ape-manager subcommands * Use flag names from `cmd/internal/common/ape` Signed-off-by: Airat Arifullin --- .../modules/ape_manager/add_chain.go | 93 ++++--------------- .../modules/ape_manager/list_chain.go | 6 +- .../modules/ape_manager/remove_chain.go | 47 ++++------ 3 files changed, 39 insertions(+), 107 deletions(-) diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go index 7a49dee68..f4039283f 100644 --- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go @@ -1,45 +1,19 @@ package apemanager import ( - "encoding/hex" - "errors" + "fmt" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/spf13/cobra" ) -const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - ruleFlag = "rule" - pathFlag = "path" -) - -const ( - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - targetTypeDesc = "Resource type(container/namespace)" -) - -const ( - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" -) - -var errUnknownTargetType = errors.New("unknown target type") - var addCmd = &cobra.Command{ Use: "add", Short: "Add rule chain for a target", @@ -50,55 +24,28 @@ var addCmd = &cobra.Command{ } func parseTarget(cmd *cobra.Command) (ct apeSDK.ChainTarget) { - typ, _ := cmd.Flags().GetString(targetTypeFlag) - name, _ := cmd.Flags().GetString(targetNameFlag) + t := apeCmd.ParseTarget(cmd) - ct.Name = name + ct.Name = t.Name - switch typ { - case namespaceTarget: + switch t.Type { + case engine.Namespace: ct.TargetType = apeSDK.TargetTypeNamespace - case containerTarget: - var cnr cid.ID - commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) + case engine.Container: ct.TargetType = apeSDK.TargetTypeContainer - case userTarget: + case engine.User: ct.TargetType = apeSDK.TargetTypeUser - case groupTarget: + case engine.Group: ct.TargetType = apeSDK.TargetTypeGroup default: - commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) + commonCmd.ExitOnErr(cmd, "conversion error: %w", fmt.Errorf("unknown type '%c'", t.Type)) } return ct } func parseChain(cmd *cobra.Command) apeSDK.Chain { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - - chainIDRaw := []byte(chainID) - - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - - chain := new(apechain.Chain) - chain.ID = apechain.ID(chainIDRaw) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errors.New("rule is not passed")) - } - - cmd.Println("Parsed chain:") - apeCmd.PrintHumanReadableAPEChain(cmd, chain) - - serialized := chain.Bytes() + c := apeCmd.ParseChain(cmd) + serialized := c.Bytes() return apeSDK.Chain{ Raw: serialized, } @@ -127,13 +74,13 @@ func initAddCmd() { commonflags.Init(addCmd) ff := addCmd.Flags() - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - 
ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = addCmd.MarkFlagRequired(targetTypeFlag) - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc) + ff.String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc) + ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) - addCmd.MarkFlagsMutuallyExclusive(pathFlag, ruleFlag) + addCmd.MarkFlagsMutuallyExclusive(apeCmd.PathFlag, apeCmd.RuleFlag) } diff --git a/cmd/frostfs-cli/modules/ape_manager/list_chain.go b/cmd/frostfs-cli/modules/ape_manager/list_chain.go index 9955c8c03..b07ecc52f 100644 --- a/cmd/frostfs-cli/modules/ape_manager/list_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/list_chain.go @@ -43,7 +43,7 @@ func initListCmd() { commonflags.Init(listCmd) ff := listCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = listCmd.MarkFlagRequired(targetTypeFlag) + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = listCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } diff --git a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go index 179bd5c9e..136ca81c3 100644 --- a/cmd/frostfs-cli/modules/ape_manager/remove_chain.go +++ b/cmd/frostfs-cli/modules/ape_manager/remove_chain.go @@ -1,29 +1,23 @@ package apemanager import ( - "encoding/hex" - "errors" - internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" client_sdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" ) -var ( - errEmptyChainID = errors.New("chain id cannot be empty") - - removeCmd = &cobra.Command{ - Use: "remove", - Short: "Remove rule chain for a target", - Run: remove, - PersistentPreRun: func(cmd *cobra.Command, _ []string) { - commonflags.Bind(cmd) - }, - } -) +var removeCmd = &cobra.Command{ + Use: "remove", + Short: "Remove rule chain for a target", + Run: remove, + PersistentPreRun: func(cmd *cobra.Command, _ []string) { + commonflags.Bind(cmd) + }, +} func remove(cmd *cobra.Command, _ []string) { target := parseTarget(cmd) @@ -31,19 +25,9 @@ func remove(cmd *cobra.Command, _ []string) { key := key.Get(cmd) cli := internalclient.GetSDKClientByFlag(cmd, key, commonflags.RPC) - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", errEmptyChainID) - } + chainID := apeCmd.ParseChainID(cmd) chainIDRaw := []byte(chainID) - hexEncoded, _ := cmd.Flags().GetBool(chainIDHexFlag) - if hexEncoded { - var err error - chainIDRaw, err = hex.DecodeString(chainID) - commonCmd.ExitOnErr(cmd, "can't decode chain ID as hex: %w", err) - } - _, err := cli.APEManagerRemoveChain(cmd.Context(), client_sdk.PrmAPEManagerRemoveChain{ 
ChainTarget: target, ChainID: chainIDRaw, @@ -58,9 +42,10 @@ func initRemoveCmd() { commonflags.Init(removeCmd) ff := removeCmd.Flags() - ff.String(targetNameFlag, "", targetNameDesc) - ff.String(targetTypeFlag, "", targetTypeDesc) - _ = removeCmd.MarkFlagRequired(targetTypeFlag) - ff.String(chainIDFlag, "", "Chain id") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + ff.String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = removeCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + ff.String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + _ = removeCmd.MarkFlagRequired(apeCmd.ChainIDFlag) + ff.Bool(apeCmd.ChainIDHexFlag, false, apeCmd.ChainIDHexFlagDesc) } From 4ab4ed6f96e3538a25110e1eb5229e90e438de49 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 14:07:14 +0300 Subject: [PATCH 198/591] [#1501] cli: Refactor bearer subcommand * Use `cmd/internal/common/ape` parser commands within `generate-ape-override` subcommand * Use flag names from `cmd/internal/common/ape` Signed-off-by: Airat Arifullin --- .../modules/bearer/generate_override.go | 52 +++---------------- 1 file changed, 6 insertions(+), 46 deletions(-) diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index c3df0294e..13fe07995 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -1,32 +1,20 @@ package bearer import ( - "errors" "fmt" "os" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "github.com/spf13/cobra" ) -var ( - errChainIDCannotBeEmpty = errors.New("chain id cannot be empty") - errRuleIsNotParsed = errors.New("rule is not passed") -) - const ( - chainIDFlag = "chain-id" - chainIDHexFlag = "chain-id-hex" - ruleFlag = "rule" - pathFlag = "path" - outputFlag = "output" + outputFlag = "output" ) var generateAPEOverrideCmd = &cobra.Command{ @@ -41,7 +29,7 @@ Generated APE override can be dumped to a file in JSON format that is passed to } func genereateAPEOverride(cmd *cobra.Command, _ []string) { - c := parseChain(cmd) + c := apeCmd.ParseChain(cmd) targetCID, _ := cmd.Flags().GetString(commonflags.CIDFlag) var cid cidSDK.ID @@ -78,39 +66,11 @@ func init() { ff.StringP(commonflags.CIDFlag, "", "", "Target container ID.") _ = cobra.MarkFlagRequired(createCmd.Flags(), commonflags.CIDFlag) - ff.StringArray(ruleFlag, []string{}, "Rule statement") - ff.String(pathFlag, "", "Path to encoded chain in JSON or binary format") - ff.String(chainIDFlag, "", "Assign ID to the parsed chain") - ff.Bool(chainIDHexFlag, false, "Flag to parse chain ID as hex") + ff.StringArray(apeCmd.RuleFlag, []string{}, "Rule statement") + ff.String(apeCmd.PathFlag, "", "Path to encoded chain in JSON or binary format") + ff.String(apeCmd.ChainIDFlag, "", "Assign ID to the parsed chain") + ff.Bool(apeCmd.ChainIDHexFlag, false, "Flag to parse chain ID as hex") ff.String(outputFlag, "", "Output path to dump result 
JSON-encoded APE override") _ = cobra.MarkFlagFilename(createCmd.Flags(), outputFlag) } - -func parseChainID(cmd *cobra.Command) apechain.ID { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", - errChainIDCannotBeEmpty) - } - return apechain.ID(chainID) -} - -func parseChain(cmd *cobra.Command) *apechain.Chain { - chain := new(apechain.Chain) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed) - } - - chain.ID = parseChainID(cmd) - - cmd.Println("Parsed chain:") - apeCmd.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} From a339b52a6038f0746a54acd8f668ea57a8541fd5 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 18 Nov 2024 14:11:21 +0300 Subject: [PATCH 199/591] [#1501] adm: Refactor APE-chains managing subcommands * Use `cmd/internal/common/ape` parser commands within `ape` subcommands * Use flag names from `cmd/internal/common/ape Signed-off-by: Airat Arifullin --- .../internal/modules/morph/ape/ape.go | 86 ++++++--------- .../internal/modules/morph/ape/ape_util.go | 103 ++++-------------- cmd/internal/common/ape/commands.go | 7 +- 3 files changed, 58 insertions(+), 138 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go index fb363f903..8fcd4a441 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go @@ -14,26 +14,10 @@ import ( ) const ( - namespaceTarget = "namespace" - containerTarget = "container" - userTarget = "user" - groupTarget = "group" - jsonFlag = "json" - jsonFlagDesc = "Output rule chains in JSON format" - chainIDFlag = "chain-id" - chainIDDesc = "Rule chain ID" - ruleFlag = "rule" - ruleFlagDesc = "Rule chain in text format" - pathFlag = "path" - pathFlagDesc = "path to encoded chain in JSON or binary format" - targetNameFlag = "target-name" - targetNameDesc = "Resource name in APE resource name format" - targetTypeFlag = "target-type" - targetTypeDesc = "Resource type(container/namespace)" - addrAdminFlag = "addr" - addrAdminDesc = "The address of the admins wallet" - chainNameFlag = "chain-name" - chainNameFlagDesc = "Chain name(ingress|s3)" + jsonFlag = "json" + jsonFlagDesc = "Output rule chains in JSON format" + addrAdminFlag = "addr" + addrAdminDesc = "The address of the admins wallet" ) var ( @@ -101,17 +85,17 @@ func initAddRuleChainCmd() { addRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) addRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - addRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc) - _ = addRuleChainCmd.MarkFlagRequired(targetTypeFlag) - addRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = addRuleChainCmd.MarkFlagRequired(targetNameFlag) + addRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = addRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + addRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetTypeFlagDesc) + _ = 
addRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag) - addRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc) - _ = addRuleChainCmd.MarkFlagRequired(chainIDFlag) - addRuleChainCmd.Flags().StringArray(ruleFlag, []string{}, ruleFlagDesc) - addRuleChainCmd.Flags().String(pathFlag, "", pathFlagDesc) - addRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) - addRuleChainCmd.MarkFlagsMutuallyExclusive(ruleFlag, pathFlag) + addRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + _ = addRuleChainCmd.MarkFlagRequired(apeCmd.ChainIDFlag) + addRuleChainCmd.Flags().StringArray(apeCmd.RuleFlag, []string{}, apeCmd.RuleFlagDesc) + addRuleChainCmd.Flags().String(apeCmd.PathFlag, "", apeCmd.PathFlagDesc) + addRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) + addRuleChainCmd.MarkFlagsMutuallyExclusive(apeCmd.RuleFlag, apeCmd.PathFlag) } func initRemoveRuleChainCmd() { @@ -120,26 +104,25 @@ func initRemoveRuleChainCmd() { removeRuleChainCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) removeRuleChainCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) - removeRuleChainCmd.Flags().String(targetTypeFlag, "", targetTypeDesc) - _ = removeRuleChainCmd.MarkFlagRequired(targetTypeFlag) - removeRuleChainCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = removeRuleChainCmd.MarkFlagRequired(targetNameFlag) - removeRuleChainCmd.Flags().String(chainIDFlag, "", chainIDDesc) - removeRuleChainCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) + removeRuleChainCmd.Flags().String(apeCmd.TargetTypeFlag, "", apeCmd.TargetTypeFlagDesc) + _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + removeRuleChainCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) + _ = removeRuleChainCmd.MarkFlagRequired(apeCmd.TargetNameFlag) + removeRuleChainCmd.Flags().String(apeCmd.ChainIDFlag, "", apeCmd.ChainIDFlagDesc) + removeRuleChainCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) removeRuleChainCmd.Flags().Bool(commonflags.AllFlag, false, "Remove all chains for target") - removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, chainIDFlag) + removeRuleChainCmd.MarkFlagsMutuallyExclusive(commonflags.AllFlag, apeCmd.ChainIDFlag) } func initListRuleChainsCmd() { Cmd.AddCommand(listRuleChainsCmd) listRuleChainsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - listRuleChainsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc) - _ = listRuleChainsCmd.MarkFlagRequired(targetTypeFlag) - listRuleChainsCmd.Flags().String(targetNameFlag, "", targetNameDesc) - _ = listRuleChainsCmd.MarkFlagRequired(targetNameFlag) + listRuleChainsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc) + _ = listRuleChainsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) + listRuleChainsCmd.Flags().String(apeCmd.TargetNameFlag, "", apeCmd.TargetNameFlagDesc) listRuleChainsCmd.Flags().Bool(jsonFlag, false, jsonFlagDesc) - listRuleChainsCmd.Flags().String(chainNameFlag, ingress, chainNameFlagDesc) + listRuleChainsCmd.Flags().String(apeCmd.ChainNameFlag, apeCmd.Ingress, apeCmd.ChainNameFlagDesc) } func initSetAdminCmd() { @@ -161,15 +144,15 @@ func initListTargetsCmd() { Cmd.AddCommand(listTargetsCmd) listTargetsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", 
commonflags.EndpointFlagDesc) - listTargetsCmd.Flags().StringP(targetTypeFlag, "t", "", targetTypeDesc) - _ = listTargetsCmd.MarkFlagRequired(targetTypeFlag) + listTargetsCmd.Flags().StringP(apeCmd.TargetTypeFlag, "t", "", apeCmd.TargetTypeFlagDesc) + _ = listTargetsCmd.MarkFlagRequired(apeCmd.TargetTypeFlag) } func addRuleChain(cmd *cobra.Command, _ []string) { - chain := parseChain(cmd) + chain := apeCmd.ParseChain(cmd) target := parseTarget(cmd) pci, ac := newPolicyContractInterface(cmd) - h, vub, err := pci.AddMorphRuleChain(parseChainName(cmd), target, chain) + h, vub, err := pci.AddMorphRuleChain(apeCmd.ParseChainName(cmd), target, chain) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "add rule chain error: %w", err) @@ -181,14 +164,14 @@ func removeRuleChain(cmd *cobra.Command, _ []string) { pci, ac := newPolicyContractInterface(cmd) removeAll, _ := cmd.Flags().GetBool(commonflags.AllFlag) if removeAll { - h, vub, err := pci.RemoveMorphRuleChainsByTarget(parseChainName(cmd), target) + h, vub, err := pci.RemoveMorphRuleChainsByTarget(apeCmd.ParseChainName(cmd), target) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err) cmd.Println("All chains for target removed successfully") } else { - chainID := parseChainID(cmd) - h, vub, err := pci.RemoveMorphRuleChain(parseChainName(cmd), target, chainID) + chainID := apeCmd.ParseChainID(cmd) + h, vub, err := pci.RemoveMorphRuleChain(apeCmd.ParseChainName(cmd), target, chainID) cmd.Println("Waiting for transaction to persist...") _, err = ac.Wait(h, vub, err) commonCmd.ExitOnErr(cmd, "remove rule chain error: %w", err) @@ -199,7 +182,7 @@ func removeRuleChain(cmd *cobra.Command, _ []string) { func listRuleChains(cmd *cobra.Command, _ []string) { target := parseTarget(cmd) pci, _ := newPolicyContractReaderInterface(cmd) - chains, err := pci.ListMorphRuleChains(parseChainName(cmd), target) + chains, err := pci.ListMorphRuleChains(apeCmd.ParseChainName(cmd), target) commonCmd.ExitOnErr(cmd, "list rule chains error: %w", err) if len(chains) == 0 { return @@ -235,8 +218,7 @@ func getAdmin(cmd *cobra.Command, _ []string) { } func listTargets(cmd *cobra.Command, _ []string) { - typ, err := parseTargetType(cmd) - commonCmd.ExitOnErr(cmd, "parse target type error: %w", err) + typ := apeCmd.ParseTargetType(cmd) pci, inv := newPolicyContractReaderInterface(cmd) sid, it, err := pci.ListTargetsIterator(typ) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 1a70dd4a2..6780e6dd3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -2,14 +2,12 @@ package ape import ( "errors" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" - parseutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/ape" - apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" morph "git.frostfs.info/TrueCloudLab/policy-engine/pkg/morph/policy" 
"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -19,90 +17,29 @@ import ( "github.com/spf13/viper" ) -const ( - ingress = "ingress" - s3 = "s3" -) - -var mChainName = map[string]apechain.Name{ - ingress: apechain.Ingress, - s3: apechain.S3, -} - -var ( - errUnknownTargetType = errors.New("unknown target type") - errChainIDCannotBeEmpty = errors.New("chain id cannot be empty") - errRuleIsNotParsed = errors.New("rule is not passed") - errUnsupportedChainName = errors.New("unsupported chain name") -) +var errUnknownTargetType = errors.New("unknown target type") func parseTarget(cmd *cobra.Command) policyengine.Target { - name, _ := cmd.Flags().GetString(targetNameFlag) - typ, err := parseTargetType(cmd) - - // interpret "root" namespace as empty - if typ == policyengine.Namespace && name == "root" { - name = "" - } - - commonCmd.ExitOnErr(cmd, "read target type error: %w", err) - - return policyengine.Target{ - Name: name, - Type: typ, - } -} - -func parseTargetType(cmd *cobra.Command) (policyengine.TargetType, error) { - typ, _ := cmd.Flags().GetString(targetTypeFlag) + typ := apeCmd.ParseTargetType(cmd) + name, _ := cmd.Flags().GetString(apeCmd.TargetNameFlag) switch typ { - case namespaceTarget: - return policyengine.Namespace, nil - case containerTarget: - return policyengine.Container, nil - case userTarget: - return policyengine.User, nil - case groupTarget: - return policyengine.Group, nil + case policyengine.Namespace: + if name == "root" { + name = "" + } + return policyengine.NamespaceTarget(name) + case policyengine.Container: + var cnr cid.ID + commonCmd.ExitOnErr(cmd, "can't decode container ID: %w", cnr.DecodeString(name)) + return policyengine.ContainerTarget(name) + case policyengine.User: + return policyengine.UserTarget(name) + case policyengine.Group: + return policyengine.GroupTarget(name) + default: + commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) } - return -1, errUnknownTargetType -} - -func parseChainID(cmd *cobra.Command) apechain.ID { - chainID, _ := cmd.Flags().GetString(chainIDFlag) - if chainID == "" { - commonCmd.ExitOnErr(cmd, "read chain id error: %w", - errChainIDCannotBeEmpty) - } - return apechain.ID(chainID) -} - -func parseChain(cmd *cobra.Command) *apechain.Chain { - chain := new(apechain.Chain) - - if rules, _ := cmd.Flags().GetStringArray(ruleFlag); len(rules) > 0 { - commonCmd.ExitOnErr(cmd, "parser error: %w", parseutil.ParseAPEChain(chain, rules)) - } else if encPath, _ := cmd.Flags().GetString(pathFlag); encPath != "" { - commonCmd.ExitOnErr(cmd, "decode binary or json error: %w", parseutil.ParseAPEChainBinaryOrJSON(chain, encPath)) - } else { - commonCmd.ExitOnErr(cmd, "parser error: %w", errRuleIsNotParsed) - } - - chain.ID = parseChainID(cmd) - - cmd.Println("Parsed chain:") - apeCmd.PrintHumanReadableAPEChain(cmd, chain) - - return chain -} - -func parseChainName(cmd *cobra.Command) apechain.Name { - chainName, _ := cmd.Flags().GetString(chainNameFlag) - apeChainName, ok := mChainName[strings.ToLower(chainName)] - if !ok { - commonCmd.ExitOnErr(cmd, "", errUnsupportedChainName) - } - return apeChainName + panic("unreachable") } // invokerAdapter adapats invoker.Invoker to ContractStorageInvoker interface. 
diff --git a/cmd/internal/common/ape/commands.go b/cmd/internal/common/ape/commands.go index 354747330..e5a35ab71 100644 --- a/cmd/internal/common/ape/commands.go +++ b/cmd/internal/common/ape/commands.go @@ -97,7 +97,7 @@ func ParseTarget(cmd *cobra.Command) engine.Target { default: commonCmd.ExitOnErr(cmd, "read target type error: %w", errUnknownTargetType) } - return engine.Target{} + panic("unreachable") } // ParseTargetType handles target type parsing of an APE chain. @@ -112,9 +112,10 @@ func ParseTargetType(cmd *cobra.Command) engine.TargetType { return engine.User case groupTarget: return engine.Group + default: + commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType) } - commonCmd.ExitOnErr(cmd, "parse target type error: %w", errUnknownTargetType) - return engine.TargetType(0) + panic("unreachable") } // ParseChainID handles the parsing of APE-chain identifier. From 3042490340fcfe223fa8069006876f84e1721082 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 20 Nov 2024 10:43:38 +0300 Subject: [PATCH 200/591] [#1507] timer: Remove unused OnDelta() method Signed-off-by: Evgenii Stratonikov --- pkg/morph/timer/block.go | 73 +---------------- pkg/morph/timer/block_test.go | 142 ++++------------------------------ 2 files changed, 17 insertions(+), 198 deletions(-) diff --git a/pkg/morph/timer/block.go b/pkg/morph/timer/block.go index be20d3571..974be1120 100644 --- a/pkg/morph/timer/block.go +++ b/pkg/morph/timer/block.go @@ -15,41 +15,19 @@ type BlockTickHandler func() // It can tick the blocks and perform certain actions // on block time intervals. type BlockTimer struct { - rolledBack bool - mtx sync.Mutex dur BlockMeter baseDur uint32 - mul, div uint32 - cur, tgt uint32 last uint32 h BlockTickHandler - ps []BlockTimer - once bool - - deltaCfg -} - -// DeltaOption is an option of delta-interval handler. -type DeltaOption func(*deltaCfg) - -type deltaCfg struct { - pulse bool -} - -// WithPulse returns option to call delta-interval handler multiple times. -func WithPulse() DeltaOption { - return func(c *deltaCfg) { - c.pulse = true - } } // StaticBlockMeter returns BlockMeters that always returns (d, nil). @@ -65,52 +43,19 @@ func StaticBlockMeter(d uint32) BlockMeter { func NewBlockTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer { return &BlockTimer{ dur: dur, - mul: 1, - div: 1, h: h, - deltaCfg: deltaCfg{ - pulse: true, - }, } } // NewOneTickTimer creates a new BlockTimer that ticks only once. -// -// Do not use delta handlers with pulse in this timer. func NewOneTickTimer(dur BlockMeter, h BlockTickHandler) *BlockTimer { return &BlockTimer{ dur: dur, - mul: 1, - div: 1, h: h, once: true, } } -// OnDelta registers handler which is executed on (mul / div * BlockMeter()) block -// after basic interval reset. -// -// If WithPulse option is provided, handler is executed (mul / div * BlockMeter()) block -// during base interval. -func (t *BlockTimer) OnDelta(mul, div uint32, h BlockTickHandler, opts ...DeltaOption) { - c := deltaCfg{ - pulse: false, - } - - for i := range opts { - opts[i](&c) - } - - t.ps = append(t.ps, BlockTimer{ - mul: mul, - div: div, - h: h, - once: t.once, - - deltaCfg: c, - }) -} - // Reset resets previous ticks of the BlockTimer. // // Returns BlockMeter's error upon occurrence. 
@@ -124,29 +69,18 @@ func (t *BlockTimer) Reset() error { t.resetWithBaseInterval(d) - for i := range t.ps { - t.ps[i].resetWithBaseInterval(d) - } - t.mtx.Unlock() return nil } func (t *BlockTimer) resetWithBaseInterval(d uint32) { - t.rolledBack = false t.baseDur = d t.reset() } func (t *BlockTimer) reset() { - mul, div := t.mul, t.div - - if !t.pulse && t.rolledBack && mul < div { - mul, div = 1, 1 - } - - delta := mul * t.baseDur / div + delta := t.baseDur if delta == 0 { delta = 1 } @@ -180,12 +114,7 @@ func (t *BlockTimer) tick(h uint32) { if !t.once { t.cur = 0 - t.rolledBack = true t.reset() } } - - for i := range t.ps { - t.ps[i].tick(h) - } } diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index ee6091845..c0af6c5c3 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -63,85 +63,6 @@ func TestBlockTimer(t *testing.T) { tickN(bt, intervalNum*blockDur) require.Equal(t, intervalNum, uint32(baseCallCounter)) - - // add half-interval handler - halfCallCounter := uint32(0) - - bt.OnDelta(1, 2, func() { - halfCallCounter++ - }) - - // add double interval handler - doubleCallCounter := uint32(0) - - bt.OnDelta(2, 1, func() { - doubleCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - baseCallCounter = 0 - intervalNum = 20 - - tickN(bt, intervalNum*blockDur) - - require.Equal(t, intervalNum, uint32(halfCallCounter)) - require.Equal(t, intervalNum, uint32(baseCallCounter)) - require.Equal(t, intervalNum/2, uint32(doubleCallCounter)) -} - -func TestDeltaPulse(t *testing.T) { - blockDur := uint32(9) - baseCallCounter := uint32(0) - - bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - deltaCallCounter := uint32(0) - - div := uint32(3) - - bt.OnDelta(1, div, func() { - deltaCallCounter++ - }, timer.WithPulse()) - - require.NoError(t, bt.Reset()) - - intervalNum := uint32(7) - - tickN(bt, intervalNum*blockDur) - - require.Equal(t, intervalNum, uint32(baseCallCounter)) - require.Equal(t, intervalNum*div, uint32(deltaCallCounter)) -} - -func TestDeltaReset(t *testing.T) { - blockDur := uint32(6) - baseCallCounter := 0 - - bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - detlaCallCounter := 0 - - bt.OnDelta(1, 3, func() { - detlaCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - tickN(bt, 6) - - require.Equal(t, 1, baseCallCounter) - require.Equal(t, 1, detlaCallCounter) - - require.NoError(t, bt.Reset()) - - tickN(bt, 3) - - require.Equal(t, 2, detlaCallCounter) } func TestNewOneTickTimer(t *testing.T) { @@ -168,82 +89,51 @@ func TestNewOneTickTimer(t *testing.T) { tickN(bt, 10) require.Equal(t, 1, baseCallCounter) }) - - t.Run("delta without pulse", func(t *testing.T) { - blockDur = uint32(10) - baseCallCounter = 0 - - bt = timer.NewOneTickTimer(timer.StaticBlockMeter(blockDur), func() { - baseCallCounter++ - }) - - detlaCallCounter := 0 - - bt.OnDelta(1, 10, func() { - detlaCallCounter++ - }) - - require.NoError(t, bt.Reset()) - - tickN(bt, 10) - require.Equal(t, 1, baseCallCounter) - require.Equal(t, 1, detlaCallCounter) - - tickN(bt, 10) // 10 more ticks must not affect counters - require.Equal(t, 1, baseCallCounter) - require.Equal(t, 1, detlaCallCounter) - }) } func TestBlockTimer_TickSameHeight(t *testing.T) { - var baseCounter, deltaCounter int + var baseCounter int blockDur := uint32(2) bt := timer.NewBlockTimer( func() (uint32, error) { return blockDur, nil }, func() { baseCounter++ }) - bt.OnDelta(2, 1, func() { - 
deltaCounter++ - }) require.NoError(t, bt.Reset()) - check := func(t *testing.T, h uint32, base, delta int) { + check := func(t *testing.T, h uint32, base int) { for range 2 * int(blockDur) { bt.Tick(h) require.Equal(t, base, baseCounter) - require.Equal(t, delta, deltaCounter) } } - check(t, 1, 0, 0) - check(t, 2, 1, 0) - check(t, 3, 1, 0) - check(t, 4, 2, 1) + check(t, 1, 0) + check(t, 2, 1) + check(t, 3, 1) + check(t, 4, 2) t.Run("works the same way after `Reset()`", func(t *testing.T) { t.Run("same block duration", func(t *testing.T) { require.NoError(t, bt.Reset()) baseCounter = 0 - deltaCounter = 0 - check(t, 1, 0, 0) - check(t, 2, 1, 0) - check(t, 3, 1, 0) - check(t, 4, 2, 1) + check(t, 1, 0) + check(t, 2, 1) + check(t, 3, 1) + check(t, 4, 2) }) t.Run("different block duration", func(t *testing.T) { blockDur = 3 require.NoError(t, bt.Reset()) baseCounter = 0 - deltaCounter = 0 - check(t, 1, 0, 0) - check(t, 2, 0, 0) - check(t, 3, 1, 0) - check(t, 4, 1, 0) - check(t, 5, 1, 0) - check(t, 6, 2, 1) + check(t, 1, 0) + check(t, 2, 0) + check(t, 3, 1) + check(t, 4, 1) + check(t, 5, 1) + check(t, 6, 2) }) }) } From 2e974f734c38161c251e6556daacbc8fbf4fbfd3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 20 Nov 2024 10:53:33 +0300 Subject: [PATCH 201/591] [#1507] timer/test: Improve test coverage Signed-off-by: Evgenii Stratonikov --- pkg/morph/timer/block_test.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index c0af6c5c3..615631dcb 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -1,6 +1,7 @@ package timer_test import ( + "errors" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/timer" @@ -48,6 +49,38 @@ func TestIRBlockTimer_Reset(t *testing.T) { require.Equal(t, baseCounter[0], baseCounter[1]) } +func TestBlockTimer_ResetChangeDuration(t *testing.T) { + var dur uint32 = 2 + var err error + var counter int + + bt := timer.NewBlockTimer( + func() (uint32, error) { return dur, err }, + func() { counter++ }) + + require.NoError(t, bt.Reset()) + + tickN(bt, 2) + require.Equal(t, 1, counter) + + t.Run("return error", func(t *testing.T) { + dur = 5 + err = errors.New("my awesome error") + require.ErrorIs(t, bt.Reset(), err) + + tickN(bt, 2) + require.Equal(t, 2, counter) + }) + t.Run("change duration", func(t *testing.T) { + dur = 5 + err = nil + require.NoError(t, bt.Reset()) + + tickN(bt, 5) + require.Equal(t, 3, counter) + }) +} + func TestBlockTimer(t *testing.T) { blockDur := uint32(10) baseCallCounter := uint32(0) From 49a4e727fd864da271c72ee6a53f1b315a959efd Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 20 Nov 2024 10:55:24 +0300 Subject: [PATCH 202/591] [#1507] timer/test: Use const for constants Make it easy to see what the test is about. Signed-off-by: Evgenii Stratonikov --- pkg/morph/timer/block_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index 615631dcb..a144b3db6 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -18,7 +18,7 @@ func tickN(t *timer.BlockTimer, n uint32) { // "resetting" consists of ticking the current height as well and invoking `Reset`. 
func TestIRBlockTimer_Reset(t *testing.T) { var baseCounter [2]int - blockDur := uint32(3) + const blockDur = uint32(3) bt1 := timer.NewBlockTimer( func() (uint32, error) { return blockDur, nil }, @@ -82,7 +82,7 @@ func TestBlockTimer_ResetChangeDuration(t *testing.T) { } func TestBlockTimer(t *testing.T) { - blockDur := uint32(10) + const blockDur = uint32(10) baseCallCounter := uint32(0) bt := timer.NewBlockTimer(timer.StaticBlockMeter(blockDur), func() { From 6ae8667fb4a7c00fc5d3a72ff181a8da54a0eba3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 20 Nov 2024 11:40:13 +0300 Subject: [PATCH 203/591] [#1509] .forgejo: Run actions on push to master Signed-off-by: Evgenii Stratonikov --- .forgejo/workflows/build.yml | 6 +++++- .forgejo/workflows/pre-commit.yml | 7 ++++++- .forgejo/workflows/tests.yml | 7 ++++++- .forgejo/workflows/vulncheck.yml | 7 ++++++- 4 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index ce2d64dd9..9129d136e 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -1,6 +1,10 @@ name: Build -on: [pull_request] +on: + pull_request: + push: + branches: + - master jobs: build: diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index 8b06a2fdf..b27e7a39a 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -1,5 +1,10 @@ name: Pre-commit hooks -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: precommit: diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index 07ba5c268..4f1bebe61 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -1,5 +1,10 @@ name: Tests and linters -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: lint: diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 2951a8059..cf15005b1 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -1,5 +1,10 @@ name: Vulncheck -on: [pull_request] + +on: + pull_request: + push: + branches: + - master jobs: vulncheck: From 2e2c62147db434c3834be92e7ebcc3ed80a44a57 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 20 Nov 2024 14:16:17 +0300 Subject: [PATCH 204/591] [#1513] adm: Move ProtoConfigPath from `constants` to `commonflags` package Refs #932 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/commonflags/flags.go | 1 + cmd/frostfs-adm/internal/modules/morph/constants/const.go | 1 - .../internal/modules/morph/helper/local_client.go | 3 ++- .../internal/modules/morph/initialize/initialize_test.go | 2 +- cmd/frostfs-adm/internal/modules/morph/initialize/root.go | 5 ++--- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 81395edb0..87692d013 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -20,6 +20,7 @@ const ( AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" LocalDumpFlag = "local-dump" + ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" ContractsInitFlagDesc = "Path to archive with compiled FrostFS contracts (the default is to fetch the latest release from the official repository)" ContractsURLFlag = "contracts-url" diff --git a/cmd/frostfs-adm/internal/modules/morph/constants/const.go b/cmd/frostfs-adm/internal/modules/morph/constants/const.go index 
a3b4f129a..be4041a86 100644 --- a/cmd/frostfs-adm/internal/modules/morph/constants/const.go +++ b/cmd/frostfs-adm/internal/modules/morph/constants/const.go @@ -4,7 +4,6 @@ import "time" const ( ConsensusAccountName = "consensus" - ProtoConfigPath = "protocol" // MaxAlphabetNodes is the maximum number of candidates allowed, which is currently limited by the size // of the invocation script. diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 375fa84d7..ed028fb7c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -8,6 +8,7 @@ import ( "sort" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" @@ -47,7 +48,7 @@ type LocalClient struct { } func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet, dumpPath string) (*LocalClient, error) { - cfg, err := config.LoadFile(v.GetString(constants.ProtoConfigPath)) + cfg, err := config.LoadFile(v.GetString(commonflags.ProtoConfigPath)) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go index 74f5d3e88..9bc51c055 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go @@ -62,7 +62,7 @@ func testInitialize(t *testing.T, committeeSize int) { v := viper.GetViper() require.NoError(t, generateTestData(testdataDir, committeeSize)) - v.Set(constants.ProtoConfigPath, filepath.Join(testdataDir, protoFileName)) + v.Set(commonflags.ProtoConfigPath, filepath.Join(testdataDir, protoFileName)) // Set to the path or remove the next statement to download from the network. require.NoError(t, Cmd.Flags().Set(commonflags.ContractsInitFlag, contractsPath)) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go index b7885c512..50f14e728 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/root.go @@ -2,7 +2,6 @@ package initialize import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -32,7 +31,7 @@ var Cmd = &cobra.Command{ _ = viper.BindPFlag(commonflags.ContainerFeeInitFlag, cmd.Flags().Lookup(containerFeeCLIFlag)) _ = viper.BindPFlag(commonflags.ContainerAliasFeeInitFlag, cmd.Flags().Lookup(containerAliasFeeCLIFlag)) _ = viper.BindPFlag(commonflags.WithdrawFeeInitFlag, cmd.Flags().Lookup(withdrawFeeCLIFlag)) - _ = viper.BindPFlag(constants.ProtoConfigPath, cmd.Flags().Lookup(constants.ProtoConfigPath)) + _ = viper.BindPFlag(commonflags.ProtoConfigPath, cmd.Flags().Lookup(commonflags.ProtoConfigPath)) }, RunE: initializeSideChainCmd, } @@ -48,7 +47,7 @@ func initInitCmd() { // Defaults are taken from neo-preodolenie. 
 	Cmd.Flags().Uint64(containerFeeCLIFlag, 1000, "Container registration fee")
 	Cmd.Flags().Uint64(containerAliasFeeCLIFlag, 500, "Container alias fee")
-	Cmd.Flags().String(constants.ProtoConfigPath, "", "Path to the consensus node configuration")
+	Cmd.Flags().String(commonflags.ProtoConfigPath, "", "Path to the consensus node configuration")
 	Cmd.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
 	Cmd.MarkFlagsMutuallyExclusive(commonflags.ContractsInitFlag, commonflags.ContractsURLFlag)
 }

From f12f04199e8437a9a6f440cac7ef5dfdd4454efc Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Thu, 21 Nov 2024 13:17:16 +0300
Subject: [PATCH 205/591] [#1516] traverser: Check for placement vector out of range

The placement vector may contain fewer nodes than required by the policy
due to an outage of one of the nodes.

Signed-off-by: Anton Nikiforov
---
 .../object_manager/placement/traverser.go     |  5 +-
 .../placement/traverser_test.go               | 46 +++++++++++++++++++
 2 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 6440f187d..7c720b204 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -114,8 +114,9 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
 		var unsortedVector []netmap.NodeInfo
 		var regularVector []netmap.NodeInfo
 		for i := range rem {
-			unsortedVector = append(unsortedVector, ns[i][:rem[i]]...)
-			regularVector = append(regularVector, ns[i][rem[i]:]...)
+			pivot := min(len(ns[i]), rem[i])
+			unsortedVector = append(unsortedVector, ns[i][:pivot]...)
+			regularVector = append(regularVector, ns[i][pivot:]...)
 		}
 		rem = []int{-1, -1}
 
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index 38f62aa07..f96e5c8a7 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -356,6 +356,52 @@ func TestTraverserPriorityMetrics(t *testing.T) {
 		require.Nil(t, next)
 	})
 
+	t.Run("one rep one metric fewer nodes", func(t *testing.T) {
+		selectors := []int{2}
+		replicas := []int{3}
+
+		nodes, cnr := testPlacement(selectors, replicas)
+
+		// Node_0, PK - ip4/0.0.0.0/tcp/0
+		nodes[0][0].SetAttribute("ClusterName", "A")
+		// Node_1, PK - ip4/0.0.0.0/tcp/1
+		nodes[0][1].SetAttribute("ClusterName", "B")
+
+		sdkNode := testNode(5)
+		sdkNode.SetAttribute("ClusterName", "B")
+
+		nodesCopy := copyVectors(nodes)
+
+		m := []Metric{NewAttributeMetric("ClusterName")}
+
+		tr, err := NewTraverser(
+			ForContainer(cnr),
+			UseBuilder(&testBuilder{
+				vectors: nodesCopy,
+			}),
+			WithoutSuccessTracking(),
+			WithPriorityMetrics(m),
+			WithNodeState(&nodeState{
+				node: &sdkNode,
+			}),
+		)
+		require.NoError(t, err)
+
+		// Without priority metric `ClusterName` the order will be:
+		// [ {Node_0 A}, {Node_1 A} ]
+		// With priority metric `ClusterName` and current node in cluster B
+		// the order should be:
+		// [ {Node_1 B}, {Node_0 A} ]
+		next := tr.Next()
+		require.NotNil(t, next)
+		require.Equal(t, 2, len(next))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+		next = tr.Next()
+		require.Nil(t, next)
+	})
+
 	t.Run("two reps two metrics", func(t *testing.T) {
 		selectors := []int{3, 3}
 		replicas := []int{2, 2}

From e5ea95c045fa5de03a699204102943cdb4bd7e1e Mon Sep 17 00:00:00 2001
From:
Evgenii Stratonikov Date: Thu, 21 Nov 2024 10:10:56 +0300 Subject: [PATCH 206/591] [#1514] go.mod: Update frostfs-contract to v0.21.0-rc.3 Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 84f9f9a66..c538a3178 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 diff --git a/go.sum b/go.sum index d93e9b74e..064f3274e 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 h1:8Z5iPhieCrbcdhxBuY/Bajh6V5fki7Whh0b4S2zYJYU= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0/go.mod h1:Y2Xorxc8SBO4phoek7n3XxaPZz5rIrFgDsU4TOjmlGA= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 h1:o3iqVmbvFsfe8kpB2Hvuix6Q/tAhbiPLP91xK4lmoBQ= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= @@ -267,6 +267,7 @@ github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4J github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= From 2771fdb8c7916c39dc855a0ee1a8eae97bb10205 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 21 Nov 2024 10:39:04 +0300 Subject: [PATCH 207/591] [#1514] adm/nns: Use nns.GetAllRecords() wrapper It was not possible previously, because GetAllRecords() was not declared safe in frostfs-contract. 
Signed-off-by: Evgenii Stratonikov
---
 cmd/frostfs-adm/internal/modules/morph/nns/record.go | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
index 66bb1b94f..32c88481f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
@@ -8,7 +8,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
 	commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
-	"github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap"
 	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
 	"github.com/spf13/cobra"
 )
@@ -77,11 +76,11 @@ func addRecord(cmd *cobra.Command, _ []string) {
 }
 
 func getRecords(cmd *cobra.Command, _ []string) {
-	c, act, hash := getRPCClient(cmd)
+	c, act, _ := getRPCClient(cmd)
 	name, _ := cmd.Flags().GetString(nnsNameFlag)
 	recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
 	if recordType == "" {
-		sid, r, err := unwrap.SessionIterator(act.Invoker.Call(hash, "getAllRecords", name))
+		sid, r, err := c.GetAllRecords(name)
 		commonCmd.ExitOnErr(cmd, "unable to get records: %w", err)
 		defer func() {
 			_ = act.Invoker.TerminateSession(sid)

From 9073e555db7f0f5e0a389ad3ac9c23885c2df67f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 21 Nov 2024 10:43:53 +0300
Subject: [PATCH 208/591] [#1514] adm/nns: Do not create actor for readonly commands

The `nns get-records` and `nns tokens` commands do not need to sign anything,
so remove the useless actor and use the invoker directly.
`NewLocalActor()` is only used in the `ape` and `nns` packages. The `ape`
package seems to use it correctly, only when alphabet wallets are provided,
so no changes there.

Also, remove the --alphabet-wallets flag from commands that do not need it.
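For illustration, a minimal sketch of the read-only path this change moves to
(the package and helper names are the ones imported in the diff below; the
function name `nnsReaderSketch` and the exact error handling are illustrative
only, not part of the patch):

```go
// nnsReaderSketch: read-only NNS access. An invoker with no signers is enough
// for safe (read-only) contract methods, so no alphabet wallets are opened.
func nnsReaderSketch(rpcClient invoker.RPCInvoke) (*client.ContractReader, error) {
	inv := invoker.New(rpcClient, nil)
	nnsCs, err := helper.GetContractByID(management.NewReader(inv), 1) // resolve the NNS contract hash
	if err != nil {
		return nil, err
	}
	return client.NewReader(inv, nnsCs.Hash), nil
}
```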
Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/actor.go | 48 +++++++------------ .../internal/modules/morph/nns/helper.go | 13 +++++ .../internal/modules/morph/nns/record.go | 9 ++-- .../internal/modules/morph/nns/tokens.go | 5 +- 4 files changed, 36 insertions(+), 39 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index ff0421335..eb0444408 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -38,38 +38,24 @@ func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*Local walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) var act *actor.Actor var accounts []*wallet.Account - if walletDir == "" { - account, err := wallet.NewAccount() - commonCmd.ExitOnErr(cmd, "unable to create dummy account: %w", err) - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: account.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: account, - }}) - if err != nil { - return nil, err - } - } else { - wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) - commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) - for _, w := range wallets { - acc, err := GetWalletAccount(w, accName) - commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) - accounts = append(accounts, acc) - } - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: accounts[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: accounts[0], - }}) - if err != nil { - return nil, err - } + wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) + commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) + + for _, w := range wallets { + acc, err := GetWalletAccount(w, accName) + commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) + accounts = append(accounts, acc) + } + act, err = actor.New(c, []actor.SignerAccount{{ + Signer: transaction.Signer{ + Account: accounts[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: accounts[0], + }}) + if err != nil { + return nil, err } return &LocalActor{ neoActor: act, diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index de439acd1..68a629c3c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -5,6 +5,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/spf13/cobra" @@ -24,3 +25,15 @@ func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, uti commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash } + +func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { + c, err := helper.GetN3Client(viper.GetViper()) + commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) + + inv := invoker.New(c, nil) + r := 
management.NewReader(inv) + nnsCs, err := helper.GetContractByID(r, 1) + commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) + + return client.NewReader(inv, nnsCs.Hash), inv +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 32c88481f..5a6db6e72 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -28,7 +28,6 @@ func initAddRecordCmd() { func initGetRecordsCmd() { Cmd.AddCommand(getRecordsCmd) getRecordsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - getRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) getRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) getRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) @@ -76,16 +75,16 @@ func addRecord(cmd *cobra.Command, _ []string) { } func getRecords(cmd *cobra.Command, _ []string) { - c, act, _ := getRPCClient(cmd) + c, inv := nnsReader(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) if recordType == "" { sid, r, err := c.GetAllRecords(name) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) defer func() { - _ = act.Invoker.TerminateSession(sid) + _ = inv.TerminateSession(sid) }() - items, err := act.Invoker.TraverseIterator(sid, &r, 0) + items, err := inv.TraverseIterator(sid, &r, 0) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) for len(items) != 0 { for j := range items { @@ -96,7 +95,7 @@ func getRecords(cmd *cobra.Command, _ []string) { recordTypeToString(nns.RecordType(rs[1].Value().(*big.Int).Int64())), string(bs)) } - items, err = act.Invoker.TraverseIterator(sid, &r, 0) + items, err = inv.TraverseIterator(sid, &r, 0) commonCmd.ExitOnErr(cmd, "unable to get records: %w", err) } } else { diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go index 3c7136e9d..4ccbb1677 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go @@ -18,12 +18,11 @@ const ( func initTokensCmd() { Cmd.AddCommand(tokensCmd) tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc) } func listTokens(cmd *cobra.Command, _ []string) { - c, _, _ := getRPCClient(cmd) + c, _ := nnsReader(cmd) it, err := c.Tokens() commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err) for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) { @@ -41,7 +40,7 @@ func listTokens(cmd *cobra.Command, _ []string) { } } -func getCnameRecord(c *client.Contract, token []byte) (string, error) { +func getCnameRecord(c *client.ContractReader, token []byte) (string, error) { items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME))) // GetRecords returns the error "not an array" if the domain does not contain records. From e5ea95c045fa5de03a699204102943cdb4bd7e1e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 21 Nov 2024 10:51:38 +0300 Subject: [PATCH 209/591] [#1514] adm/nns: Do not return hash from getRPCClient() It was unused and we employ better abstractions now. 
gopatch: ``` @@ var a, b expression @@ -a, b, _ := getRPCClient(...) +a, b := getRPCClient(...) ``` Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/nns/domains.go | 4 ++-- cmd/frostfs-adm/internal/modules/morph/nns/helper.go | 5 ++--- cmd/frostfs-adm/internal/modules/morph/nns/record.go | 6 +++--- cmd/frostfs-adm/internal/modules/morph/nns/renew.go | 2 +- cmd/frostfs-adm/internal/modules/morph/nns/update.go | 2 +- 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index 3684db94a..c11154595 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -24,7 +24,7 @@ func initRegisterCmd() { } func registerDomain(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) email, _ := cmd.Flags().GetString(nnsEmailFlag) @@ -53,7 +53,7 @@ func initDeleteCmd() { } func deleteDomain(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) h, vub, err := c.DeleteDomain(name) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 68a629c3c..4b7ae4664 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -7,12 +7,11 @@ import ( commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" - "github.com/nspcc-dev/neo-go/pkg/util" "github.com/spf13/cobra" "github.com/spf13/viper" ) -func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, util.Uint160) { +func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { v := viper.GetViper() c, err := helper.GetN3Client(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) @@ -23,7 +22,7 @@ func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor, uti r := management.NewReader(ac.Invoker) nnsCs, err := helper.GetContractByID(r, 1) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - return client.New(ac, nnsCs.Hash), ac, nnsCs.Hash + return client.New(ac, nnsCs.Hash), ac } func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 5a6db6e72..cb6c5e094 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -59,7 +59,7 @@ func initDelRecordCmd() { } func addRecord(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) data, _ := cmd.Flags().GetString(nnsRecordDataFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) @@ -112,7 +112,7 @@ func getRecords(cmd *cobra.Command, _ []string) { } func delRecords(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) typ, err := getRecordType(recordType) @@ -127,7 +127,7 @@ func delRecords(cmd *cobra.Command, _ 
[]string) { } func delRecord(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) data, _ := cmd.Flags().GetString(nnsRecordDataFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go index b13092240..80105ded2 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go @@ -14,7 +14,7 @@ func initRenewCmd() { } func renewDomain(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) h, vub, err := c.Renew(name) commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go index 3437316e3..dc8dcb3a3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/update.go @@ -30,7 +30,7 @@ func initUpdateCmd() { } func updateSOA(cmd *cobra.Command, _ []string) { - c, actor, _ := getRPCClient(cmd) + c, actor := getRPCClient(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) email, _ := cmd.Flags().GetString(nnsEmailFlag) From 256f96e252f52b9dafd7b14d1608498d622df350 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 21 Nov 2024 10:54:14 +0300 Subject: [PATCH 210/591] [#1514] adm/nns: Rename getRPCClient() to nnsWriter() Make it more specific and similar to nnsReader(). Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/nns/domains.go | 4 ++-- cmd/frostfs-adm/internal/modules/morph/nns/helper.go | 2 +- cmd/frostfs-adm/internal/modules/morph/nns/record.go | 6 +++--- cmd/frostfs-adm/internal/modules/morph/nns/renew.go | 2 +- cmd/frostfs-adm/internal/modules/morph/nns/update.go | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index c11154595..1668bb327 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -24,7 +24,7 @@ func initRegisterCmd() { } func registerDomain(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) email, _ := cmd.Flags().GetString(nnsEmailFlag) @@ -53,7 +53,7 @@ func initDeleteCmd() { } func deleteDomain(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) h, vub, err := c.DeleteDomain(name) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 4b7ae4664..29b0a24ae 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/viper" ) -func getRPCClient(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { +func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { v := viper.GetViper() c, err := helper.GetN3Client(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 
cb6c5e094..09ed92ab3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -59,7 +59,7 @@ func initDelRecordCmd() { } func addRecord(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) data, _ := cmd.Flags().GetString(nnsRecordDataFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) @@ -112,7 +112,7 @@ func getRecords(cmd *cobra.Command, _ []string) { } func delRecords(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) typ, err := getRecordType(recordType) @@ -127,7 +127,7 @@ func delRecords(cmd *cobra.Command, _ []string) { } func delRecord(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) data, _ := cmd.Flags().GetString(nnsRecordDataFlag) recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go index 80105ded2..53bd943f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/renew.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/renew.go @@ -14,7 +14,7 @@ func initRenewCmd() { } func renewDomain(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) h, vub, err := c.Renew(name) commonCmd.ExitOnErr(cmd, "unable to renew domain: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/update.go b/cmd/frostfs-adm/internal/modules/morph/nns/update.go index dc8dcb3a3..c6d77ead6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/update.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/update.go @@ -30,7 +30,7 @@ func initUpdateCmd() { } func updateSOA(cmd *cobra.Command, _ []string) { - c, actor := getRPCClient(cmd) + c, actor := nnsWriter(cmd) name, _ := cmd.Flags().GetString(nnsNameFlag) email, _ := cmd.Flags().GetString(nnsEmailFlag) From 99f9e59de974e1219ad78befd89331826f3712f3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 21 Nov 2024 11:52:50 +0300 Subject: [PATCH 211/591] [#1514] adm: Remove --alphabet-wallets flag from readonly commands Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/frostfsid/frostfsid.go | 8 -------- cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 1 - 2 files changed, 9 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 091d6634a..74da52a8f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -61,7 +61,6 @@ var ( Use: "list-namespaces", Short: "List all namespaces in frostfsid", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListNamespaces, @@ -91,7 +90,6 @@ var ( Use: "list-subjects", Short: "List subjects in namespace", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = 
viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListSubjects, @@ -121,7 +119,6 @@ var ( Use: "list-groups", Short: "List groups in namespace", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListGroups, @@ -151,7 +148,6 @@ var ( Use: "list-group-subjects", Short: "List subjects in group", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) }, Run: frostfsidListGroupSubjects, @@ -169,7 +165,6 @@ func initFrostfsIDCreateNamespaceCmd() { func initFrostfsIDListNamespacesCmd() { Cmd.AddCommand(frostfsidListNamespacesCmd) frostfsidListNamespacesCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - frostfsidListNamespacesCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initFrostfsIDCreateSubjectCmd() { @@ -193,7 +188,6 @@ func initFrostfsIDListSubjectsCmd() { frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") - frostfsidListSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initFrostfsIDCreateGroupCmd() { @@ -217,7 +211,6 @@ func initFrostfsIDListGroupsCmd() { Cmd.AddCommand(frostfsidListGroupsCmd) frostfsidListGroupsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupsCmd.Flags().String(namespaceFlag, "", "Namespace to list groups") - frostfsidListGroupsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initFrostfsIDAddSubjectToGroupCmd() { @@ -242,7 +235,6 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") - frostfsidListGroupSubjectsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go index 55b7e64f0..291482e0f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go @@ -12,7 +12,6 @@ var ( Short: "List netmap candidates nodes", PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) - _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) }, Run: listNetmapCandidatesNodes, } From 1ed7ab75fbb62f293e17ecd6afc62b76c543206e Mon Sep 17 00:00:00 2001 From: Airat 
Arifullin Date: Fri, 22 Nov 2024 15:19:51 +0300 Subject: [PATCH 212/591] [#1517] cli: Print the reason of ape manager error Signed-off-by: Airat Arifullin --- cmd/internal/common/exit.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index 9e4fa3098..b8acf0143 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -26,13 +26,15 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { _ = iota internal aclDenied + apemanagerDenied ) var ( code int - internalErr = new(sdkstatus.ServerInternal) - accessErr = new(sdkstatus.ObjectAccessDenied) + internalErr = new(sdkstatus.ServerInternal) + accessErr = new(sdkstatus.ObjectAccessDenied) + apemanagerErr = new(sdkstatus.APEManagerAccessDenied) ) switch { @@ -41,6 +43,9 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { case errors.As(err, &accessErr): code = aclDenied err = fmt.Errorf("%w: %s", err, accessErr.Reason()) + case errors.As(err, &apemanagerErr): + code = apemanagerDenied + err = fmt.Errorf("%w: %s", err, apemanagerErr.Reason()) default: code = internal } From 3ebd560f4225b96788ca12cc6662245486d54d88 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Fri, 22 Nov 2024 15:50:47 +0300 Subject: [PATCH 213/591] [#1519] cli: Make descriptive help for`--rule` option Signed-off-by: Airat Arifullin --- cmd/internal/common/ape/flags.go | 62 +++++++++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/cmd/internal/common/ape/flags.go b/cmd/internal/common/ape/flags.go index c5e2a3a99..d8b2e88a2 100644 --- a/cmd/internal/common/ape/flags.go +++ b/cmd/internal/common/ape/flags.go @@ -2,7 +2,6 @@ package ape const ( RuleFlag = "rule" - RuleFlagDesc = "Rule statement" PathFlag = "path" PathFlagDesc = "Path to encoded chain in JSON or binary format" TargetNameFlag = "target-name" @@ -17,3 +16,64 @@ const ( ChainNameFlagDesc = "Chain name(ingress|s3)" AllFlag = "all" ) + +const RuleFlagDesc = `Defines an Access Policy Engine (APE) rule in the format: + [:status_detail] ... ... ... + +Status: + - allow Permits specified actions + - deny Prohibits specified actions + - deny:QuotaLimitReached Denies access due to quota limits + +Actions: + Object operations: + - Object.Put, Object.Get, etc. + - Object.* (all object operations) + Container operations: + - Container.Put, Container.Get, etc. 
+ - Container.* (all container operations) + +Conditions: + ResourceCondition: + Format: ResourceCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Object:version + - $Object:objectID + - $Object:containerID + - $Object:ownerID + - $Object:creationEpoch + - $Object:payloadLength + - $Object:payloadHash + - $Object:objectType + - $Object:homomorphicHash + +RequestCondition: + Format: RequestCondition:"key"=value, "key"!=value + Reserved properties (use '\' before '$'): + - $Actor:publicKey + - $Actor:role + + Example: + ResourceCondition:"check_key"!="check_value" RequestCondition:"$Actor:role"=others + +Resources: + For objects: + - namespace/cid/oid (specific object) + - namespace/cid/* (all objects in container) + - namespace/* (all objects in namespace) + - * (all objects) + - /* (all objects in root namespace) + - /cid/* (all objects in root container) + - /cid/oid (specific object in root container) + + For containers: + - namespace/cid (specific container) + - namespace/* (all containers in namespace) + - * (all containers) + - /cid (root container) + - /* (all root containers) + +Notes: + - Cannot mix object and container operations in one rule + - Default behavior is Any=false unless 'any' is specified + - Use 'all' keyword to explicitly set Any=false` From 0e5524dac75b3e8a287ba0b820dc4f8fccdecfb4 Mon Sep 17 00:00:00 2001 From: George Bartolomey Date: Sat, 23 Nov 2024 15:08:50 +0300 Subject: [PATCH 214/591] [#1515] adm: Print address in base58 format in morph ape get-admin Signed-off-by: George Bartolomey --- cmd/frostfs-adm/internal/modules/morph/ape/ape.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go index 8fcd4a441..1960faab4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape.go @@ -8,7 +8,7 @@ import ( commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" apeCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/ape" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" - "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -200,7 +200,7 @@ func listRuleChains(cmd *cobra.Command, _ []string) { func setAdmin(cmd *cobra.Command, _ []string) { s, _ := cmd.Flags().GetString(addrAdminFlag) - addr, err := util.Uint160DecodeStringLE(s) + addr, err := address.StringToUint160(s) commonCmd.ExitOnErr(cmd, "can't decode admin addr: %w", err) pci, ac := newPolicyContractInterface(cmd) h, vub, err := pci.SetAdmin(addr) @@ -214,7 +214,7 @@ func getAdmin(cmd *cobra.Command, _ []string) { pci, _ := newPolicyContractReaderInterface(cmd) addr, err := pci.GetAdmin() commonCmd.ExitOnErr(cmd, "unable to get admin: %w", err) - cmd.Println(addr.StringLE()) + cmd.Println(address.Uint160ToString(addr)) } func listTargets(cmd *cobra.Command, _ []string) { From 7eac5fb18bd1d234b65a311bacdf1326d256e283 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 25 Nov 2024 13:55:14 +0300 Subject: [PATCH 215/591] Release v0.44.0 Signed-off-by: Evgenii Stratonikov --- CHANGELOG.md | 24 ++++++++++++++++++++++++ VERSION | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4ba6a5d6..92c84ab16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,30 @@ Changelog for FrostFS Node ### Removed ### 
Updated +## [v0.44.0] - 2024-25-11 - Rongbuk + +### Added +- Allow to prioritize nodes during GET traversal via attributes (#1439) +- Add metrics for the frostfsid cache (#1464) +- Customize constant attributes attached to every tracing span (#1488) +- Manage additional keys in the `frostfsid` contract (#1505) +- Describe `--rule` flag in detail for `frostfs-cli ape-manager` subcommands (#1519) + +### Changed +- Support richer interaction with the console in `frostfs-cli container policy-playground` (#1396) +- Print address in base58 format in `frostfs-adm morph policy set-admin` (#1515) + +### Fixed +- Fix EC object search (#1408) +- Fix EC object put when one of the nodes is unavailable (#1427) + +### Removed +- Drop most of the eACL-related code (#1425) +- Remove `--basic-acl` flag from `frostfs-cli container create` (#1483) + +### Upgrading from v0.43.0 +The metabase schema has changed completely, resync is required. + ## [v0.42.0] ### Added diff --git a/VERSION b/VERSION index 01efe7f3a..9052dab96 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.42.0 +v0.44.0 From caa4253249a5713d201d262939343a28df70a620 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 09:59:43 +0300 Subject: [PATCH 216/591] [#1522] adm: Remove unnecessary variable declaration It is better to have small scope. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 6780e6dd3..0d28383a2 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -57,12 +57,11 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) - var ch util.Uint160 r := management.NewReader(inv) nnsCs, err := helper.GetContractByID(r, 1) commonCmd.ExitOnErr(cmd, "can't get NNS contract state: %w", err) - ch, err = helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) + ch, err := helper.NNSResolveHash(inv, nnsCs.Hash, helper.DomainOf(constants.PolicyContract)) commonCmd.ExitOnErr(cmd, "unable to resolve policy contract hash: %w", err) invokerAdapter := &invokerAdapter{ From b1766e47c766ed2591e8f8a6b97dbfc13ad6976c Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:12:54 +0300 Subject: [PATCH 217/591] [#1522] adm/helper: Remove unused GetCommittee() method from the Client interface Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/local_client.go | 5 ----- cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 2 -- 2 files changed, 7 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index ed028fb7c..17bee7858 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -128,11 +128,6 @@ func (l *LocalClient) GetApplicationLog(h util.Uint256, t *trigger.Type) (*resul return &a, nil } -func (l *LocalClient) GetCommittee() (keys.PublicKeys, error) { - // not used by `morph init` command - panic("unexpected call") -} - // InvokeFunction is implemented via `InvokeScript`. 
func (l *LocalClient) InvokeFunction(h util.Uint160, method string, sPrm []smartcontract.Parameter, ss []transaction.Signer) (*result.Invoke, error) { var err error diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index e62a21b3f..5563b1fd9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/neorpc/result" "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" @@ -32,7 +31,6 @@ type Client interface { GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) GetVersion() (*result.Version, error) SendRawTransaction(*transaction.Transaction) (util.Uint256, error) - GetCommittee() (keys.PublicKeys, error) CalculateNetworkFee(tx *transaction.Transaction) (int64, error) } From 16053916286fd9cc3e7badc068514cb4d267c03f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:14:00 +0300 Subject: [PATCH 218/591] [#1522] adm/helper: Simplify Client interface Just reuse `actor.RPCActor`. No functional changes. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index 5563b1fd9..066ce01fc 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -24,14 +24,10 @@ import ( // Client represents N3 client interface capable of test-invoking scripts // and sending signed transactions to chain. type Client interface { - invoker.RPCInvoke + actor.RPCActor - GetBlockCount() (uint32, error) GetNativeContracts() ([]state.Contract, error) GetApplicationLog(util.Uint256, *trigger.Type) (*result.ApplicationLog, error) - GetVersion() (*result.Version, error) - SendRawTransaction(*transaction.Transaction) (util.Uint256, error) - CalculateNetworkFee(tx *transaction.Transaction) (int64, error) } type HashVUBPair struct { From b10c9543772b9ac4b00abf06a28b9da62f0ef022 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:22:54 +0300 Subject: [PATCH 219/591] [#1522] adm: Split NewLocalClient() into functions No functional changes. 
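The extracted getBlockSigningAccounts() keeps the existing ordering logic: accounts are sorted to follow the standby committee order through an index map. A minimal standalone sketch of that pattern (illustrative names only, not taken from the codebase):

```
package main

import (
	"fmt"
	"sort"
)

// sortByReference reorders items to follow the order defined by reference,
// mirroring the indexMap trick used in getBlockSigningAccounts().
func sortByReference(items, reference []string) {
	indexMap := make(map[string]int, len(reference))
	for i, v := range reference {
		indexMap[v] = i
	}
	sort.Slice(items, func(i, j int) bool {
		return indexMap[items[i]] < indexMap[items[j]]
	})
}

func main() {
	reference := []string{"keyA", "keyB", "keyC"}
	items := []string{"keyC", "keyA", "keyB"}
	sortByReference(items, reference)
	fmt.Println(items) // [keyA keyB keyC]
}
```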
Signed-off-by: Evgenii Stratonikov --- .../modules/morph/helper/local_client.go | 87 +++++++++++-------- 1 file changed, 52 insertions(+), 35 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 17bee7858..34ce5938a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -58,17 +58,59 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet return nil, err } - m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ProtocolConfiguration.ValidatorsCount)) - accounts := make([]*wallet.Account, len(wallets)) - for i := range accounts { - accounts[i], err = GetWalletAccount(wallets[i], constants.ConsensusAccountName) - if err != nil { - return nil, err + go bc.Run() + + accounts, err := getBlockSigningAccounts(cfg.ProtocolConfiguration, wallets) + if err != nil { + return nil, err + } + + if cmd.Name() != "init" { + if err := restoreDump(bc, dumpPath); err != nil { + return nil, fmt.Errorf("restore dump: %w", err) } } + return &LocalClient{ + bc: bc, + dumpPath: dumpPath, + accounts: accounts, + }, nil +} + +func restoreDump(bc *core.Blockchain, dumpPath string) error { + f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) + if err != nil { + return fmt.Errorf("can't open local dump: %w", err) + } + defer f.Close() + + r := io.NewBinReaderFromIO(f) + + var skip uint32 + if bc.BlockHeight() != 0 { + skip = bc.BlockHeight() + 1 + } + + count := r.ReadU32LE() - skip + if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { + return err + } + return nil +} + +func getBlockSigningAccounts(cfg config.ProtocolConfiguration, wallets []*wallet.Wallet) ([]*wallet.Account, error) { + accounts := make([]*wallet.Account, len(wallets)) + for i := range accounts { + acc, err := GetWalletAccount(wallets[i], constants.ConsensusAccountName) + if err != nil { + return nil, err + } + accounts[i] = acc + } + indexMap := make(map[string]int) - for i, pub := range cfg.ProtocolConfiguration.StandbyCommittee { + for i, pub := range cfg.StandbyCommittee { indexMap[pub] = i } @@ -77,37 +119,12 @@ func NewLocalClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet pj := accounts[j].PrivateKey().PublicKey().Bytes() return indexMap[string(pi)] < indexMap[string(pj)] }) - sort.Slice(accounts[:cfg.ProtocolConfiguration.ValidatorsCount], func(i, j int) bool { + sort.Slice(accounts[:cfg.ValidatorsCount], func(i, j int) bool { return accounts[i].PublicKey().Cmp(accounts[j].PublicKey()) == -1 }) - go bc.Run() - - if cmd.Name() != "init" { - f, err := os.OpenFile(dumpPath, os.O_RDONLY, 0o600) - if err != nil { - return nil, fmt.Errorf("can't open local dump: %w", err) - } - defer f.Close() - - r := io.NewBinReaderFromIO(f) - - var skip uint32 - if bc.BlockHeight() != 0 { - skip = bc.BlockHeight() + 1 - } - - count := r.ReadU32LE() - skip - if err := chaindump.Restore(bc, r, skip, count, nil); err != nil { - return nil, fmt.Errorf("can't restore local dump: %w", err) - } - } - - return &LocalClient{ - bc: bc, - dumpPath: dumpPath, - accounts: accounts[:m], - }, nil + m := smartcontract.GetDefaultHonestNodeCount(int(cfg.ValidatorsCount)) + return accounts[:m], nil } func (l *LocalClient) GetBlockCount() (uint32, error) { From 61ee1b56103bd77ea3318f23dceebb1928b846a3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:33:24 +0300 Subject: [PATCH 220/591] [#1522] adm: 
Simplify LocalClient.SendRawTransaction() The old code was there before Copy() method was introduced. It was also supposed to check errors, however, they are already checked server-side. Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/local_client.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 34ce5938a..d0a05d5c7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -308,13 +308,7 @@ func (l *LocalClient) InvokeScript(script []byte, signers []transaction.Signer) } func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint256, error) { - // We need to test that transaction was formed correctly to catch as many errors as we can. - bs := tx.Bytes() - _, err := transaction.NewTransactionFromBytes(bs) - if err != nil { - return tx.Hash(), fmt.Errorf("invalid transaction: %w", err) - } - + tx = tx.Copy() l.transactions = append(l.transactions, tx) return tx.Hash(), nil } From 49959c4166d92cd46df04d352c7928f92fbf1aa9 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:45:47 +0300 Subject: [PATCH 221/591] [#1522] adm/helper: Unexport GetFrostfsIDAdmin() It is used in `helper` package only, besides unit-tests. Move unit-tests to the same package, where they belong. Signed-off-by: Evgenii Stratonikov --- .../morph/frostfsid/frostfsid_util_test.go | 47 ---------------- .../internal/modules/morph/helper/contract.go | 2 +- .../modules/morph/helper/frostfsid.go | 2 +- .../modules/morph/helper/frostfsid_test.go | 53 +++++++++++++++++++ 4 files changed, 55 insertions(+), 49 deletions(-) create mode 100644 cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go index cce859d2f..1d0bc8441 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid_util_test.go @@ -1,59 +1,12 @@ package frostfsid import ( - "encoding/hex" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/ape" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/spf13/viper" "github.com/stretchr/testify/require" ) -func TestFrostfsIDConfig(t *testing.T) { - pks := make([]*keys.PrivateKey, 4) - for i := range pks { - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - pks[i] = pk - } - - fmts := []string{ - pks[0].GetScriptHash().StringLE(), - address.Uint160ToString(pks[1].GetScriptHash()), - hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), - hex.EncodeToString(pks[3].PublicKey().Bytes()), - } - - for i := range fmts { - v := viper.New() - v.Set("frostfsid.admin", fmts[i]) - - actual, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, pks[i].GetScriptHash(), actual) - } - - t.Run("bad key", func(t *testing.T) { - v := viper.New() - v.Set("frostfsid.admin", "abc") - - _, found, err := helper.GetFrostfsIDAdmin(v) - require.Error(t, err) - require.True(t, found) - }) - t.Run("missing key", func(t *testing.T) { - v := viper.New() - 
- _, found, err := helper.GetFrostfsIDAdmin(v) - require.NoError(t, err) - require.False(t, found) - }) -} - func TestNamespaceRegexp(t *testing.T) { for _, tc := range []struct { name string diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go index eea3b040e..64d1c6393 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go @@ -82,7 +82,7 @@ func GetContractDeployData(c *InitializeContext, ctrName string, keysParam []any h, found, err = getFrostfsIDAdminFromContract(c.ReadOnlyInvoker) } if method != constants.UpdateMethodName || err == nil && !found { - h, found, err = GetFrostfsIDAdmin(viper.GetViper()) + h, found, err = getFrostfsIDAdmin(viper.GetViper()) } if err != nil { return nil, err diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go index f29042b82..fce2dfb74 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid.go @@ -11,7 +11,7 @@ import ( const frostfsIDAdminConfigKey = "frostfsid.admin" -func GetFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { +func getFrostfsIDAdmin(v *viper.Viper) (util.Uint160, bool, error) { admin := v.GetString(frostfsIDAdminConfigKey) if admin == "" { return util.Uint160{}, false, nil diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go new file mode 100644 index 000000000..38991e962 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/morph/helper/frostfsid_test.go @@ -0,0 +1,53 @@ +package helper + +import ( + "encoding/hex" + "testing" + + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" +) + +func TestFrostfsIDConfig(t *testing.T) { + pks := make([]*keys.PrivateKey, 4) + for i := range pks { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + pks[i] = pk + } + + fmts := []string{ + pks[0].GetScriptHash().StringLE(), + address.Uint160ToString(pks[1].GetScriptHash()), + hex.EncodeToString(pks[2].PublicKey().UncompressedBytes()), + hex.EncodeToString(pks[3].PublicKey().Bytes()), + } + + for i := range fmts { + v := viper.New() + v.Set("frostfsid.admin", fmts[i]) + + actual, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, pks[i].GetScriptHash(), actual) + } + + t.Run("bad key", func(t *testing.T) { + v := viper.New() + v.Set("frostfsid.admin", "abc") + + _, found, err := getFrostfsIDAdmin(v) + require.Error(t, err) + require.True(t, found) + }) + t.Run("missing key", func(t *testing.T) { + v := viper.New() + + _, found, err := getFrostfsIDAdmin(v) + require.NoError(t, err) + require.False(t, found) + }) +} From a6ef4ab52437802432d0ce022d42ad7fbafd844b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:53:09 +0300 Subject: [PATCH 222/591] [#1522] adm/helper: Rename GetN3Client() -> NewRemoteClient() Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go | 4 ++-- cmd/frostfs-adm/internal/modules/morph/balance/balance.go | 2 +- cmd/frostfs-adm/internal/modules/morph/config/config.go | 2 +- cmd/frostfs-adm/internal/modules/morph/container/container.go | 4 ++-- 
.../internal/modules/morph/contract/dump_hashes.go | 2 +- cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go | 2 +- .../internal/modules/morph/helper/initialize_ctx.go | 2 +- cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 2 +- .../internal/modules/morph/netmap/netmap_candidates.go | 2 +- cmd/frostfs-adm/internal/modules/morph/nns/helper.go | 4 ++-- cmd/frostfs-adm/internal/modules/morph/notary/notary.go | 2 +- cmd/frostfs-adm/internal/modules/morph/policy/policy.go | 2 +- 12 files changed, 15 insertions(+), 15 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 0d28383a2..914682647 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -53,7 +53,7 @@ func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { } func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorageReader, *invoker.Invoker) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) @@ -73,7 +73,7 @@ func newPolicyContractReaderInterface(cmd *cobra.Command) (*morph.ContractStorag } func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *helper.LocalActor) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index 5519705d4..be42f2aa5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -51,7 +51,7 @@ func dumpBalances(cmd *cobra.Command, _ []string) error { nmHash util.Uint160 ) - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index 3a7f84acb..65ccc9f9f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -26,7 +26,7 @@ import ( const forceConfigSet = "force" func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index 6f08d1655..e72dc15e9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -76,7 +76,7 @@ func dumpContainers(cmd *cobra.Command, _ []string) error { return fmt.Errorf("invalid filename: %w", err) } - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } @@ -157,7 +157,7 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo } func listContainers(cmd *cobra.Command, _ []string) error { - c, err := 
helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index be2134b77..437e2480d 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -36,7 +36,7 @@ type contractDumpInfo struct { } func dumpContractHashes(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return fmt.Errorf("can't create N3 client: %w", err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 74da52a8f..c3a232710 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -514,7 +514,7 @@ func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, se } func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Uint160) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index b5b6adf05..335ec5ac9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -191,7 +191,7 @@ func createClient(cmd *cobra.Command, v *viper.Viper, wallets []*wallet.Wallet) } c, err = NewLocalClient(cmd, v, wallets, ldf.Value.String()) } else { - c, err = GetN3Client(v) + c, err = NewRemoteClient(v) } if err != nil { return nil, fmt.Errorf("can't create N3 client: %w", err) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index 066ce01fc..25160305d 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -42,7 +42,7 @@ type ClientContext struct { SentTxs []HashVUBPair } -func GetN3Client(v *viper.Viper) (Client, error) { +func NewRemoteClient(v *viper.Viper) (Client, error) { // number of opened connections // by neo-go client per one host const ( diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go index d8471bb9a..a689e0ec1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go +++ b/cmd/frostfs-adm/internal/modules/morph/netmap/netmap_candidates.go @@ -13,7 +13,7 @@ import ( ) func listNetmapCandidatesNodes(cmd *cobra.Command, _ []string) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 29b0a24ae..b13cbc8a1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -13,7 +13,7 @@ import ( func nnsWriter(cmd 
*cobra.Command) (*client.Contract, *helper.LocalActor) { v := viper.GetViper() - c, err := helper.GetN3Client(v) + c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) @@ -26,7 +26,7 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { } func nnsReader(cmd *cobra.Command) (*client.ContractReader, *invoker.Invoker) { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) inv := invoker.New(c, nil) diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index 9b213da4e..fd42d5a4a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -89,7 +89,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error { } func transferGas(cmd *cobra.Command, acc *wallet.Account, accHash util.Uint160, gasAmount fixedn.Fixed8, till int64) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) if err != nil { return err } diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index 36547e22c..686a244f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -62,7 +62,7 @@ func SetPolicyCmd(cmd *cobra.Command, args []string) error { } func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { - c, err := helper.GetN3Client(viper.GetViper()) + c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "can't create N3 client:", err) inv := invoker.New(c, nil) From 2469e0c6831604b70e11b4418b42ddf8f1fc2593 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 10:55:51 +0300 Subject: [PATCH 223/591] [#1522] adm/helper: Remove NewActor() helper It is used once, it is used only internally and it is single-statement. I see no justification in having it as a separate function. It introduces confusion, because we also have NewLocalActor(). 
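For reference, the whole helper reduces to one actor.New() call with a globally-scoped committee signer. A sketch of that single statement (function name here is illustrative; package clause and imports added for completeness):

```
package helper

import (
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
)

// newCommitteeActor shows the statement that is now inlined at the call site:
// an actor signing with the committee account under the Global witness scope.
func newCommitteeActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) {
	return actor.New(c, []actor.SignerAccount{{
		Signer: transaction.Signer{
			Account: committeeAcc.Contract.ScriptHash(),
			Scopes:  transaction.Global,
		},
		Account: committeeAcc,
	}})
}
```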
Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/n3client.go | 8 +++++++- .../internal/modules/morph/helper/util.go | 12 ------------ 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index 25160305d..03009ebe3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -83,7 +83,13 @@ func NewRemoteClient(v *viper.Viper) (Client, error) { } func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { - commAct, err := NewActor(c, committeeAcc) + commAct, err := actor.New(c, []actor.SignerAccount{{ + Signer: transaction.Signer{ + Account: committeeAcc.Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: committeeAcc, + }}) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index 8c6b90539..c26aa447b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -15,10 +15,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "github.com/nspcc-dev/neo-go/pkg/core/state" - "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/viper" @@ -87,16 +85,6 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er return wallets, nil } -func NewActor(c actor.RPCActor, committeeAcc *wallet.Account) (*actor.Actor, error) { - return actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: committeeAcc.Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: committeeAcc, - }}) -} - func ReadContract(ctrPath, ctrName string) (*ContractState, error) { rawNef, err := os.ReadFile(filepath.Join(ctrPath, ctrName+"_contract.nef")) if err != nil { From 9e275d44c8bb9c32134c4dabeb2757580da7ea75 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 13:11:43 +0300 Subject: [PATCH 224/591] [#1522] adm/helper: Unexport DefaultClientContext() Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go | 2 +- cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index 335ec5ac9..08a87b999 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -139,7 +139,7 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex return nil, err } - cliCtx, err := DefaultClientContext(c, committeeAcc) + cliCtx, err := defaultClientContext(c, committeeAcc) if err != nil { return nil, fmt.Errorf("client context: %w", err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index 03009ebe3..3f3a66cb6 100644 --- 
a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -82,7 +82,7 @@ func NewRemoteClient(v *viper.Viper) (Client, error) { return c, nil } -func DefaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { +func defaultClientContext(c Client, committeeAcc *wallet.Account) (*ClientContext, error) { commAct, err := actor.New(c, []actor.SignerAccount{{ Signer: transaction.Signer{ Account: committeeAcc.Contract.ScriptHash(), From 11703707533e085d37cd7d77be41801da5def702 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 13:14:02 +0300 Subject: [PATCH 225/591] [#1522] adm/helper: Rename createSingleAccounts() -> getSingleAccounts() It doesn't create any accounts, purely finds them in the wallet. Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/initialize_ctx.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index 08a87b999..8e5615baa 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -134,7 +134,7 @@ func NewInitializeContext(cmd *cobra.Command, v *viper.Viper) (*InitializeContex return nil, err } - accounts, err := createWalletAccounts(wallets) + accounts, err := getSingleAccounts(wallets) if err != nil { return nil, err } @@ -211,7 +211,7 @@ func getContractsPath(cmd *cobra.Command, needContracts bool) (string, error) { return ctrPath, nil } -func createWalletAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { +func getSingleAccounts(wallets []*wallet.Wallet) ([]*wallet.Account, error) { accounts := make([]*wallet.Account, len(wallets)) for i, w := range wallets { acc, err := GetWalletAccount(w, constants.SingleAccountName) From aac65001e524b789c6e8f1d55c70bca6f47293d1 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 22 Nov 2024 13:19:58 +0300 Subject: [PATCH 226/591] [#1522] adm/frostfsid: Remove unreachable condition SendConsensusTx() modifies SendTxs field, if it is not the case, there is a bug in code. 
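The general shape of the argument, as a self-contained toy (names are illustrative, not from the codebase): once the sender is guaranteed to record every transaction, an emptiness guard in the waiter can never trigger and only hides bugs.

```
package main

import "fmt"

// sender records every transaction it sends; the waiter relies on that contract.
type sender struct{ sentTxs []string }

func (s *sender) send(tx string) {
	// Always appends, mirroring how SendConsensusTx() fills SentTxs.
	s.sentTxs = append(s.sentTxs, tx)
}

func (s *sender) waitFirst() string {
	// No `if len(s.sentTxs) == 0` guard: callers must send() first,
	// and violating that is a programming error worth surfacing.
	return s.sentTxs[0]
}

func main() {
	s := &sender{}
	s.send("tx-1")
	fmt.Println(s.waitFirst()) // tx-1
}
```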
Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/frostfsid/frostfsid.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index c3a232710..db98bb8ad 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,7 +1,6 @@ package frostfsid import ( - "errors" "fmt" "math/big" "sort" @@ -489,10 +488,6 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { } f.bw.Reset() - if len(f.wCtx.SentTxs) == 0 { - return nil, errors.New("no transactions to wait") - } - f.wCtx.Command.Println("Waiting for transactions to persist...") return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) } From 01acec708fa8f0c536ebc79e8f03ee2420ef3731 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 28 Nov 2024 09:15:29 +0300 Subject: [PATCH 227/591] [#1525] pilorama: Use AppendUint* helpers from stdlib gopatch: ``` @@ var slice, e expression @@ +import "encoding/binary" -append(slice, byte(e), byte(e >> 8)) +binary.LittleEndian.AppendUint16(slice, e) ``` Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/pilorama/boltdb.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index c62d728b1..6e68e9986 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1240,7 +1240,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f nodes = nil length = actualLength + 1 count = 0 - c.Seek(append(prefix, byte(length), byte(length>>8))) + c.Seek(binary.LittleEndian.AppendUint16(prefix, length)) c.Prev() // c.Next() will be performed by for loop } } @@ -1664,7 +1664,7 @@ func internalKeyPrefix(key []byte, k string) []byte { key = append(key, 'i') l := len(k) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, k...) return key } @@ -1679,14 +1679,10 @@ func internalKey(key []byte, k, v string, parent, node Node) []byte { key = internalKeyPrefix(key, k) l := len(v) - key = append(key, byte(l), byte(l>>8)) + key = binary.LittleEndian.AppendUint16(key, uint16(l)) key = append(key, v...) - var raw [8]byte - binary.LittleEndian.PutUint64(raw[:], parent) - key = append(key, raw[:]...) - - binary.LittleEndian.PutUint64(raw[:], node) - key = append(key, raw[:]...) 
+ key = binary.LittleEndian.AppendUint64(key, parent) + key = binary.LittleEndian.AppendUint64(key, node) return key } From bba1892fa13d0c8aa904dc6b6e992a6407f57b64 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 27 Nov 2024 15:48:16 +0300 Subject: [PATCH 228/591] [#1524] ape: Make APE checker return error without status Signed-off-by: Airat Arifullin --- pkg/services/common/ape/checker.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index eb4fd03c7..30580da12 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -11,7 +11,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" @@ -104,14 +103,7 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { if found && status == apechain.Allow { return nil } - err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String()) - return apeErr(err) -} - -func apeErr(err error) error { - errAccessDenied := &apistatus.ObjectAccessDenied{} - errAccessDenied.WriteReason(err.Error()) - return errAccessDenied + return fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String()) } // isValidBearer checks whether bearer token was correctly signed by authorized From 00c608c05e3c6574b5258f9c63b5ed8bfca264a7 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 27 Nov 2024 15:52:23 +0300 Subject: [PATCH 229/591] [#1524] tree: Make check APE error get wrapped to api status Signed-off-by: Airat Arifullin --- pkg/services/tree/signature.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 4fd4a7e1e..80f5b3590 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -11,6 +11,7 @@ import ( core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -62,7 +63,16 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey) + if err = s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey); err != nil { + return apeErr(err) + } + return nil +} + +func apeErr(err error) error { + errAccessDenied := &apistatus.ObjectAccessDenied{} + errAccessDenied.WriteReason(err.Error()) + return errAccessDenied } // Returns true iff the operation is read-only and request was signed From e0ac3a583f9207684d0cbfa12ecd59f5c4b83642 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Thu, 28 Nov 2024 10:41:59 +0300 Subject: [PATCH 230/591] [#1523] metabase: Remove `(*DB).IterateCoveredByTombstones` Remove this method 
because it isn't used anywhere since 7799f8e4c. Signed-off-by: Aleksey Savchuk --- .../metabase/iterators.go | 65 ------------------- .../metabase/iterators_test.go | 63 ------------------ 2 files changed, 128 deletions(-) diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 5d42e4125..0d438e102 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "errors" - "fmt" "strconv" "time" @@ -111,70 +110,6 @@ func (db *DB) iterateExpired(tx *bbolt.Tx, epoch uint64, h ExpiredObjectHandler) return nil } -// IterateCoveredByTombstones iterates over all objects in DB which are covered -// by tombstone with string address from tss. Locked objects are not included -// (do not confuse with objects of type LOCK). -// -// If h returns ErrInterruptIterator, nil returns immediately. -// Returns other errors of h directly. -// -// Does not modify tss. -func (db *DB) IterateCoveredByTombstones(ctx context.Context, tss map[string]oid.Address, h func(oid.Address) error) error { - var ( - startedAt = time.Now() - success = false - ) - defer func() { - db.metrics.AddMethodDuration("IterateCoveredByTombstones", time.Since(startedAt), success) - }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateCoveredByTombstones") - defer span.End() - - db.modeMtx.RLock() - defer db.modeMtx.RUnlock() - - if db.mode.NoMetabase() { - return ErrDegradedMode - } - - return db.boltDB.View(func(tx *bbolt.Tx) error { - return db.iterateCoveredByTombstones(tx, tss, h) - }) -} - -func (db *DB) iterateCoveredByTombstones(tx *bbolt.Tx, tss map[string]oid.Address, h func(oid.Address) error) error { - bktGraveyard := tx.Bucket(graveyardBucketName) - - err := bktGraveyard.ForEach(func(k, v []byte) error { - var addr oid.Address - if err := decodeAddressFromKey(&addr, v); err != nil { - return err - } - if _, ok := tss[addr.EncodeToString()]; ok { - var addr oid.Address - - err := decodeAddressFromKey(&addr, k) - if err != nil { - return fmt.Errorf("could not parse address of the object under tombstone: %w", err) - } - - if objectLocked(tx, addr.Container(), addr.Object()) { - return nil - } - - return h(addr) - } - - return nil - }) - - if errors.Is(err, ErrInterruptIterator) { - err = nil - } - - return err -} - func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) error) error { var cid cid.ID var oid oid.ID diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 7eed32c55..4c9579965 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" @@ -67,65 +66,3 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt return object2.AddressOf(obj) } - -func TestDB_IterateCoveredByTombstones(t *testing.T) { - db := newDB(t) - defer func() { require.NoError(t, 
db.Close(context.Background())) }() - - cnr := cidtest.ID() - ts := oidtest.Address() - protected1 := oidtest.Address() - protected2 := oidtest.Address() - protectedLocked := oidtest.Address() - garbage := oidtest.Address() - ts.SetContainer(cnr) - protected1.SetContainer(cnr) - protected2.SetContainer(cnr) - protectedLocked.SetContainer(cnr) - - var prm meta.InhumePrm - var err error - - prm.SetAddresses(protected1, protected2, protectedLocked) - prm.SetTombstoneAddress(ts) - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - prm.SetAddresses(garbage) - prm.SetGCMark() - - _, err = db.Inhume(context.Background(), prm) - require.NoError(t, err) - - var handled []oid.Address - - tss := map[string]oid.Address{ - ts.EncodeToString(): ts, - } - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 3) - require.Contains(t, handled, protected1) - require.Contains(t, handled, protected2) - require.Contains(t, handled, protectedLocked) - - err = db.Lock(context.Background(), protectedLocked.Container(), oidtest.ID(), []oid.ID{protectedLocked.Object()}) - require.NoError(t, err) - - handled = handled[:0] - - err = db.IterateCoveredByTombstones(context.Background(), tss, func(addr oid.Address) error { - handled = append(handled, addr) - return nil - }) - require.NoError(t, err) - - require.Len(t, handled, 2) - require.NotContains(t, handled, protectedLocked) -} From edfa3f4825ecefd40f9dbda0dfa0356c062a4fa7 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 2 Dec 2024 13:31:32 +0300 Subject: [PATCH 231/591] [#1528] node: Keep order for equal elements when sort priority metrics Signed-off-by: Anton Nikiforov --- pkg/services/object_manager/placement/traverser.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 7c720b204..6a949e938 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -202,7 +202,7 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, metrics: m, } } - slices.SortFunc(nm, func(a, b nodeMetrics) int { + slices.SortStableFunc(nm, func(a, b nodeMetrics) int { return slices.Compare(a.metrics, b.metrics) }) sortedVector := make([]netmap.NodeInfo, len(unsortedVector)) From 635a292ae49a9e6bfdb82575cd8c1421c03c4e37 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 2 Dec 2024 13:35:59 +0300 Subject: [PATCH 232/591] [#1528] cli: Keep order for required nodes in the result of `object nodes` Signed-off-by: Anton Nikiforov --- cmd/frostfs-cli/modules/object/nodes.go | 32 ------------------------- 1 file changed, 32 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index e6918dfc9..31682c0e1 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -1,15 +1,12 @@ package object import ( - "bytes" - "cmp" "context" "crypto/ecdsa" "encoding/hex" "encoding/json" "errors" "fmt" - "slices" "sync" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -507,7 +504,6 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, } func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - 
normilizeObjectNodesResult(objects, result) if json, _ := cmd.Flags().GetBool(commonflags.JSON); json { printObjectNodesAsJSON(cmd, objID, objects, result) } else { @@ -515,34 +511,6 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } } -func normilizeObjectNodesResult(objects []phyObject, result *objectNodesResult) { - slices.SortFunc(objects, func(lhs, rhs phyObject) int { - if lhs.ecHeader == nil && rhs.ecHeader == nil { - return bytes.Compare(lhs.objectID[:], rhs.objectID[:]) - } - if lhs.ecHeader == nil { - return -1 - } - if rhs.ecHeader == nil { - return 1 - } - if lhs.ecHeader.parent == rhs.ecHeader.parent { - return cmp.Compare(lhs.ecHeader.index, rhs.ecHeader.index) - } - return bytes.Compare(lhs.ecHeader.parent[:], rhs.ecHeader.parent[:]) - }) - for _, obj := range objects { - op := result.placements[obj.objectID] - slices.SortFunc(op.confirmedNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - slices.SortFunc(op.requiredNodes, func(lhs, rhs netmapSDK.NodeInfo) int { - return bytes.Compare(lhs.PublicKey(), rhs.PublicKey()) - }) - result.placements[obj.objectID] = op - } -} - func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) From 60feed3b5f71f58e8694873721cb1c115bcf6bfd Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 2 Dec 2024 15:37:25 +0300 Subject: [PATCH 233/591] [#1527] engine/test: Allow to specify current epoch in `epochState` Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/engine_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index a7cb90bae..9a3eaadc0 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -17,10 +17,12 @@ import ( "github.com/stretchr/testify/require" ) -type epochState struct{} +type epochState struct { + currEpoch uint64 +} func (s epochState) CurrentEpoch() uint64 { - return 0 + return s.currEpoch } type testEngineWrapper struct { From 9cabca9dfe73999b4b68f19ef9422d1fafc0b23a Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 2 Dec 2024 16:12:12 +0300 Subject: [PATCH 234/591] [#1527] engine/test: Move default metabase options to separate function Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/engine_test.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 9a3eaadc0..926ff43f3 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -89,12 +89,16 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option { blobstor.WithLogger(test.NewLogger(t)), ), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), - shard.WithMetaBaseOptions( - meta.WithPath(filepath.Join(t.TempDir(), "metabase")), - meta.WithPermissions(0o700), - meta.WithEpochState(epochState{}), - meta.WithLogger(test.NewLogger(t)), - ), + shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), + } +} + +func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option { + return []meta.Option{ + meta.WithPath(filepath.Join(t.TempDir(), "metabase")), + 
meta.WithPermissions(0o700), + meta.WithEpochState(epochState{}), + meta.WithLogger(test.NewLogger(t)), } } From 432042c534ec3f8774b2d75e4236e3419f466482 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 2 Dec 2024 16:16:50 +0300 Subject: [PATCH 235/591] [#1527] engine: Add tests for handling expired objects on inhume and lock Currently, it's allowed to inhume or lock an expired object. Consider the following scenario: 1) An user inhumes or locks an object 2) The object expires 3) GC hasn't yet deleted the object 4) The node loses the associated tombstone or lock 5) Another node replicates tombstone or lock to the first node In this case, the second node succeeds, which is the desired behavior. Signed-off-by: Aleksey Savchuk --- .../engine/inhume_test.go | 53 +++++++++++++++++++ pkg/local_object_storage/engine/lock_test.go | 53 +++++++++++++++++++ 2 files changed, 106 insertions(+) diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 6980afb07..b89cf09a8 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -2,13 +2,17 @@ package engine import ( "context" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" ) @@ -84,3 +88,52 @@ func TestStorageEngine_Inhume(t *testing.T) { require.Empty(t, addrs) }) } + +func TestInhumeExpiredRegularObject(t *testing.T) { + t.Parallel() + + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + generateAndPutObject := func() *objectSDK.Object { + obj := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + var putPrm PutPrm + putPrm.Object = obj + require.NoError(t, engine.Put(context.Background(), putPrm)) + return obj + } + + t.Run("inhume with tombstone", func(t *testing.T) { + obj := generateAndPutObject() + ts := oidtest.Address() + ts.SetContainer(cnr) + + var prm InhumePrm + prm.WithTarget(ts, object.AddressOf(obj)) + _, err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) + + t.Run("inhume without tombstone", func(t *testing.T) { + obj := generateAndPutObject() + + var prm InhumePrm + prm.MarkAsGarbage(object.AddressOf(obj)) + _, err := engine.Inhume(context.Background(), prm) + require.NoError(t, err) + }) +} diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index feca9cb69..7bb9e3934 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -292,3 +292,56 @@ func TestLockForceRemoval(t 
*testing.T) { _, err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) } + +func TestLockExpiredRegularObject(t *testing.T) { + const currEpoch = 42 + const objectExpiresAfter = currEpoch - 1 + + engine := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option { + return []shard.Option{ + shard.WithDisabledGC(), + shard.WithMetaBaseOptions(append( + testGetDefaultMetabaseOptions(t), + meta.WithEpochState(epochState{currEpoch}), + )...), + } + }).prepare(t).engine + + cnr := cidtest.ID() + + object := testutil.GenerateObjectWithCID(cnr) + testutil.AddAttribute(object, objectV2.SysAttributeExpEpoch, strconv.Itoa(objectExpiresAfter)) + + address := objectcore.AddressOf(object) + + var putPrm PutPrm + putPrm.Object = object + require.NoError(t, engine.Put(context.Background(), putPrm)) + + var getPrm GetPrm + var errNotFound *apistatus.ObjectNotFound + + getPrm.WithAddress(address) + _, err := engine.Get(context.Background(), getPrm) + require.ErrorAs(t, err, &errNotFound) + + t.Run("lock expired regular object", func(t *testing.T) { + engine.Lock(context.Background(), + address.Container(), + oidtest.ID(), + []oid.ID{address.Object()}, + ) + + res, err := engine.IsLocked(context.Background(), objectcore.AddressOf(object)) + require.NoError(t, err) + require.True(t, res) + }) + + t.Run("get expired and locked regular object", func(t *testing.T) { + getPrm.WithAddress(objectcore.AddressOf(object)) + + res, err := engine.Get(context.Background(), getPrm) + require.NoError(t, err) + require.Equal(t, res.Object(), object) + }) +} From 47dfd8840c14fc47ebc621aea1980d8fe002fce6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 3 Dec 2024 15:34:26 +0300 Subject: [PATCH 236/591] [#1532] node: Allow to omit metabase.path if shard is disabled Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/config/engine/config.go | 8 ++++---- cmd/frostfs-node/config/engine/config_test.go | 16 ++++++++++++++++ .../config/engine/testdata/shards.env | 3 +++ .../config/engine/testdata/shards.json | 13 +++++++++++++ .../config/engine/testdata/shards.yaml | 7 +++++++ 5 files changed, 43 insertions(+), 4 deletions(-) create mode 100644 cmd/frostfs-node/config/engine/testdata/shards.env create mode 100644 cmd/frostfs-node/config/engine/testdata/shards.json create mode 100644 cmd/frostfs-node/config/engine/testdata/shards.yaml diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index c944d1c58..e5735e88b 100644 --- a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -41,6 +41,10 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) c.Sub(si), ) + if sc.Mode() == mode.Disabled { + continue + } + // Path for the blobstor can't be present in the default section, because different shards // must have different paths, so if it is missing, the shard is not here. 
 		// At the same time checking for "blobstor" section doesn't work proper
@@ -50,10 +54,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config)
 		}
 		(*config.Config)(sc).SetDefault(def)
 
-		if sc.Mode() == mode.Disabled {
-			continue
-		}
-
 		if err := f(sc); err != nil {
 			return err
 		}
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 19ad0e7ac..ef6380a62 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -18,6 +18,22 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+func TestIterateShards(t *testing.T) {
+	fileConfigTest := func(c *config.Config) {
+		var res []string
+		require.NoError(t,
+			engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
+				res = append(res, sc.Metabase().Path())
+				return nil
+			}))
+		require.Equal(t, []string{"abc", "xyz"}, res)
+	}
+
+	const cfgDir = "./testdata/shards"
+	configtest.ForEachFileType(cfgDir, fileConfigTest)
+	configtest.ForEnvFileType(t, cfgDir, fileConfigTest)
+}
+
 func TestEngineSection(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		empty := configtest.EmptyConfig()
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.env b/cmd/frostfs-node/config/engine/testdata/shards.env
new file mode 100644
index 000000000..079789b0f
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.env
@@ -0,0 +1,3 @@
+FROSTFS_STORAGE_SHARD_0_METABASE_PATH=abc
+FROSTFS_STORAGE_SHARD_1_MODE=disabled
+FROSTFS_STORAGE_SHARD_2_METABASE_PATH=xyz
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.json b/cmd/frostfs-node/config/engine/testdata/shards.json
new file mode 100644
index 000000000..b3d6abe85
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.json
@@ -0,0 +1,13 @@
+{
+  "storage.shard": {
+    "0": {
+      "metabase.path": "abc"
+    },
+    "1": {
+      "mode": "disabled"
+    },
+    "2": {
+      "metabase.path": "xyz"
+    }
+  }
+}
diff --git a/cmd/frostfs-node/config/engine/testdata/shards.yaml b/cmd/frostfs-node/config/engine/testdata/shards.yaml
new file mode 100644
index 000000000..bbbba3af8
--- /dev/null
+++ b/cmd/frostfs-node/config/engine/testdata/shards.yaml
@@ -0,0 +1,7 @@
+storage.shard:
+  0:
+    metabase.path: abc
+  1:
+    mode: disabled
+  2:
+    metabase.path: xyz

From 748edd19993e6cc70329f5eeb782f850e2cef87b Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 2 Dec 2024 15:15:50 +0300
Subject: [PATCH 237/591] [#1450] engine: Return shard-level error if object is expired on inhume

Since we have errors defined on the shard-level, it looks strange that we check an error against the shard-level error `ErrLockObjectRemoval`, but then return the metabase-level error. Let's return the same shard-level error instead.
Signed-off-by: Aleksey Savchuk
---
 pkg/local_object_storage/engine/inhume.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index e89a8d048..74e5b5660 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -169,7 +169,7 @@ func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm sh
 			retErr = new(apistatus.ObjectLocked)
 			return true
 		case errors.Is(err, shard.ErrLockObjectRemoval):
-			retErr = meta.ErrLockObjectRemoval
+			retErr = shard.ErrLockObjectRemoval
 			return true
 		case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode):
 			retErr = err

From b348b20289e6bc08fc923a8882d7dc24409ee0d6 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Wed, 4 Dec 2024 10:08:34 +0300
Subject: [PATCH 238/591] [#1450] engine: Add benchmark for `Inhume` operation

Signed-off-by: Aleksey Savchuk
---
 .../engine/inhume_test.go | 60 +++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index b89cf09a8..9d7196d94 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -2,6 +2,7 @@ package engine
 
 import (
 	"context"
+	"fmt"
 	"strconv"
 	"testing"
 
@@ -12,8 +13,11 @@ import (
 	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sync/errgroup"
 )
 
 func TestStorageEngine_Inhume(t *testing.T) {
@@ -137,3 +141,59 @@ func TestInhumeExpiredRegularObject(t *testing.T) {
 		require.NoError(t, err)
 	})
 }
+
+func BenchmarkInhumeMultipart(b *testing.B) {
+	// The benchmark result insignificantly depends on the number of shards,
+	// so do not use it as a benchmark parameter, just set it big enough.
+	numShards := 100
+
+	for numObjects := 1; numObjects <= 10000; numObjects *= 10 {
+		b.Run(
+			fmt.Sprintf("objects=%d", numObjects),
+			func(b *testing.B) {
+				benchmarkInhumeMultipart(b, numShards, numObjects)
+			},
+		)
+	}
+}
+
+func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) {
+	b.StopTimer()
+
+	engine := testNewEngine(b, WithShardPoolSize(uint32(numObjects))).
+		setShardsNum(b, numShards).prepare(b).engine
+	defer func() { require.NoError(b, engine.Close(context.Background())) }()
+
+	cnt := cidtest.ID()
+	eg := errgroup.Group{}
+
+	for range b.N {
+		addrs := make([]oid.Address, numObjects)
+
+		for i := range numObjects {
+			prm := PutPrm{}
+
+			prm.Object = objecttest.Object().Parent()
+			prm.Object.SetContainerID(cnt)
+			prm.Object.SetType(objectSDK.TypeRegular)
+
+			addrs[i] = object.AddressOf(prm.Object)
+
+			eg.Go(func() error {
+				return engine.Put(context.Background(), prm)
+			})
+		}
+		require.NoError(b, eg.Wait())
+
+		ts := oidtest.Address()
+		ts.SetContainer(cnt)
+
+		prm := InhumePrm{}
+		prm.WithTarget(ts, addrs...)
+ + b.StartTimer() + _, err := engine.Inhume(context.Background(), prm) + require.NoError(b, err) + b.StopTimer() + } +} From 281d65435e05c6b6b37224c46c755a939b486f99 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 18 Nov 2024 14:40:10 +0300 Subject: [PATCH 239/591] [#1450] engine: Group object by shard before `Inhume` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine cpu: 12th Gen Intel(R) Core(TM) i5-1235U │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ InhumeMultipart/objects=1-12 11.42m ± 1% 10.71m ± 0% -6.27% (p=0.000 n=10) InhumeMultipart/objects=10-12 113.5m ± 0% 100.9m ± 3% -11.08% (p=0.000 n=10) InhumeMultipart/objects=100-12 1135.4m ± 1% 681.3m ± 2% -40.00% (p=0.000 n=10) InhumeMultipart/objects=1000-12 11.358 ± 0% 1.089 ± 1% -90.41% (p=0.000 n=10) InhumeMultipart/objects=10000-12 113.251 ± 0% 1.645 ± 1% -98.55% (p=0.000 n=10) geomean 1.136 265.5m -76.63% ``` Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/inhume.go | 184 +++++++++++++--------- 1 file changed, 110 insertions(+), 74 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 74e5b5660..80c77af54 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -81,110 +81,146 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe } func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { + addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) + if err != nil { + return InhumeRes{}, err + } + var shPrm shard.InhumePrm if prm.forceRemoval { shPrm.ForceRemoval() } - for i := range prm.addrs { - if !prm.forceRemoval { - locked, err := e.IsLocked(ctx, prm.addrs[i]) - if err != nil { - e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, - zap.Error(err), - zap.Stringer("addr", prm.addrs[i]), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) - } else if locked { - return InhumeRes{}, new(apistatus.ObjectLocked) - } - } + var errLocked *apistatus.ObjectLocked + for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { - shPrm.SetTarget(*prm.tombstone, prm.addrs[i]) + shPrm.SetTarget(*prm.tombstone, addrs...) } else { - shPrm.MarkAsGarbage(prm.addrs[i]) + shPrm.MarkAsGarbage(addrs...) } - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true) - if err != nil { - return InhumeRes{}, err + sh, exists := e.shards[shardID] + if !exists { + e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard, + zap.Error(errors.New("this shard was expected to exist")), + zap.String("shard_id", shardID), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + ) + return InhumeRes{}, errInhumeFailure } - if !ok { - ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false) - if err != nil { - return InhumeRes{}, err - } else if !ok { - return InhumeRes{}, errInhumeFailure + + if _, err := sh.Inhume(ctx, shPrm); err != nil { + switch { + case errors.As(err, &errLocked): + case errors.Is(err, shard.ErrLockObjectRemoval): + case errors.Is(err, shard.ErrReadOnlyMode): + case errors.Is(err, shard.ErrDegradedMode): + default: + e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) } + return InhumeRes{}, err } } return InhumeRes{}, nil } -// Returns ok if object was inhumed during this invocation or before. 
-func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) { - root := false - var existPrm shard.ExistsPrm - var retErr error - var ok bool +// groupObjectsByShard groups objects based on the shard(s) they are stored on. +// +// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of +// the objects are locked. +func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) { + groups := make(map[string][]oid.Address) + + for _, addr := range addrs { + ids, err := e.findShards(ctx, addr, checkLocked) + if err != nil { + return nil, err + } + for _, id := range ids { + groups[id] = append(groups[id], addr) + } + } + + return groups, nil +} + +// findShards determines the shard(s) where the object is stored. +// +// If the object is a root object, multiple shards will be returned. +// +// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of +// the objects are locked. +func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) { + var ( + ids []string + retErr error + + prm shard.ExistsPrm + + siErr *objectSDK.SplitInfoError + ecErr *objectSDK.ECInfoError + + isRootObject bool + objectExists bool + ) e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { - defer func() { - // if object is root we continue since information about it - // can be presented in other shards - if checkExists && root { - stop = false - } - }() + objectExists = false - if checkExists { - existPrm.Address = addr - exRes, err := sh.Exists(ctx, existPrm) - if err != nil { - if client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err) { - // inhumed once - no need to be inhumed again - ok = true - return true - } - - var siErr *objectSDK.SplitInfoError - var ecErr *objectSDK.ECInfoError - if !(errors.As(err, &siErr) || errors.As(err, &ecErr)) { - e.reportShardError(ctx, sh, "could not check for presents in shard", err, zap.Stringer("address", addr)) - return - } - - root = true - } else if !exRes.Exists() { - return - } + prm.Address = addr + switch res, err := sh.Exists(ctx, prm); { + case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err): + // NOTE(@a-savchuk): there were some considerations that we can stop + // immediately if the object is already removed or expired. However, + // the previous method behavior was: + // - keep iterating if it's a root object and already removed, + // - stop iterating if it's not a root object and removed. + // + // Since my task was only improving method speed, let's keep the + // previous method behavior. Continue if it's a root object. 
+ return !isRootObject + case errors.As(err, &siErr) || errors.As(err, &ecErr): + isRootObject = true + objectExists = true + case err != nil: + e.reportShardError( + ctx, sh, "couldn't check for presence in shard", + err, zap.Stringer("address", addr), + ) + case res.Exists(): + objectExists = true + default: } - _, err := sh.Inhume(ctx, prm) - if err != nil { - var errLocked *apistatus.ObjectLocked - switch { - case errors.As(err, &errLocked): + if !objectExists { + return + } + + if checkLocked { + if isLocked, err := sh.IsLocked(ctx, addr); err != nil { + e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, + zap.Error(err), + zap.Stringer("address", addr), + zap.String("trace_id", tracingPkg.GetTraceID(ctx)), + ) + } else if isLocked { retErr = new(apistatus.ObjectLocked) return true - case errors.Is(err, shard.ErrLockObjectRemoval): - retErr = shard.ErrLockObjectRemoval - return true - case errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, shard.ErrDegradedMode): - retErr = err - return true } - - e.reportShardError(ctx, sh, "could not inhume object in shard", err, zap.Stringer("address", addr)) - return false } - ok = true - return true + ids = append(ids, sh.ID().String()) + + // Continue if it's a root object. + return !isRootObject }) - return ok, retErr + if retErr != nil { + return nil, retErr + } + return ids, nil } // IsLocked checks whether an object is locked according to StorageEngine's state. From 6c679d15350e8d28b25ede822d682c3354b06ebe Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 11:03:42 +0300 Subject: [PATCH 240/591] [#1535] morph: Unify client creation error messages Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/balance/client.go | 2 +- pkg/morph/client/container/client.go | 2 +- pkg/morph/client/frostfs/client.go | 2 +- pkg/morph/client/frostfsid/client.go | 2 +- pkg/morph/client/netmap/client.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go index b05c526dc..d229e5900 100644 --- a/pkg/morph/client/balance/client.go +++ b/pkg/morph/client/balance/client.go @@ -39,7 +39,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, staticClient, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("could not create static client of Balance contract: %w", err) + return nil, fmt.Errorf("create 'balance' contract client: %w", err) } return &Client{ diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index bdbcce917..b95b1ea06 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -48,7 +48,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...) if err != nil { - return nil, fmt.Errorf("can't create container static client: %w", err) + return nil, fmt.Errorf("create 'container' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfs/client.go b/pkg/morph/client/frostfs/client.go index 571915c27..cd6a9849e 100644 --- a/pkg/morph/client/frostfs/client.go +++ b/pkg/morph/client/frostfs/client.go @@ -35,7 +35,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) 
if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS contract: %w", err) + return nil, fmt.Errorf("create 'frostfs' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/frostfsid/client.go b/pkg/morph/client/frostfsid/client.go index 4c31f42de..61eb03f09 100644 --- a/pkg/morph/client/frostfsid/client.go +++ b/pkg/morph/client/frostfsid/client.go @@ -27,7 +27,7 @@ var _ frostfsidcore.SubjectProvider = (*Client)(nil) func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8) (*Client, error) { sc, err := client.NewStatic(cli, contract, fee, client.TryNotary(), client.AsAlphabet()) if err != nil { - return nil, fmt.Errorf("could not create client of FrostFS ID contract: %w", err) + return nil, fmt.Errorf("create 'frostfsid' contract client: %w", err) } return &Client{client: sc}, nil diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go index eafa097e9..da7eb0719 100644 --- a/pkg/morph/client/netmap/client.go +++ b/pkg/morph/client/netmap/client.go @@ -52,7 +52,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8, sc, err := client.NewStatic(cli, contract, fee, ([]client.StaticClientOption)(*o)...) if err != nil { - return nil, fmt.Errorf("can't create netmap static client: %w", err) + return nil, fmt.Errorf("create 'netmap' contract client: %w", err) } return &Client{client: sc}, nil From e37dcdf88bfb6e159b1e2812b43774227e7d3366 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 11:07:43 +0300 Subject: [PATCH 241/591] [#1535] morph/netmap: Unify error messages for config retrieval Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/netmap/config.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 0a3c351db..29bd1517d 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -28,7 +28,7 @@ const ( func (c *Client) MaxObjectSize() (uint64, error) { objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch number: %w", c, err) + return 0, err } return objectSize, nil @@ -38,7 +38,7 @@ func (c *Client) MaxObjectSize() (uint64, error) { func (c *Client) EpochDuration() (uint64, error) { epochDuration, err := c.readUInt64Config(EpochDurationConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get epoch duration: %w", c, err) + return 0, err } return epochDuration, nil @@ -49,7 +49,7 @@ func (c *Client) EpochDuration() (uint64, error) { func (c *Client) ContainerFee() (uint64, error) { fee, err := c.readUInt64Config(ContainerFeeConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get container fee: %w", c, err) + return 0, err } return fee, nil @@ -60,7 +60,7 @@ func (c *Client) ContainerFee() (uint64, error) { func (c *Client) ContainerAliasFee() (uint64, error) { fee, err := c.readUInt64Config(ContainerAliasFeeConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get container alias fee: %w", c, err) + return 0, err } return fee, nil @@ -79,7 +79,7 @@ func (c *Client) HomomorphicHashDisabled() (bool, error) { func (c *Client) InnerRingCandidateFee() (uint64, error) { fee, err := c.readUInt64Config(IrCandidateFeeConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get inner ring candidate fee: %w", c, err) + return 0, err } return fee, nil @@ -90,7 +90,7 @@ func (c *Client) 
InnerRingCandidateFee() (uint64, error) { func (c *Client) WithdrawFee() (uint64, error) { fee, err := c.readUInt64Config(WithdrawFeeConfig) if err != nil { - return 0, fmt.Errorf("(%T) could not get withdraw fee: %w", c, err) + return 0, err } return fee, nil @@ -108,7 +108,7 @@ func (c *Client) MaintenanceModeAllowed() (bool, error) { func (c *Client) readUInt64Config(key string) (uint64, error) { v, err := c.config([]byte(key), IntegerAssert) if err != nil { - return 0, err + return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } // IntegerAssert is guaranteed to return int64 if the error is nil. @@ -124,7 +124,7 @@ func (c *Client) readBoolConfig(key string) (bool, error) { return false, nil } - return false, fmt.Errorf("read boolean configuration value %s from the Sidechain: %w", key, err) + return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } // BoolAssert is guaranteed to return bool if the error is nil. From e3487d5af5d513115659294a1699bde18269887f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 11:17:13 +0300 Subject: [PATCH 242/591] [#1535] morph: Unify test invoke error messages Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/balance/balanceOf.go | 2 +- pkg/morph/client/balance/decimals.go | 2 +- pkg/morph/client/container/deletion_info.go | 2 +- pkg/morph/client/container/get.go | 2 +- pkg/morph/client/container/list.go | 2 +- pkg/morph/client/frostfsid/subject.go | 4 ++-- pkg/morph/client/netmap/config.go | 4 ++-- pkg/morph/client/netmap/epoch.go | 4 ++-- pkg/morph/client/netmap/innerring.go | 2 +- pkg/morph/client/netmap/netmap.go | 6 +++--- pkg/morph/client/netmap/snapshot.go | 4 +++- pkg/morph/client/notary.go | 4 ++-- 12 files changed, 20 insertions(+), 18 deletions(-) diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index aae245acd..73aab1736 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -23,7 +23,7 @@ func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { prms, err := c.client.TestInvoke(invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) } else if ln := len(prms); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", balanceOfMethod, ln) } diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index 39e4b28e5..28329ee6e 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -14,7 +14,7 @@ func (c *Client) Decimals() (uint32, error) { prms, err := c.client.TestInvoke(invokePrm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", decimalsMethod, err) + return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err) } else if ln := len(prms); ln != 1 { return 0, fmt.Errorf("unexpected stack item count (%s): %d", decimalsMethod, ln) } diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index dda6bf98c..436ca3c01 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -39,7 +39,7 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", 
deletionInfoMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", deletionInfoMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", deletionInfoMethod, ln) } diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index ea57a3a95..1d84e9109 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -53,7 +53,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) } - return nil, fmt.Errorf("could not perform test invocation (%s): %w", getMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", getMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", getMethod, ln) } diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index 6fed46c1a..55317375a 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -27,7 +27,7 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { res, err := c.client.TestInvoke(prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", listMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) } diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 0852f536c..7c93f4922 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -21,7 +21,7 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) res, err := c.client.TestInvoke(prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) } structArr, err := checkStackItem(res) @@ -44,7 +44,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject res, err := c.client.TestInvoke(prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", methodGetSubjectExtended, err) + return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) } structArr, err := checkStackItem(res) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 29bd1517d..2b87df6f7 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -206,7 +206,7 @@ func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { items, err := c.client.TestInvoke(prm) if err != nil { - return res, fmt.Errorf("could not perform test invocation (%s): %w", + return res, fmt.Errorf("test invoke (%s): %w", configListMethod, err) } @@ -292,7 +292,7 @@ func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (a items, err := c.client.TestInvoke(prm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", configMethod, err) } diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 92d569ae2..6d909a7a1 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -14,7 +14,7 @@ func (c *Client) Epoch() (uint64, error) { items, err := c.client.TestInvoke(prm) if err != nil { - return 0, 
fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", epochMethod, err) } @@ -38,7 +38,7 @@ func (c *Client) LastEpochBlock() (uint32, error) { items, err := c.client.TestInvoke(prm) if err != nil { - return 0, fmt.Errorf("could not perform test invocation (%s): %w", + return 0, fmt.Errorf("test invoke (%s): %w", lastEpochBlockMethod, err) } diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index c9dc7d2fc..824827d6f 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -46,7 +46,7 @@ func (c *Client) GetInnerRingList() (keys.PublicKeys, error) { prms, err := c.client.TestInvoke(invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", innerRingListMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) } return irKeysFromStackItem(prms, innerRingListMethod) diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index f7b5c3ba4..a0009ea73 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -18,7 +18,7 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { res, err := c.client.TestInvoke(invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", epochSnapshotMethod, err) } @@ -40,7 +40,7 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { res, err := c.client.TestInvoke(invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", netMapCandidatesMethod, err) + return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) } if len(res) > 0 { @@ -57,7 +57,7 @@ func (c *Client) NetMap() (*netmap.NetMap, error) { res, err := c.client.TestInvoke(invokePrm) if err != nil { - return nil, fmt.Errorf("could not perform test invocation (%s): %w", + return nil, fmt.Errorf("test invoke (%s): %w", netMapMethod, err) } diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go index ba2c26af7..a5134bcef 100644 --- a/pkg/morph/client/netmap/snapshot.go +++ b/pkg/morph/client/netmap/snapshot.go @@ -1,6 +1,8 @@ package netmap import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -13,7 +15,7 @@ func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) { res, err := c.client.TestInvoke(prm) if err != nil { - return nil, err + return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) } return DecodeNetMap(res) diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 71232cb33..8516c89a9 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -239,7 +239,7 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) { items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh) if err != nil { - return 0, fmt.Errorf("%v: %w", notaryBalanceErrMsg, err) + return 0, fmt.Errorf("test invoke (%s): %w", notaryBalanceOfMethod, err) } if len(items) != 1 { @@ -654,7 +654,7 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) { func (c *Client) depositExpirationOf() (int64, error) { expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash()) if err != nil { - return 0, fmt.Errorf("can't invoke method: %w", err) + return 0, fmt.Errorf("test invoke 
(%s): %w", notaryExpirationOfMethod, err) } if len(expirationRes) != 1 { From 2d5d4093be45cd26660ddcd9ea28d7b625a6e203 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 13:19:37 +0300 Subject: [PATCH 243/591] [#1537] morph: Use `(user.ID).ScriptHash()` where possible Pick up changes from TrueCloudLab/frostfs-sdk-go#198. gopatch: ``` @@ var user expression @@ -address.StringToUint160(user.EncodeToString()) +user.ScriptHash() ``` Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/balance/balanceOf.go | 3 +-- pkg/morph/client/balance/transfer.go | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index 73aab1736..12a499ffb 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -6,13 +6,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { - h, err := address.StringToUint160(id.EncodeToString()) + h, err := id.ScriptHash() if err != nil { return nil, err } diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 65a0b70a6..9638b5c45 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" ) // TransferPrm groups parameters of TransferX method. @@ -25,12 +24,12 @@ type TransferPrm struct { // // If TryNotary is provided, calls notary contract. func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { - from, err := address.StringToUint160(p.From.EncodeToString()) + from, err := p.From.ScriptHash() if err != nil { return err } - to, err := address.StringToUint160(p.To.EncodeToString()) + to, err := p.To.ScriptHash() if err != nil { return err } From 5c3b2d95ba860327963fce069553f5ec953982a1 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 15:17:50 +0300 Subject: [PATCH 244/591] [#1538] node: Assume notary is enabled Notaryless environments are not tested at all since a while. We use neo-go only and it has notary contract enabled. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/config.go | 2 -- cmd/frostfs-node/morph.go | 28 +++++++--------------------- cmd/frostfs-node/netmap.go | 18 ++++++++---------- cmd/frostfs-node/object.go | 28 ++-------------------------- 4 files changed, 17 insertions(+), 59 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 5af37865f..e63786b7d 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -591,8 +591,6 @@ type cfgMorph struct { client *client.Client - notaryEnabled bool - // TTL of Sidechain cached values. Non-positive value disables caching. 
cacheTTL time.Duration diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 67d2d1c06..3b9175981 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -35,18 +35,14 @@ func (c *cfg) initMorphComponents(ctx context.Context) { lookupScriptHashesInNNS(c) // smart contract auto negotiation - if c.cfgMorph.notaryEnabled { - err := c.cfgMorph.client.EnableNotarySupport( - client.WithProxyContract( - c.cfgMorph.proxyScriptHash, - ), - ) - fatalOnErr(err) - } - - c.log.Info(ctx, logs.FrostFSNodeNotarySupport, - zap.Bool("sidechain_enabled", c.cfgMorph.notaryEnabled), + err := c.cfgMorph.client.EnableNotarySupport( + client.WithProxyContract( + c.cfgMorph.proxyScriptHash, + ), ) + fatalOnErr(err) + + c.log.Info(ctx, logs.FrostFSNodeNotarySupport) wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) fatalOnErr(err) @@ -116,15 +112,9 @@ func initMorphClient(ctx context.Context, c *cfg) { } c.cfgMorph.client = cli - c.cfgMorph.notaryEnabled = cli.ProbeNotary() } func makeAndWaitNotaryDeposit(ctx context.Context, c *cfg) { - // skip notary deposit in non-notary environments - if !c.cfgMorph.notaryEnabled { - return - } - tx, vub, err := makeNotaryDeposit(ctx, c) fatalOnErr(err) @@ -282,10 +272,6 @@ func lookupScriptHashesInNNS(c *cfg) { ) for _, t := range targets { - if t.nnsName == client.NNSProxyContractName && !c.cfgMorph.notaryEnabled { - continue // ignore proxy contract if notary disabled - } - if emptyHash.Equals(*t.h) { *t.h, err = c.cfgMorph.client.NNSContractAddress(t.nnsName) fatalOnErrDetails(fmt.Sprintf("can't resolve %s in NNS", t.nnsName), err) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 9127d1123..e94428fcb 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -193,16 +193,14 @@ func addNewEpochNotificationHandlers(c *cfg) { } }) - if c.cfgMorph.notaryEnabled { - addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { - _, _, err := makeNotaryDeposit(ctx, c) - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, - zap.String("error", err.Error()), - ) - } - }) - } + addNewEpochAsyncNotificationHandler(c, func(ctx context.Context, _ event.Event) { + _, _, err := makeNotaryDeposit(ctx, c) + if err != nil { + c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, + zap.String("error", err.Error()), + ) + } + }) } // bootstrapNode adds current node to the Network map. 
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index c4205a620..6804aae59 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -137,24 +136,6 @@ func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { return result, nil } -type innerRingFetcherWithoutNotary struct { - nm *nmClient.Client -} - -func (f *innerRingFetcherWithoutNotary) InnerRingKeys() ([][]byte, error) { - keys, err := f.nm.GetInnerRingList() - if err != nil { - return nil, fmt.Errorf("can't get inner ring keys from netmap contract: %w", err) - } - - result := make([][]byte, 0, len(keys)) - for i := range keys { - result = append(result, keys[i].Bytes()) - } - - return result, nil -} - func initObjectService(c *cfg) { keyStorage := util.NewKeyStorage(&c.key.PrivateKey, c.privateTokenStore, c.cfgNetmap.state) @@ -305,13 +286,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl } func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { - if c.cfgMorph.client.ProbeNotary() { - return &innerRingFetcherWithNotary{ - sidechain: c.cfgMorph.client, - } - } - return &innerRingFetcherWithoutNotary{ - nm: c.cfgNetmap.wrapper, + return &innerRingFetcherWithNotary{ + sidechain: c.cfgMorph.client, } } From 6a51086030bc9ae8f6403928579313d629549110 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 4 Dec 2024 15:25:36 +0300 Subject: [PATCH 245/591] [#1538] morph/client: Remove TryNotary() option from side-chain contracts The notary is always enabled and this option does always work. 
Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/metabase/upgrade.go | 2 +- cmd/frostfs-node/config.go | 2 +- cmd/frostfs-node/container.go | 2 +- cmd/frostfs-node/morph.go | 2 +- pkg/innerring/initialization.go | 5 ++--- pkg/morph/client/balance/client.go | 10 +--------- pkg/morph/client/balance/transfer.go | 2 -- pkg/morph/client/container/client.go | 10 +--------- pkg/morph/client/container/delete.go | 2 -- pkg/morph/client/container/put.go | 2 -- pkg/morph/client/netmap/client.go | 10 +--------- 11 files changed, 9 insertions(+), 40 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go index 00b30c9b2..beced0d7a 100644 --- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -135,7 +135,7 @@ func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, er if err != nil { return nil, fmt.Errorf("resolve container contract hash: %w", err) } - cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary()) + cc, err := morphcontainer.NewFromMorph(cli, sh, 0) if err != nil { return nil, fmt.Errorf("create morph container client: %w", err) } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index e63786b7d..9b727e41a 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1464,7 +1464,7 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker { func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider { return container.NewInfoProvider(func() (container.Source, error) { c.initMorphComponents(ctx) - cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary()) + cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) if err != nil { return nil, err } diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index d3e1b2766..7d558dacb 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -28,7 +28,7 @@ import ( func initContainerService(_ context.Context, c *cfg) { // container wrapper that tries to invoke notary // requests if chain is configured so - wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, cntClient.TryNotary()) + wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) c.shared.cnrClient = wrap diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 3b9175981..81579c7fc 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -44,7 +44,7 @@ func (c *cfg) initMorphComponents(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeNotarySupport) - wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0, nmClient.TryNotary()) + wrap, err := nmClient.NewFromMorph(c.cfgMorph.client, c.cfgNetmap.scriptHash, 0) fatalOnErr(err) var netmapSource netmap.Source diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 25f4ff034..5481354e1 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -380,7 +380,6 @@ func (s *Server) initClientsFromMorph() (*serverMorphClients, error) { // form morph container client's options morphCnrOpts := make([]container.Option, 0, 3) morphCnrOpts = append(morphCnrOpts, - container.TryNotary(), container.AsAlphabet(), ) @@ -390,12 +389,12 @@ func (s *Server) 
initClientsFromMorph() (*serverMorphClients, error) { } s.containerClient = result.CnrClient - s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.TryNotary(), nmClient.AsAlphabet()) + s.netmapClient, err = nmClient.NewFromMorph(s.morphClient, s.contracts.netmap, fee, nmClient.AsAlphabet()) if err != nil { return nil, err } - s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.TryNotary(), balanceClient.AsAlphabet()) + s.balanceClient, err = balanceClient.NewFromMorph(s.morphClient, s.contracts.balance, fee, balanceClient.AsAlphabet()) if err != nil { return nil, err } diff --git a/pkg/morph/client/balance/client.go b/pkg/morph/client/balance/client.go index d229e5900..1dacb9574 100644 --- a/pkg/morph/client/balance/client.go +++ b/pkg/morph/client/balance/client.go @@ -54,15 +54,7 @@ type Option func(*opts) type opts []client.StaticClientOption func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - *o = append(*o, client.TryNotary()) - } + return &opts{client.TryNotary()} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 9638b5c45..16c8f3982 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -21,8 +21,6 @@ type TransferPrm struct { // TransferX transfers p.Amount of GASe-12 from p.From to p.To // with details p.Details through direct smart contract call. -// -// If TryNotary is provided, calls notary contract. func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { from, err := p.From.ScriptHash() if err != nil { diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go index b95b1ea06..51aa1a93a 100644 --- a/pkg/morph/client/container/client.go +++ b/pkg/morph/client/container/client.go @@ -73,15 +73,7 @@ type opts struct { } func defaultOpts() *opts { - return new(opts) -} - -// TryNotary returns option to enable -// notary invocation tries. -func TryNotary() Option { - return func(o *opts) { - o.staticOpts = append(o.staticOpts, client.TryNotary()) - } + return &opts{staticOpts: []client.StaticClientOption{client.TryNotary()}} } // AsAlphabet returns option to sign main TX diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go index 5696645b2..ef5cc8c38 100644 --- a/pkg/morph/client/container/delete.go +++ b/pkg/morph/client/container/delete.go @@ -66,8 +66,6 @@ func (d *DeletePrm) SetKey(key []byte) { // // Returns valid until block and any error encountered that caused // the removal to interrupt. -// -// If TryNotary is provided, calls notary contract. func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { if len(p.signature) == 0 && !p.IsControl() { return 0, errNilArgument diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index 74d9f6da8..1eb03694f 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -94,8 +94,6 @@ func (p *PutPrm) SetZone(zone string) { // // Returns calculated container identifier and any error // encountered that caused the saving to interrupt. -// -// If TryNotary is provided, calls notary contract. 
 func (c *Client) Put(ctx context.Context, p PutPrm) error {
 	if len(p.sig) == 0 || len(p.key) == 0 {
 		return errNilArgument
diff --git a/pkg/morph/client/netmap/client.go b/pkg/morph/client/netmap/client.go
index da7eb0719..de8afbfb5 100644
--- a/pkg/morph/client/netmap/client.go
+++ b/pkg/morph/client/netmap/client.go
@@ -65,15 +65,7 @@ type Option func(*opts)
 type opts []client.StaticClientOption
 
 func defaultOpts() *opts {
-	return new(opts)
-}
-
-// TryNotary returns option to enable
-// notary invocation tries.
-func TryNotary() Option {
-	return func(o *opts) {
-		*o = append(*o, client.TryNotary())
-	}
+	return &opts{client.TryNotary()}
 }
 
 // AsAlphabet returns option to sign main TX

From 84b4051b4da05e3a0433793c6de89fcb5b5e2e9c Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 4 Dec 2024 15:28:33 +0300
Subject: [PATCH 246/591] [#1538] morph/container: Make opts struct similar to that of other contracts

Signed-off-by: Evgenii Stratonikov
---
 pkg/morph/client/container/client.go | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index 51aa1a93a..be684619b 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -46,7 +46,7 @@ func NewFromMorph(cli *client.Client, contract util.Uint160, fee fixedn.Fixed8,
 		opts[i](o)
 	}
 
-	sc, err := client.NewStatic(cli, contract, fee, o.staticOpts...)
+	sc, err := client.NewStatic(cli, contract, fee, *o...)
 	if err != nil {
 		return nil, fmt.Errorf("create 'container' contract client: %w", err)
 	}
@@ -68,12 +68,10 @@ func (c Client) ContractAddress() util.Uint160 {
 // parameter of Wrapper.
 type Option func(*opts)
 
-type opts struct {
-	staticOpts []client.StaticClientOption
-}
+type opts []client.StaticClientOption
 
 func defaultOpts() *opts {
-	return &opts{staticOpts: []client.StaticClientOption{client.TryNotary()}}
+	return &opts{client.TryNotary()}
 }
 
 // AsAlphabet returns option to sign main TX
@@ -83,6 +81,6 @@ func defaultOpts() *opts {
 // Considered to be used by IR nodes only.
 func AsAlphabet() Option {
 	return func(o *opts) {
-		o.staticOpts = append(o.staticOpts, client.AsAlphabet())
+		*o = append(*o, client.AsAlphabet())
 	}
 }

From 5fe78e51d1c4cc19d53e2106de4c7577f6d4f272 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 5 Dec 2024 10:32:20 +0300
Subject: [PATCH 247/591] [#1540] getSvc: Do not log context canceled errors during EC assemble

Those errors are fired when enough chunks have been retrieved and the error group cancels the remaining requests.
Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/assemblerec.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index b0895e13e..127be2b52 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -238,13 +238,13 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object var object *objectSDK.Object if a.head { object, err = a.localStorage.Head(ctx, addr, false) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } else { object, err = a.localStorage.Get(ctx, addr) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) return nil } @@ -286,13 +286,13 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli var object *objectSDK.Object if a.head { object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) return nil } } else { object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) return nil } From 7df3520d486555a0211f7e37ee3e0fa9a96cf92c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 5 Dec 2024 10:32:30 +0300 Subject: [PATCH 248/591] [#1540] getSvc: Drop redundant returns Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/assemblerec.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index 127be2b52..a53299480 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -240,13 +240,11 @@ func (a *assemblerec) tryGetChunkFromLocalStorage(ctx context.Context, ch object object, err = a.localStorage.Head(ctx, addr, false) if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } else { object, err = a.localStorage.Get(ctx, addr) if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", "local"), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } return object @@ -288,13 +286,11 @@ func (a *assemblerec) tryGetChunkFromRemoteStorage(ctx context.Context, node cli object, err = a.remoteStorage.headObjectFromNode(ctx, addr, node, false) if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToHeadPartECObject, zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } else { object, err = a.remoteStorage.getObjectFromNode(ctx, addr, node) if err != nil && !errors.Is(err, context.Canceled) { a.log.Warn(ctx, logs.GetUnableToGetPartECObject, zap.String("node", 
hex.EncodeToString(node.PublicKey())), zap.Stringer("part_id", objID), zap.Error(err)) - return nil } } return object From d5d5ce2074d1a5ef60ebe04465c35f047dfdca15 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 5 Dec 2024 11:17:12 +0300 Subject: [PATCH 249/591] [#1541] morph/event: Simplify balance contract event parsing Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/balance/lock.go | 68 +++++----------------------- pkg/morph/event/balance/lock_test.go | 3 +- 2 files changed, 13 insertions(+), 58 deletions(-) diff --git a/pkg/morph/event/balance/lock.go b/pkg/morph/event/balance/lock.go index 062a2a886..99f80584a 100644 --- a/pkg/morph/event/balance/lock.go +++ b/pkg/morph/event/balance/lock.go @@ -3,7 +3,7 @@ package balance import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/balance" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -47,61 +47,17 @@ func (l Lock) TxHash() util.Uint256 { return l.txHash } // ParseLock from notification into lock structure. func ParseLock(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Lock - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var le balance.LockEvent + if err := le.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse balance.LockEvent: %w", err) } - if ln := len(params); ln != 5 { - return nil, event.WrongNumberOfParameters(5, ln) - } - - // parse id - ev.id, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get lock id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get lock user value: %w", err) - } - - ev.user, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert lock user value to uint160: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get lock account value: %w", err) - } - - ev.lock, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert lock account value to uint160: %w", err) - } - - // parse amount - ev.amount, err = client.IntFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get lock amount: %w", err) - } - - // parse until deadline - ev.until, err = client.IntFromStackItem(params[4]) - if err != nil { - return nil, fmt.Errorf("could not get lock deadline: %w", err) - } - - ev.txHash = e.Container - - return ev, nil + return Lock{ + id: le.TxID, + user: le.From, + lock: le.To, + amount: le.Amount.Int64(), + until: le.Until.Int64(), + txHash: e.Container, + }, nil } diff --git a/pkg/morph/event/balance/lock_test.go b/pkg/morph/event/balance/lock_test.go index 9199bcd55..87b91aede 100644 --- a/pkg/morph/event/balance/lock_test.go +++ b/pkg/morph/event/balance/lock_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -28,7 +27,7 @@ func TestParseLock(t *testing.T) { } _, err := ParseLock(createNotifyEventFromItems(prms)) - 
require.EqualError(t, err, event.WrongNumberOfParameters(5, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { From d5c46d812a641a93e299d468b0daa49aa2f9e323 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 5 Dec 2024 11:43:24 +0300 Subject: [PATCH 250/591] [#1541] go.mod: Update frostfs-contract New version contains more idiomatic types in the auto-generated code. Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c538a3178..7e7fa584e 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 diff --git a/go.sum b/go.sum index 064f3274e..dec34ff6e 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4 h1:o3iqVmbvFsfe8kpB2Hvuix6Q/tAhbiPLP91xK4lmoBQ= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.0-rc.4/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= From a353d45742e9ac48260a9d2f32b81c4b3c7c769c Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 5 Dec 2024 11:52:30 +0300 Subject: [PATCH 251/591] [#1541] morph/event: Simplify container contract event parsing Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/container/delete.go | 32 ++++++-------------- pkg/morph/event/container/delete_test.go | 3 +- pkg/morph/event/container/put.go | 37 ++++++------------------ pkg/morph/event/container/put_test.go | 28 +++++++++++++----- 4 files changed, 39 insertions(+), 61 deletions(-) diff --git a/pkg/morph/event/container/delete.go b/pkg/morph/event/container/delete.go index a206307f8..d28f6d521 100644 --- a/pkg/morph/event/container/delete.go +++ b/pkg/morph/event/container/delete.go @@ -3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -58,28 +58,14 @@ func (DeleteSuccess) MorphEvent() {} // ParseDeleteSuccess decodes notification event thrown by Container contract into // DeleteSuccess and returns it as event.Event. 
func ParseDeleteSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var dse container.DeleteSuccessEvent + if err := dse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.DeleteSuccessEvent: %w", err) } - const expectedItemNumDeleteSuccess = 1 - - if ln := len(items); ln != expectedItemNumDeleteSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumDeleteSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - var res DeleteSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(dse.ContainerID) + return DeleteSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/delete_test.go b/pkg/morph/event/container/delete_test.go index 627c5fcf5..62e7d7277 100644 --- a/pkg/morph/event/container/delete_test.go +++ b/pkg/morph/event/container/delete_test.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -18,7 +17,7 @@ func TestParseDeleteSuccess(t *testing.T) { } _, err := ParseDeleteSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container parameter", func(t *testing.T) { diff --git a/pkg/morph/event/container/put.go b/pkg/morph/event/container/put.go index 335034bf3..b09394ba4 100644 --- a/pkg/morph/event/container/put.go +++ b/pkg/morph/event/container/put.go @@ -3,7 +3,7 @@ package container import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -78,33 +78,14 @@ func (PutSuccess) MorphEvent() {} // ParsePutSuccess decodes notification event thrown by Container contract into // PutSuccess and returns it as event.Event. 
func ParsePutSuccess(e *state.ContainedNotificationEvent) (event.Event, error) { - items, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("parse stack array from raw notification event: %w", err) + var pse container.PutSuccessEvent + if err := pse.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse container.PutSuccessEvent: %w", err) } - const expectedItemNumPutSuccess = 2 - - if ln := len(items); ln != expectedItemNumPutSuccess { - return nil, event.WrongNumberOfParameters(expectedItemNumPutSuccess, ln) - } - - binID, err := client.BytesFromStackItem(items[0]) - if err != nil { - return nil, fmt.Errorf("parse container ID item: %w", err) - } - - _, err = client.BytesFromStackItem(items[1]) - if err != nil { - return nil, fmt.Errorf("parse public key item: %w", err) - } - - var res PutSuccess - - err = res.ID.Decode(binID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) - } - - return res, nil + var cnr cid.ID + cnr.SetSHA256(pse.ContainerID) + return PutSuccess{ + ID: cnr, + }, nil } diff --git a/pkg/morph/event/container/put_test.go b/pkg/morph/event/container/put_test.go index 3622f9943..dd5c7ea93 100644 --- a/pkg/morph/event/container/put_test.go +++ b/pkg/morph/event/container/put_test.go @@ -4,8 +4,8 @@ import ( "crypto/sha256" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +17,7 @@ func TestParsePutSuccess(t *testing.T) { } _, err := ParsePutSuccess(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(2, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong container ID parameter", func(t *testing.T) { @@ -35,18 +35,30 @@ func TestParsePutSuccess(t *testing.T) { id.Encode(binID) t.Run("wrong public key parameter", func(t *testing.T) { - _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ - stackitem.NewByteArray(binID), - stackitem.NewMap(), - })) + t.Run("wrong type", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewMap(), + })) - require.Error(t, err) + require.Error(t, err) + }) + t.Run("garbage data", func(t *testing.T) { + _, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ + stackitem.NewByteArray(binID), + stackitem.NewByteArray([]byte("key")), + })) + require.Error(t, err) + }) }) t.Run("correct behavior", func(t *testing.T) { + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + ev, err := ParsePutSuccess(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(binID), - stackitem.NewByteArray([]byte("key")), + stackitem.NewByteArray(pk.PublicKey().Bytes()), })) require.NoError(t, err) From 1c12f23b841511b95efac32b62470a7b1e316fb3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 5 Dec 2024 11:58:38 +0300 Subject: [PATCH 252/591] [#1541] morph/event: Simplify netmap contract event parsing Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/netmap/epoch.go | 21 +++++---------------- pkg/morph/event/netmap/epoch_test.go | 3 +-- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go index e454e2a6a..4dcc0d035 100644 --- a/pkg/morph/event/netmap/epoch.go +++ 
b/pkg/morph/event/netmap/epoch.go @@ -1,9 +1,7 @@ package netmap import ( - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -37,22 +35,13 @@ func (s NewEpoch) TxHash() util.Uint256 { // // Result is type of NewEpoch. func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) - } - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - prmEpochNum, err := client.IntFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get integer epoch number: %w", err) + var nee netmap.NewEpochEvent + if err := nee.FromStackItem(e.Item); err != nil { + return nil, err } return NewEpoch{ - Num: uint64(prmEpochNum), + Num: uint64(nee.Epoch.Uint64()), Hash: e.Container, }, nil } diff --git a/pkg/morph/event/netmap/epoch_test.go b/pkg/morph/event/netmap/epoch_test.go index bc267ecb6..6ff692327 100644 --- a/pkg/morph/event/netmap/epoch_test.go +++ b/pkg/morph/event/netmap/epoch_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -17,7 +16,7 @@ func TestParseNewEpoch(t *testing.T) { } _, err := ParseNewEpoch(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter type", func(t *testing.T) { From d1bc4351c3d2904bedfad970d13c9bcde55e3d31 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 6 Dec 2024 13:59:15 +0300 Subject: [PATCH 253/591] [#1545] morph/event: Simplify frostfs contract event parsing Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/frostfs/cheque.go | 59 ++++++------------------ pkg/morph/event/frostfs/cheque_test.go | 3 +- pkg/morph/event/frostfs/config.go | 44 ++++-------------- pkg/morph/event/frostfs/config_test.go | 3 +- pkg/morph/event/frostfs/deposit.go | 55 ++++------------------ pkg/morph/event/frostfs/deposit_test.go | 9 ++-- pkg/morph/event/frostfs/withdraw.go | 43 ++++------------- pkg/morph/event/frostfs/withdraw_test.go | 9 ++-- 8 files changed, 52 insertions(+), 173 deletions(-) diff --git a/pkg/morph/event/frostfs/cheque.go b/pkg/morph/event/frostfs/cheque.go index eae2a23f5..cf56464b8 100644 --- a/pkg/morph/event/frostfs/cheque.go +++ b/pkg/morph/event/frostfs/cheque.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,53 +34,20 @@ func (c Cheque) LockAccount() util.Uint160 { return c.LockValue } // ParseCheque from notification into cheque structure. 
func ParseCheque(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Cheque - err error - ) + var ce frostfs.ChequeEvent + if err := ce.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.ChequeEvent: %w", err) + } - params, err := event.ParseStackArray(e) + lock, err := util.Uint160DecodeBytesBE(ce.LockAccount) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse frostfs.ChequeEvent: field LockAccount: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get cheque id: %w", err) - } - - // parse user - user, err := client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get cheque user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert cheque user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get cheque amount: %w", err) - } - - // parse lock account - lock, err := client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get cheque lock account: %w", err) - } - - ev.LockValue, err = util.Uint160DecodeBytesBE(lock) - if err != nil { - return nil, fmt.Errorf("could not convert cheque lock account to uint160: %w", err) - } - - return ev, nil + return Cheque{ + IDValue: ce.Id, + AmountValue: ce.Amount.Int64(), + UserValue: ce.User, + LockValue: lock, + }, nil } diff --git a/pkg/morph/event/frostfs/cheque_test.go b/pkg/morph/event/frostfs/cheque_test.go index ab177757f..d92b7922b 100644 --- a/pkg/morph/event/frostfs/cheque_test.go +++ b/pkg/morph/event/frostfs/cheque_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -27,7 +26,7 @@ func TestParseCheque(t *testing.T) { } _, err := ParseCheque(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong id parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/config.go b/pkg/morph/event/frostfs/config.go index 4c87634c2..805e80f3c 100644 --- a/pkg/morph/event/frostfs/config.go +++ b/pkg/morph/event/frostfs/config.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -36,39 +36,15 @@ func (u Config) Key() []byte { return u.KeyValue } func (u Config) Value() []byte { return u.ValueValue } func ParseConfig(e *state.ContainedNotificationEvent) (event.Event, error) { - var ( - ev Config - err error - ) - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var sce frostfs.SetConfigEvent + if err := sce.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.SetConfigEvent: %w", err) } - if ln := len(params); ln 
!= 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get config update id: %w", err) - } - - // parse key - ev.KeyValue, err = client.BytesFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get config key: %w", err) - } - - // parse value - ev.ValueValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get config value: %w", err) - } - - ev.TxHashValue = e.Container - - return ev, nil + return Config{ + KeyValue: sce.Key, + ValueValue: sce.Value, + IDValue: sce.Id, + TxHashValue: e.Container, + }, nil } diff --git a/pkg/morph/event/frostfs/config_test.go b/pkg/morph/event/frostfs/config_test.go index dcd4201e4..8acc8c15c 100644 --- a/pkg/morph/event/frostfs/config_test.go +++ b/pkg/morph/event/frostfs/config_test.go @@ -3,7 +3,6 @@ package frostfs import ( "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" ) @@ -21,7 +20,7 @@ func TestParseConfig(t *testing.T) { } _, err := ParseConfig(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong first parameter", func(t *testing.T) { diff --git a/pkg/morph/event/frostfs/deposit.go b/pkg/morph/event/frostfs/deposit.go index d8a3b82f0..fcb01577e 100644 --- a/pkg/morph/event/frostfs/deposit.go +++ b/pkg/morph/event/frostfs/deposit.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -34,50 +34,15 @@ func (d Deposit) Amount() int64 { return d.AmountValue } // ParseDeposit notification into deposit structure. 
func ParseDeposit(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Deposit - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var de frostfs.DepositEvent + if err := de.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.DepositEvent: %w", err) } - if ln := len(params); ln != 4 { - return nil, event.WrongNumberOfParameters(4, ln) - } - - // parse from - from, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get deposit sender: %w", err) - } - - ev.FromValue, err = util.Uint160DecodeBytesBE(from) - if err != nil { - return nil, fmt.Errorf("could not convert deposit sender to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get deposit amount: %w", err) - } - - // parse to - to, err := client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get deposit receiver: %w", err) - } - - ev.ToValue, err = util.Uint160DecodeBytesBE(to) - if err != nil { - return nil, fmt.Errorf("could not convert deposit receiver to uint160: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[3]) - if err != nil { - return nil, fmt.Errorf("could not get deposit id: %w", err) - } - - return ev, nil + return Deposit{ + IDValue: de.TxHash[:], + AmountValue: de.Amount.Int64(), + FromValue: de.From, + ToValue: de.Receiver, + }, nil } diff --git a/pkg/morph/event/frostfs/deposit_test.go b/pkg/morph/event/frostfs/deposit_test.go index f279a7f9c..38d3e61f6 100644 --- a/pkg/morph/event/frostfs/deposit_test.go +++ b/pkg/morph/event/frostfs/deposit_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseDeposit(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{0, 1, 2, 3} from = util.Uint160{0x1, 0x2, 0x3} to = util.Uint160{0x3, 0x2, 0x1} @@ -26,7 +25,7 @@ func TestParseDeposit(t *testing.T) { } _, err := ParseDeposit(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(4, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong from parameter", func(t *testing.T) { @@ -72,12 +71,12 @@ func TestParseDeposit(t *testing.T) { stackitem.NewByteArray(from.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), stackitem.NewByteArray(to.BytesBE()), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Deposit{ - IDValue: id, + IDValue: id[:], AmountValue: amount, FromValue: from, ToValue: to, diff --git a/pkg/morph/event/frostfs/withdraw.go b/pkg/morph/event/frostfs/withdraw.go index f48067f86..2568b6512 100644 --- a/pkg/morph/event/frostfs/withdraw.go +++ b/pkg/morph/event/frostfs/withdraw.go @@ -3,7 +3,7 @@ package frostfs import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/util" @@ -30,39 +30,14 @@ func (w Withdraw) Amount() int64 { return w.AmountValue } // ParseWithdraw notification into 
withdraw structure. func ParseWithdraw(e *state.ContainedNotificationEvent) (event.Event, error) { - var ev Withdraw - - params, err := event.ParseStackArray(e) - if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + var we frostfs.WithdrawEvent + if err := we.FromStackItem(e.Item); err != nil { + return nil, fmt.Errorf("parse frostfs.WithdrawEvent: %w", err) } - if ln := len(params); ln != 3 { - return nil, event.WrongNumberOfParameters(3, ln) - } - - // parse user - user, err := client.BytesFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw user: %w", err) - } - - ev.UserValue, err = util.Uint160DecodeBytesBE(user) - if err != nil { - return nil, fmt.Errorf("could not convert withdraw user to uint160: %w", err) - } - - // parse amount - ev.AmountValue, err = client.IntFromStackItem(params[1]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw amount: %w", err) - } - - // parse id - ev.IDValue, err = client.BytesFromStackItem(params[2]) - if err != nil { - return nil, fmt.Errorf("could not get withdraw id: %w", err) - } - - return ev, nil + return Withdraw{ + IDValue: we.TxHash[:], + AmountValue: we.Amount.Int64(), + UserValue: we.User, + }, nil } diff --git a/pkg/morph/event/frostfs/withdraw_test.go b/pkg/morph/event/frostfs/withdraw_test.go index 33435d19a..e382305e6 100644 --- a/pkg/morph/event/frostfs/withdraw_test.go +++ b/pkg/morph/event/frostfs/withdraw_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/stretchr/testify/require" @@ -12,7 +11,7 @@ import ( func TestParseWithdraw(t *testing.T) { var ( - id = []byte("Hello World") + id = util.Uint256{1, 2, 3} user = util.Uint160{0x1, 0x2, 0x3} amount int64 = 10 @@ -25,7 +24,7 @@ func TestParseWithdraw(t *testing.T) { } _, err := ParseWithdraw(createNotifyEventFromItems(prms)) - require.EqualError(t, err, event.WrongNumberOfParameters(3, len(prms)).Error()) + require.Error(t, err) }) t.Run("wrong user parameter", func(t *testing.T) { @@ -59,12 +58,12 @@ func TestParseWithdraw(t *testing.T) { ev, err := ParseWithdraw(createNotifyEventFromItems([]stackitem.Item{ stackitem.NewByteArray(user.BytesBE()), stackitem.NewBigInteger(new(big.Int).SetInt64(amount)), - stackitem.NewByteArray(id), + stackitem.NewByteArray(id[:]), })) require.NoError(t, err) require.Equal(t, Withdraw{ - IDValue: id, + IDValue: id[:], AmountValue: amount, UserValue: user, }, ev) From 7e542906ef9085a792e5ec5f7185bfdd9acecedb Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Fri, 6 Dec 2024 14:48:44 +0300 Subject: [PATCH 254/591] [#1539] go.mod: Bump `frostfs-sdk-go` version * Also fix placement unit-test in object manager Signed-off-by: Airat Arifullin --- go.mod | 2 +- go.sum | 4 ++-- pkg/services/object_manager/placement/cache_test.go | 5 ++++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 7e7fa584e..6ac37d343 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d 
git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index dec34ff6e..e084c2445 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823 h1:sepm9FeuoInmygH1K/+3L+Yp5bJhGiVi/oGCH6Emp2c= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241107121119-cb813e27a823/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d h1:FpXI+mOrmJk3t2MKQFZuhLjCHDyDeo5rtP1WXl7gUWc= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go index a890d5357..7242970b5 100644 --- a/pkg/services/object_manager/placement/cache_test.go +++ b/pkg/services/object_manager/placement/cache_test.go @@ -85,7 +85,10 @@ func TestContainerNodesCache(t *testing.T) { }) t.Run("the error is propagated", func(t *testing.T) { var pp netmapSDK.PlacementPolicy - require.NoError(t, pp.DecodeString("REP 1 SELECT 1 FROM X FILTER ATTR EQ 42 AS X")) + r := netmapSDK.ReplicaDescriptor{} + r.SetNumberOfObjects(1) + r.SetSelectorName("Missing") + pp.AddReplicas(r) c := placement.NewContainerNodesCache(size) _, err := c.ContainerNodes(nm(1, nodes[0:1]), cidtest.ID(), pp) From ac0511d21423df1d191539f8272f962c23dfb62b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 10 Dec 2024 16:52:46 +0300 Subject: [PATCH 255/591] [#1549] controlSvc: Drop deprecated EvacuateShard rpc Signed-off-by: Dmitrii Stepanov --- .../modules/control/evacuate_shard.go | 56 ------ cmd/frostfs-cli/modules/control/evacuation.go | 9 +- cmd/frostfs-cli/modules/control/shards.go | 2 - pkg/services/control/rpc.go | 14 -- pkg/services/control/server/evacuate.go | 188 ------------------ pkg/services/control/server/evacuate_async.go | 148 +++++++++++++- pkg/services/control/service.proto | 5 - pkg/services/control/service_grpc.pb.go | 43 ---- 8 files changed, 151 insertions(+), 314 deletions(-) delete mode 100644 cmd/frostfs-cli/modules/control/evacuate_shard.go delete mode 100644 pkg/services/control/server/evacuate.go diff --git a/cmd/frostfs-cli/modules/control/evacuate_shard.go b/cmd/frostfs-cli/modules/control/evacuate_shard.go deleted file mode 100644 index 1e48c1df4..000000000 --- a/cmd/frostfs-cli/modules/control/evacuate_shard.go +++ /dev/null @@ -1,56 +0,0 @@ -package control - -import ( - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" - "github.com/spf13/cobra" -) - -const ignoreErrorsFlag = "no-errors" - -var evacuateShardCmd = &cobra.Command{ - Use: "evacuate", - Short: "Evacuate objects from shard", - Long: "Evacuate objects from shard to other shards", - Run: evacuateShard, - Deprecated: "use frostfs-cli control shards evacuation start", -} - -func evacuateShard(cmd *cobra.Command, _ []string) { - pk := key.Get(cmd) - - req := &control.EvacuateShardRequest{Body: new(control.EvacuateShardRequest_Body)} - req.Body.Shard_ID = getShardIDList(cmd) - req.Body.IgnoreErrors, _ = cmd.Flags().GetBool(ignoreErrorsFlag) - - signRequest(cmd, pk, req) - - cli := getClient(cmd, pk) - - var resp *control.EvacuateShardResponse - var err error - err = cli.ExecRaw(func(client *client.Client) error { - resp, err = control.EvacuateShard(client, req) - return err - }) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - cmd.Printf("Objects moved: %d\n", resp.GetBody().GetCount()) - - verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) - - cmd.Println("Shard has successfully been evacuated.") -} - -func initControlEvacuateShardCmd() { - initControlFlags(evacuateShardCmd) - - flags := evacuateShardCmd.Flags() - flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding") - flags.Bool(shardAllFlag, false, "Process all shards") - flags.Bool(ignoreErrorsFlag, false, "Skip invalid/unreadable objects") - - evacuateShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag) -} diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 73700e56d..8032bf09a 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -17,10 +17,11 @@ import ( ) const ( - awaitFlag = "await" - noProgressFlag = "no-progress" - scopeFlag = "scope" - repOneOnlyFlag = "rep-one-only" + awaitFlag = "await" + noProgressFlag = "no-progress" + scopeFlag = "scope" + repOneOnlyFlag = "rep-one-only" + ignoreErrorsFlag = "no-errors" containerWorkerCountFlag = "container-worker-count" objectWorkerCountFlag = "object-worker-count" diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go index 329cb9100..3483f5d62 100644 --- a/cmd/frostfs-cli/modules/control/shards.go +++ b/cmd/frostfs-cli/modules/control/shards.go @@ -13,7 +13,6 @@ var shardsCmd = &cobra.Command{ func initControlShardsCmd() { shardsCmd.AddCommand(listShardsCmd) shardsCmd.AddCommand(setShardModeCmd) - shardsCmd.AddCommand(evacuateShardCmd) shardsCmd.AddCommand(evacuationShardCmd) shardsCmd.AddCommand(flushCacheCmd) shardsCmd.AddCommand(doctorCmd) @@ -23,7 +22,6 @@ func initControlShardsCmd() { initControlShardsListCmd() initControlSetShardModeCmd() - initControlEvacuateShardCmd() initControlEvacuationShardCmd() initControlFlushCacheCmd() initControlDoctorCmd() diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 514061db4..6982d780d 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -15,7 +15,6 @@ const ( rpcListShards = "ListShards" rpcSetShardMode = "SetShardMode" rpcSynchronizeTree = "SynchronizeTree" - rpcEvacuateShard = "EvacuateShard" rpcStartShardEvacuation = "StartShardEvacuation" rpcGetShardEvacuationStatus = 
"GetShardEvacuationStatus" rpcResetShardEvacuationStatus = "ResetShardEvacuationStatus" @@ -162,19 +161,6 @@ func SynchronizeTree(cli *client.Client, req *SynchronizeTreeRequest, opts ...cl return wResp.message, nil } -// EvacuateShard executes ControlService.EvacuateShard RPC. -func EvacuateShard(cli *client.Client, req *EvacuateShardRequest, opts ...client.CallOption) (*EvacuateShardResponse, error) { - wResp := newResponseWrapper[EvacuateShardResponse]() - wReq := &requestWrapper{m: req} - - err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcEvacuateShard), wReq, wResp, opts...) - if err != nil { - return nil, err - } - - return wResp.message, nil -} - // StartShardEvacuation executes ControlService.StartShardEvacuation RPC. func StartShardEvacuation(cli *client.Client, req *StartShardEvacuationRequest, opts ...client.CallOption) (*StartShardEvacuationResponse, error) { wResp := newResponseWrapper[StartShardEvacuationResponse]() diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go deleted file mode 100644 index ae3413373..000000000 --- a/pkg/services/control/server/evacuate.go +++ /dev/null @@ -1,188 +0,0 @@ -package control - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") - -func (s *Server) EvacuateShard(ctx context.Context, req *control.EvacuateShardRequest) (*control.EvacuateShardResponse, error) { - err := s.isValidRequest(req) - if err != nil { - return nil, status.Error(codes.PermissionDenied, err.Error()) - } - - prm := engine.EvacuateShardPrm{ - ShardID: s.getShardIDList(req.GetBody().GetShard_ID()), - IgnoreErrors: req.GetBody().GetIgnoreErrors(), - ObjectsHandler: s.replicateObject, - Scope: engine.EvacuateScopeObjects, - } - - res, err := s.s.Evacuate(ctx, prm) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - - resp := &control.EvacuateShardResponse{ - Body: &control.EvacuateShardResponse_Body{ - Count: uint32(res.ObjectsEvacuated()), - }, - } - - err = ctrlmessage.Sign(s.key, resp) - if err != nil { - return nil, status.Error(codes.Internal, err.Error()) - } - return resp, nil -} - -func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { - cid, ok := obj.ContainerID() - if !ok { - // Return nil to prevent situations where a shard can't be evacuated - // because of a single bad/corrupted object. 
- return false, nil - } - - nodes, err := s.getContainerNodes(cid) - if err != nil { - return false, err - } - - if len(nodes) == 0 { - return false, nil - } - - var res replicatorResult - task := replicator.Task{ - NumCopies: 1, - Addr: addr, - Obj: obj, - Nodes: nodes, - } - s.replicator.HandleReplicationTask(ctx, task, &res) - - if res.count == 0 { - return false, errors.New("object was not replicated") - } - return true, nil -} - -func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { - nodes, err := s.getContainerNodes(contID) - if err != nil { - return false, "", err - } - if len(nodes) == 0 { - return false, "", nil - } - - for _, node := range nodes { - err = s.replicateTreeToNode(ctx, forest, contID, treeID, node) - if err == nil { - return true, hex.EncodeToString(node.PublicKey()), nil - } - } - return false, "", err -} - -func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error { - rawCID := make([]byte, sha256.Size) - contID.Encode(rawCID) - - var height uint64 - for { - op, err := forest.TreeGetOpLog(ctx, contID, treeID, height) - if err != nil { - return err - } - - if op.Time == 0 { - return nil - } - - req := &tree.ApplyRequest{ - Body: &tree.ApplyRequest_Body{ - ContainerId: rawCID, - TreeId: treeID, - Operation: &tree.LogMove{ - ParentId: op.Parent, - Meta: op.Meta.Bytes(), - ChildId: op.Child, - }, - }, - } - - err = tree.SignMessage(req, s.key) - if err != nil { - return fmt.Errorf("can't message apply request: %w", err) - } - - err = s.treeService.ReplicateTreeOp(ctx, node, req) - if err != nil { - return err - } - - height = op.Time + 1 - } -} - -func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { - nm, err := s.netMapSrc.GetNetMap(0) - if err != nil { - return nil, err - } - - c, err := s.cnrSrc.Get(contID) - if err != nil { - return nil, err - } - - binCnr := make([]byte, sha256.Size) - contID.Encode(binCnr) - - ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr) - if err != nil { - return nil, errFailedToBuildListOfContainerNodes - } - - nodes := placement.FlattenNodes(ns) - bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() - for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body - if bytes.Equal(nodes[i].PublicKey(), bs) { - copy(nodes[i:], nodes[i+1:]) - nodes = nodes[:len(nodes)-1] - } - } - return nodes, nil -} - -type replicatorResult struct { - count int -} - -// SubmitSuccessfulReplication implements the replicator.TaskResult interface. 
-func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) { - r.count++ -} diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index 146ac7e16..04465eb26 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -1,17 +1,32 @@ package control import ( + "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" + "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) +var errFailedToBuildListOfContainerNodes = errors.New("can't build a list of container nodes") + func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartShardEvacuationRequest) (*control.StartShardEvacuationResponse, error) { err := s.isValidRequest(req) if err != nil { @@ -34,8 +49,7 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha RepOneOnly: req.GetBody().GetRepOneOnly(), } - _, err = s.s.Evacuate(ctx, prm) - if err != nil { + if err = s.s.Evacuate(ctx, prm); err != nil { var logicalErr logicerr.Logical if errors.As(err, &logicalErr) { return nil, status.Error(codes.Aborted, err.Error()) @@ -135,3 +149,133 @@ func (s *Server) ResetShardEvacuationStatus(ctx context.Context, req *control.Re } return resp, nil } + +func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) { + cid, ok := obj.ContainerID() + if !ok { + // Return nil to prevent situations where a shard can't be evacuated + // because of a single bad/corrupted object. 
+ return false, nil + } + + nodes, err := s.getContainerNodes(cid) + if err != nil { + return false, err + } + + if len(nodes) == 0 { + return false, nil + } + + var res replicatorResult + task := replicator.Task{ + NumCopies: 1, + Addr: addr, + Obj: obj, + Nodes: nodes, + } + s.replicator.HandleReplicationTask(ctx, task, &res) + + if res.count == 0 { + return false, errors.New("object was not replicated") + } + return true, nil +} + +func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { + nodes, err := s.getContainerNodes(contID) + if err != nil { + return false, "", err + } + if len(nodes) == 0 { + return false, "", nil + } + + for _, node := range nodes { + err = s.replicateTreeToNode(ctx, forest, contID, treeID, node) + if err == nil { + return true, hex.EncodeToString(node.PublicKey()), nil + } + } + return false, "", err +} + +func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest, contID cid.ID, treeID string, node netmap.NodeInfo) error { + rawCID := make([]byte, sha256.Size) + contID.Encode(rawCID) + + var height uint64 + for { + op, err := forest.TreeGetOpLog(ctx, contID, treeID, height) + if err != nil { + return err + } + + if op.Time == 0 { + return nil + } + + req := &tree.ApplyRequest{ + Body: &tree.ApplyRequest_Body{ + ContainerId: rawCID, + TreeId: treeID, + Operation: &tree.LogMove{ + ParentId: op.Parent, + Meta: op.Meta.Bytes(), + ChildId: op.Child, + }, + }, + } + + err = tree.SignMessage(req, s.key) + if err != nil { + return fmt.Errorf("can't message apply request: %w", err) + } + + err = s.treeService.ReplicateTreeOp(ctx, node, req) + if err != nil { + return err + } + + height = op.Time + 1 + } +} + +func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { + nm, err := s.netMapSrc.GetNetMap(0) + if err != nil { + return nil, err + } + + c, err := s.cnrSrc.Get(contID) + if err != nil { + return nil, err + } + + binCnr := make([]byte, sha256.Size) + contID.Encode(binCnr) + + ns, err := nm.ContainerNodes(c.Value.PlacementPolicy(), binCnr) + if err != nil { + return nil, errFailedToBuildListOfContainerNodes + } + + nodes := placement.FlattenNodes(ns) + bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() + for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body + if bytes.Equal(nodes[i].PublicKey(), bs) { + copy(nodes[i:], nodes[i+1:]) + nodes = nodes[:len(nodes)-1] + } + } + return nodes, nil +} + +type replicatorResult struct { + count int +} + +// SubmitSuccessfulReplication implements the replicator.TaskResult interface. +func (r *replicatorResult) SubmitSuccessfulReplication(_ netmap.NodeInfo) { + r.count++ +} diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index ae1939e13..97ecf9a8c 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -30,11 +30,6 @@ service ControlService { // Synchronizes all log operations for the specified tree. rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse); - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse); - // StartShardEvacuation starts moving all data from one shard to the others. 
rpc StartShardEvacuation(StartShardEvacuationRequest) returns (StartShardEvacuationResponse); diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index f5cfefa85..987e08c59 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -26,7 +26,6 @@ const ( ControlService_ListShards_FullMethodName = "/control.ControlService/ListShards" ControlService_SetShardMode_FullMethodName = "/control.ControlService/SetShardMode" ControlService_SynchronizeTree_FullMethodName = "/control.ControlService/SynchronizeTree" - ControlService_EvacuateShard_FullMethodName = "/control.ControlService/EvacuateShard" ControlService_StartShardEvacuation_FullMethodName = "/control.ControlService/StartShardEvacuation" ControlService_GetShardEvacuationStatus_FullMethodName = "/control.ControlService/GetShardEvacuationStatus" ControlService_ResetShardEvacuationStatus_FullMethodName = "/control.ControlService/ResetShardEvacuationStatus" @@ -62,10 +61,6 @@ type ControlServiceClient interface { SetShardMode(ctx context.Context, in *SetShardModeRequest, opts ...grpc.CallOption) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(ctx context.Context, in *SynchronizeTreeRequest, opts ...grpc.CallOption) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. @@ -173,15 +168,6 @@ func (c *controlServiceClient) SynchronizeTree(ctx context.Context, in *Synchron return out, nil } -func (c *controlServiceClient) EvacuateShard(ctx context.Context, in *EvacuateShardRequest, opts ...grpc.CallOption) (*EvacuateShardResponse, error) { - out := new(EvacuateShardResponse) - err := c.cc.Invoke(ctx, ControlService_EvacuateShard_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *controlServiceClient) StartShardEvacuation(ctx context.Context, in *StartShardEvacuationRequest, opts ...grpc.CallOption) (*StartShardEvacuationResponse, error) { out := new(StartShardEvacuationResponse) err := c.cc.Invoke(ctx, ControlService_StartShardEvacuation_FullMethodName, in, out, opts...) @@ -335,10 +321,6 @@ type ControlServiceServer interface { SetShardMode(context.Context, *SetShardModeRequest) (*SetShardModeResponse, error) // Synchronizes all log operations for the specified tree. SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) - // EvacuateShard moves all data from one shard to the others. - // Deprecated: Use - // StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation - EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) // StartShardEvacuation starts moving all data from one shard to the others. StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) // GetShardEvacuationStatus returns evacuation status. 
@@ -400,9 +382,6 @@ func (UnimplementedControlServiceServer) SetShardMode(context.Context, *SetShard func (UnimplementedControlServiceServer) SynchronizeTree(context.Context, *SynchronizeTreeRequest) (*SynchronizeTreeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SynchronizeTree not implemented") } -func (UnimplementedControlServiceServer) EvacuateShard(context.Context, *EvacuateShardRequest) (*EvacuateShardResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method EvacuateShard not implemented") -} func (UnimplementedControlServiceServer) StartShardEvacuation(context.Context, *StartShardEvacuationRequest) (*StartShardEvacuationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartShardEvacuation not implemented") } @@ -586,24 +565,6 @@ func _ControlService_SynchronizeTree_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } -func _ControlService_EvacuateShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(EvacuateShardRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServiceServer).EvacuateShard(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ControlService_EvacuateShard_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServiceServer).EvacuateShard(ctx, req.(*EvacuateShardRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _ControlService_StartShardEvacuation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartShardEvacuationRequest) if err := dec(in); err != nil { @@ -909,10 +870,6 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SynchronizeTree", Handler: _ControlService_SynchronizeTree_Handler, }, - { - MethodName: "EvacuateShard", - Handler: _ControlService_EvacuateShard_Handler, - }, { MethodName: "StartShardEvacuation", Handler: _ControlService_StartShardEvacuation_Handler, From 41da27dad51b2906b15157c0e3dc4e87c991da02 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 10 Dec 2024 16:53:19 +0300 Subject: [PATCH 256/591] [#1549] engine: Drop Async flag from evacuation parameters Now it is only async evacuation. Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/evacuate.go | 30 ++-- .../engine/evacuate_test.go | 130 +++++++++--------- pkg/services/control/server/evacuate_async.go | 1 - 3 files changed, 74 insertions(+), 87 deletions(-) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index b88c249b1..2e0344bfb 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -86,7 +86,6 @@ type EvacuateShardPrm struct { ObjectsHandler func(context.Context, oid.Address, *objectSDK.Object) (bool, error) TreeHandler func(context.Context, cid.ID, string, pilorama.Forest) (bool, string, error) IgnoreErrors bool - Async bool Scope EvacuateScope RepOneOnly bool @@ -211,10 +210,10 @@ var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") // Evacuate moves data from one shard to the others. // The shard being moved must be in read-only mode. 
-func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*EvacuateShardRes, error) { +func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) error { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err() default: } @@ -226,7 +225,6 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Evacuate", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), )) @@ -234,7 +232,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev shards, err := e.getActualShards(shardIDs, prm) if err != nil { - return nil, err + return err } shardsToEvacuate := make(map[string]*shard.Shard) @@ -247,10 +245,10 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev } res := NewEvacuateShardRes() - ctx = ctxOrBackground(ctx, prm.Async) - eg, egCtx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) + ctx = context.WithoutCancel(ctx) + eg, ctx, err := e.evacuateLimiter.TryStart(ctx, shardIDs, res) if err != nil { - return nil, err + return err } var mtx sync.RWMutex @@ -262,21 +260,10 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev return t } eg.Go(func() error { - return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate) + return e.evacuateShards(ctx, shardIDs, prm, res, copyShards, shardsToEvacuate) }) - if prm.Async { - return nil, nil - } - - return res, eg.Wait() -} - -func ctxOrBackground(ctx context.Context, background bool) context.Context { - if background { - return context.Background() - } - return ctx + return nil } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, @@ -286,7 +273,6 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", trace.WithAttributes( attribute.StringSlice("shardIDs", shardIDs), - attribute.Bool("async", prm.Async), attribute.Bool("ignoreErrors", prm.IgnoreErrors), attribute.Stringer("scope", prm.Scope), attribute.Bool("repOneOnly", prm.RepOneOnly), diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index beab8384e..248c39155 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -140,16 +140,17 @@ func TestEvacuateShardObjects(t *testing.T) { prm.Scope = EvacuateScopeObjects t.Run("must be read-only", func(t *testing.T) { - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, ErrMustBeReadOnly) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) }) require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly)) - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.NoError(t, err) - require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(objPerShard), st.ObjectsEvacuated()) // We check that all objects are available both before and after shard removal. // First case is a real-world use-case. 
It ensures that an object can be put in presense @@ -186,9 +187,10 @@ func TestEvacuateShardObjects(t *testing.T) { } // Calling it again is OK, but all objects are already moved, so no new PUTs should be done. - res, err = e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st = testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(0), st.ObjectsEvacuated()) checkHasObjects(t) @@ -200,6 +202,17 @@ func TestEvacuateShardObjects(t *testing.T) { checkHasObjects(t) } +func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { + var st *EvacuationState + var err error + require.Eventually(t, func() bool { + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + return st.ProcessingStatus() == EvacuateProcessStateCompleted + }, 3*time.Second, 10*time.Millisecond) + return st +} + func TestEvacuateObjectsNetwork(t *testing.T) { t.Parallel() @@ -242,15 +255,15 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ShardID = ids[0:1] prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) require.ErrorIs(t, err, errMustHaveTwoShards) - require.Equal(t, uint64(0), res.ObjectsEvacuated()) prm.ObjectsHandler = acceptOneOf(objects, 2) - res, err = e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) }) t.Run("multiple shards, evacuate one", func(t *testing.T) { t.Parallel() @@ -267,16 +280,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, 2) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, uint64(2), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, uint64(2), st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, 3) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, uint64(3), st.ObjectsEvacuated()) }) }) t.Run("multiple shards, evacuate many", func(t *testing.T) { @@ -305,16 +320,18 @@ func TestEvacuateObjectsNetwork(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount-1) prm.Scope = EvacuateScopeObjects - res, err := e.Evacuate(context.Background(), prm) - require.ErrorIs(t, err, errReplication) - require.Equal(t, totalCount-1, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), errReplication.Error()) + require.Equal(t, totalCount-1, st.ObjectsEvacuated()) t.Run("no errors", func(t *testing.T) { prm.ObjectsHandler = acceptOneOf(objects, totalCount) - res, err := 
e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, totalCount, res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, st.ErrorMessage(), "") + require.Equal(t, totalCount, st.ObjectsEvacuated()) }) }) } @@ -344,9 +361,8 @@ func TestEvacuateCancellation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - res, err := e.Evacuate(ctx, prm) + err := e.Evacuate(ctx, prm) require.ErrorContains(t, err, "context canceled") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) } func TestEvacuateCancellationByError(t *testing.T) { @@ -375,8 +391,9 @@ func TestEvacuateCancellationByError(t *testing.T) { prm.ObjectWorkerCount = 2 prm.ContainerWorkerCount = 2 - _, err := e.Evacuate(context.Background(), prm) - require.ErrorContains(t, err, "test error") + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Contains(t, st.ErrorMessage(), "test error") } func TestEvacuateSingleProcess(t *testing.T) { @@ -406,20 +423,19 @@ func TestEvacuateSingleProcess(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") return nil }) eg.Go(func() error { <-running - res, err := e.Evacuate(egCtx, prm) - require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed") - require.Equal(t, uint64(0), res.ObjectsEvacuated()) + require.ErrorContains(t, e.Evacuate(egCtx, prm), "evacuate is already running for shard ids", "second evacuation not failed") close(blocker) return nil }) require.NoError(t, eg.Wait()) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated()) + require.Equal(t, st.ErrorMessage(), "") } func TestEvacuateObjectsAsync(t *testing.T) { @@ -458,9 +474,9 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - res, err := e.Evacuate(egCtx, prm) - require.NoError(t, err, "first evacuation failed") - require.Equal(t, uint64(3), res.ObjectsEvacuated()) + require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") + st = testWaitForEvacuationCompleted(t, e) + require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") return nil }) @@ -483,12 +499,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { close(blocker) - require.Eventually(t, func() bool { - st, err = e.GetEvacuationState(context.Background()) - return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 3*time.Second, 10*time.Millisecond, "invalid final state") - - require.NoError(t, err, "get final state failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") require.NotNil(t, st.StartedAt(), "invalid final started at") require.NotNil(t, st.FinishedAt(), "invalid final finished at") @@ -534,14 +545,9 @@ func TestEvacuateTreesLocal(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation result must be not nil") - require.NoError(t, err, 
"evacuation failed") - - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -632,15 +638,9 @@ func TestEvacuateTreesRemote(t *testing.T) { require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids") require.Equal(t, "", st.ErrorMessage(), "invalid init error message") - res, err := e.Evacuate(context.Background(), prm) - require.NotNil(t, res, "sync evacuation must return not nil") - require.NoError(t, err, "evacuation failed") + require.NoError(t, e.Evacuate(context.Background(), prm), "evacuation failed") + st = testWaitForEvacuationCompleted(t, e) - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err, "get evacuation state failed") - require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) - - require.NoError(t, err, "get final state failed") require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count") require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count") require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count") @@ -754,11 +754,12 @@ func TestEvacuateShardObjectsRepOneOnly(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) - res, err := e.Evacuate(context.Background(), prm) - require.NoError(t, err) - require.Equal(t, uint64(4), res.ObjectsEvacuated()) - require.Equal(t, uint64(8), res.ObjectsSkipped()) - require.Equal(t, uint64(0), res.ObjectsFailed()) + require.NoError(t, e.Evacuate(context.Background(), prm)) + st := testWaitForEvacuationCompleted(t, e) + require.Equal(t, "", st.ErrorMessage()) + require.Equal(t, uint64(4), st.ObjectsEvacuated()) + require.Equal(t, uint64(8), st.ObjectsSkipped()) + require.Equal(t, uint64(0), st.ObjectsFailed()) } func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { @@ -812,7 +813,8 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) start := time.Now() - _, err := e.Evacuate(context.Background(), prm) + err := e.Evacuate(context.Background(), prm) + testWaitForEvacuationCompleted(t, e) t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index 04465eb26..da5401515 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -42,7 +42,6 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha IgnoreErrors: req.GetBody().GetIgnoreErrors(), ObjectsHandler: s.replicateObject, TreeHandler: s.replicateTree, - Async: true, Scope: engine.EvacuateScope(req.GetBody().GetScope()), ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(), ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(), From 15139d80c9e78ca5a9023eb3f6466d31d40942f3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 10 Dec 2024 12:47:20 +0300 Subject: [PATCH 257/591] [#1548] policer: Do not replicate EC chunk 
if object already removed Signed-off-by: Dmitrii Stepanov --- pkg/services/policer/ec.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index cb583f1d3..db640e323 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -281,6 +281,8 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info } chunkIDs[ch.Index] = ecInfoChunkID } + } else if client.IsErrObjectAlreadyRemoved(err) { + restore = false } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ From 386a12eea45d5af072dfbba307e5a079ee9ebdb4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 10 Dec 2024 12:52:26 +0300 Subject: [PATCH 258/591] [#1548] engine: Rename parent -> ecParent Parent could mean split parent or EC parent. In this case it is EC parent only. Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/exists_test.go | 2 +- pkg/local_object_storage/engine/put.go | 10 +++++----- pkg/local_object_storage/metabase/exists.go | 18 +++++++++--------- pkg/local_object_storage/shard/exists.go | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/local_object_storage/engine/exists_test.go b/pkg/local_object_storage/engine/exists_test.go index 1b51c10dc..9b3c0833f 100644 --- a/pkg/local_object_storage/engine/exists_test.go +++ b/pkg/local_object_storage/engine/exists_test.go @@ -42,7 +42,7 @@ func benchmarkExists(b *testing.B, shardNum int) { for range b.N { var shPrm shard.ExistsPrm shPrm.Address = addr - shPrm.ParentAddress = oid.Address{} + shPrm.ECParentAddress = oid.Address{} ok, _, err := e.exists(context.Background(), shPrm) if err != nil || ok { b.Fatalf("%t %v", ok, err) diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index e080191ae..ba4a144d1 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -71,21 +71,21 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // In #1146 this check was parallelized, however, it became // much slower on fast machines for 4 shards. - var parent oid.Address + var ecParent oid.Address if prm.Object.ECHeader() != nil { - parent.SetObject(prm.Object.ECHeader().Parent()) - parent.SetContainer(addr.Container()) + ecParent.SetObject(prm.Object.ECHeader().Parent()) + ecParent.SetContainer(addr.Container()) } var shPrm shard.ExistsPrm shPrm.Address = addr - shPrm.ParentAddress = parent + shPrm.ECParentAddress = ecParent existed, locked, err := e.exists(ctx, shPrm) if err != nil { return err } if !existed && locked { - lockers, err := e.GetLocked(ctx, parent) + lockers, err := e.GetLocked(ctx, ecParent) if err != nil { return err } diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 0294dd3ba..f6596e830 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -20,8 +20,8 @@ import ( // ExistsPrm groups the parameters of Exists operation. type ExistsPrm struct { - addr oid.Address - paddr oid.Address + addr oid.Address + ecParentAddr oid.Address } // ExistsRes groups the resulting values of Exists operation. 
@@ -37,9 +37,9 @@ func (p *ExistsPrm) SetAddress(addr oid.Address) { p.addr = addr } -// SetParent is an Exists option to set objects parent. -func (p *ExistsPrm) SetParent(addr oid.Address) { - p.paddr = addr +// SetECParent is an Exists option to set objects parent. +func (p *ExistsPrm) SetECParent(addr oid.Address) { + p.ecParentAddr = addr } // Exists returns the fact that the object is in the metabase. @@ -82,7 +82,7 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err currEpoch := db.epochState.CurrentEpoch() err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.exists, res.locked, err = db.exists(tx, prm.addr, prm.paddr, currEpoch) + res.exists, res.locked, err = db.exists(tx, prm.addr, prm.ecParentAddr, currEpoch) return err }) @@ -90,10 +90,10 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err return res, metaerr.Wrap(err) } -func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, parent oid.Address, currEpoch uint64) (bool, bool, error) { +func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) { var locked bool - if !parent.Equals(oid.Address{}) { - locked = objectLocked(tx, parent.Container(), parent.Object()) + if !ecParent.Equals(oid.Address{}) { + locked = objectLocked(tx, ecParent.Container(), ecParent.Object()) } // check graveyard and object expiration first st, err := objectStatus(tx, addr, currEpoch) diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 784bf293a..82ce48dde 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -18,7 +18,7 @@ type ExistsPrm struct { // Exists option to set object checked for existence. Address oid.Address // Exists option to set parent object checked for existence. - ParentAddress oid.Address + ECParentAddress oid.Address } // ExistsRes groups the resulting values of Exists operation. 
@@ -74,7 +74,7 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { } else { var existsPrm meta.ExistsPrm existsPrm.SetAddress(prm.Address) - existsPrm.SetParent(prm.ParentAddress) + existsPrm.SetECParent(prm.ECParentAddress) var res meta.ExistsRes res, err = s.metaBase.Exists(ctx, existsPrm) From 1f6cf57e308eab9b2659989eb31f1bee1a56ef9b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 10 Dec 2024 12:53:54 +0300 Subject: [PATCH 259/591] [#1548] metabase: Check if EC parent is removed or expired Signed-off-by: Dmitrii Stepanov --- .../engine/inhume_test.go | 45 +++++++++++++++++++ pkg/local_object_storage/metabase/exists.go | 11 +++++ pkg/local_object_storage/metabase/put.go | 8 +++- 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 9d7196d94..2d083a58c 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -11,6 +11,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -93,6 +94,50 @@ func TestStorageEngine_Inhume(t *testing.T) { }) } +func TestStorageEngine_ECInhume(t *testing.T) { + parentObjectAddress := oidtest.Address() + containerID := parentObjectAddress.Container() + + chunkObject0 := testutil.GenerateObjectWithCID(containerID) + chunkObject0.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 0, 4, []byte{}, 0)) + + chunkObject1 := testutil.GenerateObjectWithCID(containerID) + chunkObject1.SetECHeader(objectSDK.NewECHeader( + objectSDK.ECParentInfo{ + ID: parentObjectAddress.Object(), + }, 1, 4, []byte{}, 0)) + + tombstone := objectSDK.NewTombstone() + tombstone.SetMembers([]oid.ID{parentObjectAddress.Object()}) + payload, err := tombstone.Marshal() + require.NoError(t, err) + tombstoneObject := testutil.GenerateObjectWithCID(containerID) + tombstoneObject.SetType(objectSDK.TypeTombstone) + tombstoneObject.SetPayload(payload) + tombstoneObjectAddress := object.AddressOf(tombstoneObject) + + e := testNewEngine(t).setShardsNum(t, 5).prepare(t).engine + defer func() { require.NoError(t, e.Close(context.Background())) }() + + require.NoError(t, Put(context.Background(), e, chunkObject0, false)) + + require.NoError(t, Put(context.Background(), e, tombstoneObject, false)) + + var inhumePrm InhumePrm + inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) + _, err = e.Inhume(context.Background(), inhumePrm) + require.NoError(t, err) + + var alreadyRemoved *apistatus.ObjectAlreadyRemoved + + require.ErrorAs(t, Put(context.Background(), e, chunkObject0, false), &alreadyRemoved) + + require.ErrorAs(t, Put(context.Background(), e, chunkObject1, false), &alreadyRemoved) +} + func TestInhumeExpiredRegularObject(t *testing.T) { t.Parallel() diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index f6596e830..411beb6b3 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ 
b/pkg/local_object_storage/metabase/exists.go @@ -93,6 +93,17 @@ func (db *DB) Exists(ctx context.Context, prm ExistsPrm) (res ExistsRes, err err func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currEpoch uint64) (bool, bool, error) { var locked bool if !ecParent.Equals(oid.Address{}) { + st, err := objectStatus(tx, ecParent, currEpoch) + if err != nil { + return false, false, err + } + switch st { + case 2: + return false, locked, logicerr.Wrap(new(apistatus.ObjectAlreadyRemoved)) + case 3: + return false, locked, ErrObjectIsExpired + } + locked = objectLocked(tx, ecParent.Container(), ecParent.Object()) } // check graveyard and object expiration first diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index d7675869f..6f9dc1bf0 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -121,9 +121,15 @@ func (db *DB) put(tx *bbolt.Tx, return PutRes{}, errors.New("missing container in object") } + var ecParentAddress oid.Address + if ecHeader := obj.ECHeader(); ecHeader != nil { + ecParentAddress.SetContainer(cnr) + ecParentAddress.SetObject(ecHeader.Parent()) + } + isParent := si != nil - exists, _, err := db.exists(tx, objectCore.AddressOf(obj), oid.Address{}, currEpoch) + exists, _, err := db.exists(tx, objectCore.AddressOf(obj), ecParentAddress, currEpoch) var splitInfoError *objectSDK.SplitInfoError if errors.As(err, &splitInfoError) { From 670305a7216bf3a5304b6256eed8dc1efe285f9f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 6 Dec 2024 15:50:57 +0300 Subject: [PATCH 260/591] [#1546] morph/event: Remove nil checks from event handler registrar This codepath hides possible bugs in code. We would rather panic then silently fail. 
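A minimal, self-contained sketch of the fail-fast style this change argues for; the types and names below are illustrative only, not the actual frostfs-node listener API. The idea is that a nil handler is a programming error, so registration should panic at startup instead of logging a warning and silently dropping the registration.

package main

type Event interface{}

type Handler func(Event)

type registrar struct {
	handlers map[string][]Handler
}

func (r *registrar) Register(typ string, h Handler) {
	if h == nil {
		// Fail fast: a nil handler is a bug in wiring code, not a runtime
		// condition to be tolerated and logged.
		panic("nil handler registered for event type " + typ)
	}
	r.handlers[typ] = append(r.handlers[typ], h)
}

func main() {
	r := &registrar{handlers: make(map[string][]Handler)}
	r.Register("newEpoch", func(_ Event) { /* react to the event */ })
}

With validation pushed to registration time, a mis-wired processor fails during node startup rather than quietly never receiving its events.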
Signed-off-by: Evgenii Stratonikov --- internal/logs/logs.go | 5 ----- pkg/morph/event/listener.go | 29 ----------------------------- 2 files changed, 34 deletions(-) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index d0bac4d11..893be7006 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -164,17 +164,12 @@ const ( EventNotaryParserNotSet = "notary parser not set" EventCouldNotParseNotaryEvent = "could not parse notary event" EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" - EventIgnoreNilEventParser = "ignore nil event parser" EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" EventRegisteredNewEventParser = "registered new event parser" - EventIgnoreNilEventHandler = "ignore nil event handler" EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" EventRegisteredNewEventHandler = "registered new event handler" - EventIgnoreNilNotaryEventParser = "ignore nil notary event parser" EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" - EventIgnoreNilNotaryEventHandler = "ignore nil notary event handler" EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" - EventIgnoreNilBlockHandler = "ignore nil block handler" StorageOperation = "local object storage operation" BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" BlobovniczaOpeningBoltDB = "opening BoltDB" diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 6e6184e77..b67546804 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -459,12 +459,6 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { zap.Stringer("event_type", pi.getType()), ) - parser := pi.parser() - if parser == nil { - log.Info(context.Background(), logs.EventIgnoreNilEventParser) - return - } - l.mtx.Lock() defer l.mtx.Unlock() @@ -492,12 +486,6 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { zap.Stringer("event_type", hi.GetType()), ) - handler := hi.Handler() - if handler == nil { - log.Warn(context.Background(), logs.EventIgnoreNilEventHandler) - return - } - // check if parser was set l.mtx.RLock() _, ok := l.notificationParsers[hi.scriptHashWithType] @@ -555,12 +543,6 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { zap.Stringer("notary_type", pi.RequestType()), ) - parser := pi.parser() - if parser == nil { - log.Info(context.Background(), logs.EventIgnoreNilNotaryEventParser) - return - } - l.mtx.Lock() defer l.mtx.Unlock() @@ -593,12 +575,6 @@ func (l *listener) RegisterNotaryHandler(hi NotaryHandlerInfo) { zap.Stringer("notary type", hi.RequestType()), ) - handler := hi.Handler() - if handler == nil { - log.Warn(context.Background(), logs.EventIgnoreNilNotaryEventHandler) - return - } - // check if parser was set l.mtx.RLock() _, ok := l.notaryParsers[hi.notaryRequestTypes] @@ -627,11 +603,6 @@ func (l *listener) Stop() { } func (l *listener) RegisterBlockHandler(handler BlockHandler) { - if handler == nil { - l.log.Warn(context.Background(), logs.EventIgnoreNilBlockHandler) - return - } - l.blockHandlers = append(l.blockHandlers, handler) } From dfa51048a852aa9a8c88a67b6914d671137dc3f7 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 6 Dec 2024 15:56:41 +0300 Subject: [PATCH 261/591] [#1546] morph/event: Remove "is started" checks from event handler 
registrar This codepath hides possible bugs in code. All initialization function should run before init stage. Signed-off-by: Evgenii Stratonikov --- internal/logs/logs.go | 2 -- pkg/morph/event/listener.go | 17 ----------------- 2 files changed, 19 deletions(-) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 893be7006..535802a20 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -164,11 +164,9 @@ const ( EventNotaryParserNotSet = "notary parser not set" EventCouldNotParseNotaryEvent = "could not parse notary event" EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" - EventListenerHasBeenAlreadyStartedIgnoreParser = "listener has been already started, ignore parser" EventRegisteredNewEventParser = "registered new event parser" EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" EventRegisteredNewEventHandler = "registered new event handler" - EventListenerHasBeenAlreadyStartedIgnoreNotaryParser = "listener has been already started, ignore notary parser" EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" StorageOperation = "local object storage operation" BlobovniczaCreatingDirectoryForBoltDB = "creating directory for BoltDB" diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index b67546804..64ff205be 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -100,8 +100,6 @@ type listener struct { startOnce, stopOnce sync.Once - started bool - notificationParsers map[scriptHashWithType]NotificationParser notificationHandlers map[scriptHashWithType][]Handler @@ -171,9 +169,6 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { } func (l *listener) listen(ctx context.Context, intError chan<- error) error { - // mark listener as started - l.started = true - subErrCh := make(chan error) go l.subscribe(subErrCh) @@ -462,12 +457,6 @@ func (l *listener) SetNotificationParser(pi NotificationParserInfo) { l.mtx.Lock() defer l.mtx.Unlock() - // check if the listener was started - if l.started { - log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreParser) - return - } - // add event parser if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok { l.notificationParsers[pi.scriptHashWithType] = pi.parser() @@ -546,12 +535,6 @@ func (l *listener) SetNotaryParser(pi NotaryParserInfo) { l.mtx.Lock() defer l.mtx.Unlock() - // check if the listener was started - if l.started { - log.Warn(context.Background(), logs.EventListenerHasBeenAlreadyStartedIgnoreNotaryParser) - return - } - // add event parser if _, ok := l.notaryParsers[pi.notaryRequestTypes]; !ok { l.notaryParsers[pi.notaryRequestTypes] = pi.parser() From d0ce835fbf75658271613a40741443405b86f7eb Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 6 Dec 2024 16:01:16 +0300 Subject: [PATCH 262/591] [#1546] morph/event: Merge notification parser and handlers They are decoupled, but it is an error to have a handler without a corresponding parser. Register them together on the code level and get rid of unreachable code. 
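A simplified sketch of the combined registration record described above, using stand-in types; the real NotificationHandlerInfo also carries the contract script hash and, at this point in the series, still uses setters rather than exported fields. Coupling the parser with its handlers in one value makes it impossible to register a handler for an event type that has no parser.

package main

import "fmt"

// Illustrative stand-ins for the real neo-go notification and event types.
type RawNotification struct{ Name string }

type Event any

type (
	Parser  func(*RawNotification) (Event, error)
	Handler func(Event)
)

// HandlerInfo couples a parser with its handlers, so a handler can never be
// registered without a corresponding parser.
type HandlerInfo struct {
	Type     string
	Parser   Parser
	Handlers []Handler
}

type listener struct {
	parsers  map[string]Parser
	handlers map[string][]Handler
}

func (l *listener) Register(hi HandlerInfo) {
	l.parsers[hi.Type] = hi.Parser
	l.handlers[hi.Type] = append(l.handlers[hi.Type], hi.Handlers...)
}

func main() {
	l := &listener{parsers: map[string]Parser{}, handlers: map[string][]Handler{}}
	l.Register(HandlerInfo{
		Type:   "NewEpoch",
		Parser: func(n *RawNotification) (Event, error) { return n.Name, nil },
		Handlers: []Handler{
			func(e Event) { fmt.Println("handled", e) },
		},
	})
	// Dispatch path: parse the raw notification first, then fan out to the
	// handlers registered for that type.
	if p, ok := l.parsers["NewEpoch"]; ok {
		if ev, err := p(&RawNotification{Name: "NewEpoch"}); err == nil {
			for _, h := range l.handlers["NewEpoch"] {
				h(ev)
			}
		}
	}
}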
Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/morph.go | 18 +++----- internal/logs/logs.go | 1 - pkg/innerring/bindings.go | 6 --- .../processors/alphabet/processor.go | 5 --- pkg/innerring/processors/balance/processor.go | 17 +------ .../processors/container/processor.go | 5 --- pkg/innerring/processors/frostfs/processor.go | 45 ++++--------------- .../processors/governance/processor.go | 12 +---- pkg/innerring/processors/netmap/processor.go | 19 +------- pkg/morph/event/handlers.go | 18 +++++--- pkg/morph/event/listener.go | 44 ++---------------- pkg/morph/event/listener_test.go | 18 ++++---- pkg/morph/event/parsers.go | 27 ----------- 13 files changed, 44 insertions(+), 191 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 81579c7fc..e80dda80e 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -223,27 +223,21 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse subs map[event.Type][]event.Handler, ) { for typ, handlers := range subs { - pi := event.NotificationParserInfo{} - pi.SetType(typ) - pi.SetScriptHash(scHash) + hi := event.NotificationHandlerInfo{} + hi.SetType(typ) + hi.SetScriptHash(scHash) p, ok := parsers[typ] if !ok { panic(fmt.Sprintf("missing parser for event %s", typ)) } - pi.SetParser(p) - - lis.SetNotificationParser(pi) + hi.SetParser(p) for _, h := range handlers { - hi := event.NotificationHandlerInfo{} - hi.SetType(typ) - hi.SetScriptHash(scHash) - hi.SetHandler(h) - - lis.RegisterNotificationHandler(hi) + hi.AddHandler(h) } + lis.RegisterNotificationHandler(hi) } } diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 535802a20..b24f3593d 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -165,7 +165,6 @@ const ( EventCouldNotParseNotaryEvent = "could not parse notary event" EventNotaryHandlersForParsedNotificationEventWereNotRegistered = "notary handlers for parsed notification event were not registered" EventRegisteredNewEventParser = "registered new event parser" - EventIgnoreHandlerOfEventWoParser = "ignore handler of event w/o parser" EventRegisteredNewEventHandler = "registered new event handler" EventIgnoreHandlerOfNotaryEventWoParser = "ignore handler of notary event w/o parser" StorageOperation = "local object storage operation" diff --git a/pkg/innerring/bindings.go b/pkg/innerring/bindings.go index c4de07a5f..dfada764a 100644 --- a/pkg/innerring/bindings.go +++ b/pkg/innerring/bindings.go @@ -8,7 +8,6 @@ type ( // ContractProcessor interface defines functions for binding event producers // such as event.Listener and Timers with contract processor. 
ContractProcessor interface { - ListenerNotificationParsers() []event.NotificationParserInfo ListenerNotificationHandlers() []event.NotificationHandlerInfo ListenerNotaryParsers() []event.NotaryParserInfo ListenerNotaryHandlers() []event.NotaryHandlerInfo @@ -16,11 +15,6 @@ type ( ) func connectListenerWithProcessor(l event.Listener, p ContractProcessor) { - // register notification parsers - for _, parser := range p.ListenerNotificationParsers() { - l.SetNotificationParser(parser) - } - // register notification handlers for _, handler := range p.ListenerNotificationHandlers() { l.RegisterNotificationHandler(handler) diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 3992e00f3..2c4654e7c 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -114,11 +114,6 @@ func (ap *Processor) SetParsedWallets(parsedWallets []util.Uint160) { ap.pwLock.Unlock() } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (ap *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (ap *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index e2f649600..d323238c7 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -88,20 +88,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (bp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var parsers []event.NotificationParserInfo - - // new lock event - lock := event.NotificationParserInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetParser(balanceEvent.ParseLock) - parsers = append(parsers, lock) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { var handlers []event.NotificationHandlerInfo @@ -110,7 +96,8 @@ func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI lock := event.NotificationHandlerInfo{} lock.SetType(lockNotification) lock.SetScriptHash(bp.balanceSC) - lock.SetHandler(bp.handleLock) + lock.SetParser(balanceEvent.ParseLock) + lock.AddHandler(bp.handleLock) handlers = append(handlers, lock) return handlers diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index 58b90457c..a0b7491e1 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -118,11 +118,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (cp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - return nil -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (cp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { return nil diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index 6c29d330d..64b171f7f 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -142,39 +142,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var ( - parsers = make([]event.NotificationParserInfo, 0, 6) - - p event.NotificationParserInfo - ) - - p.SetScriptHash(np.frostfsContract) - - // deposit event - p.SetType(event.TypeFromString(depositNotification)) - p.SetParser(frostfsEvent.ParseDeposit) - parsers = append(parsers, p) - - // withdraw event - p.SetType(event.TypeFromString(withdrawNotification)) - p.SetParser(frostfsEvent.ParseWithdraw) - parsers = append(parsers, p) - - // cheque event - p.SetType(event.TypeFromString(chequeNotification)) - p.SetParser(frostfsEvent.ParseCheque) - parsers = append(parsers, p) - - // config event - p.SetType(event.TypeFromString(configNotification)) - p.SetParser(frostfsEvent.ParseConfig) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { var ( @@ -187,22 +154,26 @@ func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI // deposit handler h.SetType(event.TypeFromString(depositNotification)) - h.SetHandler(np.handleDeposit) + h.SetParser(frostfsEvent.ParseDeposit) + h.AddHandler(np.handleDeposit) handlers = append(handlers, h) // withdraw handler h.SetType(event.TypeFromString(withdrawNotification)) - h.SetHandler(np.handleWithdraw) + h.SetParser(frostfsEvent.ParseWithdraw) + h.AddHandler(np.handleWithdraw) handlers = append(handlers, h) // cheque handler h.SetType(event.TypeFromString(chequeNotification)) - h.SetHandler(np.handleCheque) + h.SetParser(frostfsEvent.ParseCheque) + h.AddHandler(np.handleCheque) handlers = append(handlers, h) // config handler h.SetType(event.TypeFromString(configNotification)) - h.SetHandler(np.handleConfig) + h.SetParser(frostfsEvent.ParseConfig) + h.AddHandler(np.handleConfig) handlers = append(handlers, h) return handlers diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 565f4c27d..313a0baea 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -155,21 +155,13 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (gp *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - var pi event.NotificationParserInfo - pi.SetScriptHash(gp.designate) - pi.SetType(event.TypeFromString(native.DesignationEventName)) - pi.SetParser(rolemanagement.ParseDesignate) - return []event.NotificationParserInfo{pi} -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { var hi event.NotificationHandlerInfo hi.SetScriptHash(gp.designate) hi.SetType(event.TypeFromString(native.DesignationEventName)) - hi.SetHandler(gp.HandleAlphabetSync) + hi.SetParser(rolemanagement.ParseDesignate) + hi.AddHandler(gp.HandleAlphabetSync) return []event.NotificationHandlerInfo{hi} } diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index b3d57e85b..c726df955 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -161,22 +161,6 @@ func New(p *Params) (*Processor, error) { }, nil } -// ListenerNotificationParsers for the 'event.Listener' event producer. -func (np *Processor) ListenerNotificationParsers() []event.NotificationParserInfo { - parsers := make([]event.NotificationParserInfo, 0, 3) - - var p event.NotificationParserInfo - - p.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch event - p.SetType(newEpochNotification) - p.SetParser(netmapEvent.ParseNewEpoch) - parsers = append(parsers, p) - - return parsers -} - // ListenerNotificationHandlers for the 'event.Listener' event producer. func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { handlers := make([]event.NotificationHandlerInfo, 0, 3) @@ -187,7 +171,8 @@ func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerI // new epoch handler i.SetType(newEpochNotification) - i.SetHandler(np.handleNewEpoch) + i.SetParser(netmapEvent.ParseNewEpoch) + i.AddHandler(np.handleNewEpoch) handlers = append(handlers, i) return handlers diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index 822335329..e96abb846 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -18,17 +18,23 @@ type BlockHandler func(context.Context, *block.Block) type NotificationHandlerInfo struct { scriptHashWithType - h Handler + parser NotificationParser + handlers []Handler } -// SetHandler is an event handler setter. -func (s *NotificationHandlerInfo) SetHandler(v Handler) { - s.h = v +// SetParser is an event handler setter. +func (s *NotificationHandlerInfo) SetParser(p NotificationParser) { + s.parser = p +} + +// AddHandler adds an event handler. +func (s *NotificationHandlerInfo) AddHandler(v Handler) { + s.handlers = append(s.handlers, v) } // Handler returns an event handler. -func (s NotificationHandlerInfo) Handler() Handler { - return s.h +func (s NotificationHandlerInfo) Handlers() []Handler { + return s.handlers } // NotaryHandlerInfo is a structure that groups diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 64ff205be..8fe95cf49 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -33,13 +33,6 @@ type Listener interface { // it could not be started. ListenWithError(context.Context, chan<- error) - // SetNotificationParser must set the parser of particular contract event. - // - // Parser of each event must be set once. All parsers must be set before Listen call. - // - // Must ignore nil parsers and all calls after listener has been started. - SetNotificationParser(NotificationParserInfo) - // RegisterNotificationHandler must register the event handler for particular notification event of contract. // // The specified handler must be called after each capture and parsing of the event. 
@@ -444,27 +437,6 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe handler(ctx, event) } -// SetNotificationParser sets the parser of particular contract event. -// -// Ignores nil and already set parsers. -// Ignores the parser if listener is started. -func (l *listener) SetNotificationParser(pi NotificationParserInfo) { - log := l.log.With( - zap.String("contract", pi.ScriptHash().StringLE()), - zap.Stringer("event_type", pi.getType()), - ) - - l.mtx.Lock() - defer l.mtx.Unlock() - - // add event parser - if _, ok := l.notificationParsers[pi.scriptHashWithType]; !ok { - l.notificationParsers[pi.scriptHashWithType] = pi.parser() - } - - log.Debug(context.Background(), logs.EventRegisteredNewEventParser) -} - // RegisterNotificationHandler registers the handler for particular notification event of contract. // // Ignores nil handlers. @@ -476,22 +448,14 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { ) // check if parser was set - l.mtx.RLock() - _, ok := l.notificationParsers[hi.scriptHashWithType] - l.mtx.RUnlock() - - if !ok { - log.Warn(context.Background(), logs.EventIgnoreHandlerOfEventWoParser) - return - } - - // add event handler l.mtx.Lock() + defer l.mtx.Unlock() + + l.notificationParsers[hi.scriptHashWithType] = hi.parser l.notificationHandlers[hi.scriptHashWithType] = append( l.notificationHandlers[hi.scriptHashWithType], - hi.Handler(), + hi.handlers..., ) - l.mtx.Unlock() log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) } diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index c0f9722d7..02dad2fd3 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -48,20 +48,18 @@ func TestEventHandling(t *testing.T) { }, } - l.SetNotificationParser(NotificationParserInfo{ - scriptHashWithType: key, - p: func(cne *state.ContainedNotificationEvent) (Event, error) { - return testNotificationEvent{source: cne}, nil - }, - }) - notificationHandled := make(chan bool) handledNotifications := make([]Event, 0) l.RegisterNotificationHandler(NotificationHandlerInfo{ scriptHashWithType: key, - h: func(_ context.Context, e Event) { - handledNotifications = append(handledNotifications, e) - notificationHandled <- true + parser: func(cne *state.ContainedNotificationEvent) (Event, error) { + return testNotificationEvent{source: cne}, nil + }, + handlers: []Handler{ + func(_ context.Context, e Event) { + handledNotifications = append(handledNotifications, e) + notificationHandled <- true + }, }, }) diff --git a/pkg/morph/event/parsers.go b/pkg/morph/event/parsers.go index 90eff0bd2..5adeb4b30 100644 --- a/pkg/morph/event/parsers.go +++ b/pkg/morph/event/parsers.go @@ -11,15 +11,6 @@ import ( // from the StackItem list. type NotificationParser func(*state.ContainedNotificationEvent) (Event, error) -// NotificationParserInfo is a structure that groups -// the parameters of particular contract -// notification event parser. -type NotificationParserInfo struct { - scriptHashWithType - - p NotificationParser -} - // NotaryPreparator constructs NotaryEvent // from the NotaryRequest event. type NotaryPreparator interface { @@ -47,24 +38,6 @@ func (n *NotaryParserInfo) SetParser(p NotaryParser) { n.p = p } -// SetParser is an event parser setter. -func (s *NotificationParserInfo) SetParser(v NotificationParser) { - s.p = v -} - -func (s NotificationParserInfo) parser() NotificationParser { - return s.p -} - -// SetType is an event type setter. 
-func (s *NotificationParserInfo) SetType(v Type) { - s.typ = v -} - -func (s NotificationParserInfo) getType() Type { - return s.typ -} - type wrongPrmNumber struct { exp, act int } From b1614a284d42521dfb1d5b7b46dd7e45acad4f7b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 6 Dec 2024 16:12:47 +0300 Subject: [PATCH 263/591] [#1546] morph/event: Export NotificationHandlerInfo fields Hiding them achieves nothing, as the struct has no methods and is not used concurrently. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/morph.go | 16 ++--- pkg/innerring/processors/balance/processor.go | 19 +++--- pkg/innerring/processors/frostfs/processor.go | 59 ++++++++----------- .../processors/governance/processor.go | 14 +++-- pkg/innerring/processors/netmap/processor.go | 21 +++---- pkg/morph/event/handlers.go | 24 ++------ pkg/morph/event/listener.go | 16 +++-- pkg/morph/event/listener_test.go | 16 ++--- 8 files changed, 75 insertions(+), 110 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index e80dda80e..0d4e8a1be 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -223,21 +223,17 @@ func registerNotificationHandlers(scHash util.Uint160, lis event.Listener, parse subs map[event.Type][]event.Handler, ) { for typ, handlers := range subs { - hi := event.NotificationHandlerInfo{} - hi.SetType(typ) - hi.SetScriptHash(scHash) - p, ok := parsers[typ] if !ok { panic(fmt.Sprintf("missing parser for event %s", typ)) } - hi.SetParser(p) - - for _, h := range handlers { - hi.AddHandler(h) - } - lis.RegisterNotificationHandler(hi) + lis.RegisterNotificationHandler(event.NotificationHandlerInfo{ + Contract: scHash, + Type: typ, + Parser: p, + Handlers: handlers, + }) } } diff --git a/pkg/innerring/processors/balance/processor.go b/pkg/innerring/processors/balance/processor.go index d323238c7..34203b74f 100644 --- a/pkg/innerring/processors/balance/processor.go +++ b/pkg/innerring/processors/balance/processor.go @@ -90,17 +90,14 @@ func New(p *Params) (*Processor, error) { // ListenerNotificationHandlers for the 'event.Listener' event producer. func (bp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var handlers []event.NotificationHandlerInfo - - // lock handler - lock := event.NotificationHandlerInfo{} - lock.SetType(lockNotification) - lock.SetScriptHash(bp.balanceSC) - lock.SetParser(balanceEvent.ParseLock) - lock.AddHandler(bp.handleLock) - handlers = append(handlers, lock) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: bp.balanceSC, + Type: lockNotification, + Parser: balanceEvent.ParseLock, + Handlers: []event.Handler{bp.handleLock}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/frostfs/processor.go b/pkg/innerring/processors/frostfs/processor.go index 64b171f7f..9d3bf65cd 100644 --- a/pkg/innerring/processors/frostfs/processor.go +++ b/pkg/innerring/processors/frostfs/processor.go @@ -144,39 +144,32 @@ func New(p *Params) (*Processor, error) { // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var ( - handlers = make([]event.NotificationHandlerInfo, 0, 6) - - h event.NotificationHandlerInfo - ) - - h.SetScriptHash(np.frostfsContract) - - // deposit handler - h.SetType(event.TypeFromString(depositNotification)) - h.SetParser(frostfsEvent.ParseDeposit) - h.AddHandler(np.handleDeposit) - handlers = append(handlers, h) - - // withdraw handler - h.SetType(event.TypeFromString(withdrawNotification)) - h.SetParser(frostfsEvent.ParseWithdraw) - h.AddHandler(np.handleWithdraw) - handlers = append(handlers, h) - - // cheque handler - h.SetType(event.TypeFromString(chequeNotification)) - h.SetParser(frostfsEvent.ParseCheque) - h.AddHandler(np.handleCheque) - handlers = append(handlers, h) - - // config handler - h.SetType(event.TypeFromString(configNotification)) - h.SetParser(frostfsEvent.ParseConfig) - h.AddHandler(np.handleConfig) - handlers = append(handlers, h) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.frostfsContract, + Type: event.TypeFromString(depositNotification), + Parser: frostfsEvent.ParseDeposit, + Handlers: []event.Handler{np.handleDeposit}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(withdrawNotification), + Parser: frostfsEvent.ParseWithdraw, + Handlers: []event.Handler{np.handleWithdraw}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(chequeNotification), + Parser: frostfsEvent.ParseCheque, + Handlers: []event.Handler{np.handleCheque}, + }, + { + Contract: np.frostfsContract, + Type: event.TypeFromString(configNotification), + Parser: frostfsEvent.ParseConfig, + Handlers: []event.Handler{np.handleConfig}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 313a0baea..7859ebee1 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -157,12 +157,14 @@ func New(p *Params) (*Processor, error) { // ListenerNotificationHandlers for the 'event.Listener' event producer. func (gp *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - var hi event.NotificationHandlerInfo - hi.SetScriptHash(gp.designate) - hi.SetType(event.TypeFromString(native.DesignationEventName)) - hi.SetParser(rolemanagement.ParseDesignate) - hi.AddHandler(gp.HandleAlphabetSync) - return []event.NotificationHandlerInfo{hi} + return []event.NotificationHandlerInfo{ + { + Contract: gp.designate, + Type: event.TypeFromString(native.DesignationEventName), + Parser: rolemanagement.ParseDesignate, + Handlers: []event.Handler{gp.HandleAlphabetSync}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index c726df955..36df57afe 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -163,19 +163,14 @@ func New(p *Params) (*Processor, error) { // ListenerNotificationHandlers for the 'event.Listener' event producer. 
func (np *Processor) ListenerNotificationHandlers() []event.NotificationHandlerInfo { - handlers := make([]event.NotificationHandlerInfo, 0, 3) - - var i event.NotificationHandlerInfo - - i.SetScriptHash(np.netmapClient.ContractAddress()) - - // new epoch handler - i.SetType(newEpochNotification) - i.SetParser(netmapEvent.ParseNewEpoch) - i.AddHandler(np.handleNewEpoch) - handlers = append(handlers, i) - - return handlers + return []event.NotificationHandlerInfo{ + { + Contract: np.netmapClient.ContractAddress(), + Type: newEpochNotification, + Parser: netmapEvent.ParseNewEpoch, + Handlers: []event.Handler{np.handleNewEpoch}, + }, + } } // ListenerNotaryParsers for the 'event.Listener' event producer. diff --git a/pkg/morph/event/handlers.go b/pkg/morph/event/handlers.go index e96abb846..55a514ff1 100644 --- a/pkg/morph/event/handlers.go +++ b/pkg/morph/event/handlers.go @@ -4,6 +4,7 @@ import ( "context" "github.com/nspcc-dev/neo-go/pkg/core/block" + "github.com/nspcc-dev/neo-go/pkg/util" ) // Handler is an Event processing function. @@ -16,25 +17,10 @@ type BlockHandler func(context.Context, *block.Block) // the parameters of the handler of particular // contract event. type NotificationHandlerInfo struct { - scriptHashWithType - - parser NotificationParser - handlers []Handler -} - -// SetParser is an event handler setter. -func (s *NotificationHandlerInfo) SetParser(p NotificationParser) { - s.parser = p -} - -// AddHandler adds an event handler. -func (s *NotificationHandlerInfo) AddHandler(v Handler) { - s.handlers = append(s.handlers, v) -} - -// Handler returns an event handler. -func (s NotificationHandlerInfo) Handlers() []Handler { - return s.handlers + Contract util.Uint160 + Type Type + Parser NotificationParser + Handlers []Handler } // NotaryHandlerInfo is a structure that groups diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 8fe95cf49..9852a3d62 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -443,18 +443,22 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe // Ignores handlers of event without parser. 
func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { log := l.log.With( - zap.String("contract", hi.ScriptHash().StringLE()), - zap.Stringer("event_type", hi.GetType()), + zap.String("contract", hi.Contract.StringLE()), + zap.Stringer("event_type", hi.Type), ) // check if parser was set l.mtx.Lock() defer l.mtx.Unlock() - l.notificationParsers[hi.scriptHashWithType] = hi.parser - l.notificationHandlers[hi.scriptHashWithType] = append( - l.notificationHandlers[hi.scriptHashWithType], - hi.handlers..., + var k scriptHashWithType + k.hash = hi.Contract + k.typ = hi.Type + + l.notificationParsers[k] = hi.Parser + l.notificationHandlers[k] = append( + l.notificationHandlers[k], + hi.Handlers..., ) log.Debug(context.Background(), logs.EventRegisteredNewEventHandler) diff --git a/pkg/morph/event/listener_test.go b/pkg/morph/event/listener_test.go index 02dad2fd3..87f37305f 100644 --- a/pkg/morph/event/listener_test.go +++ b/pkg/morph/event/listener_test.go @@ -39,23 +39,15 @@ func TestEventHandling(t *testing.T) { blockHandled <- true }) - key := scriptHashWithType{ - scriptHashValue: scriptHashValue{ - hash: util.Uint160{100}, - }, - typeValue: typeValue{ - typ: TypeFromString("notification type"), - }, - } - notificationHandled := make(chan bool) handledNotifications := make([]Event, 0) l.RegisterNotificationHandler(NotificationHandlerInfo{ - scriptHashWithType: key, - parser: func(cne *state.ContainedNotificationEvent) (Event, error) { + Contract: util.Uint160{100}, + Type: TypeFromString("notification type"), + Parser: func(cne *state.ContainedNotificationEvent) (Event, error) { return testNotificationEvent{source: cne}, nil }, - handlers: []Handler{ + Handlers: []Handler{ func(_ context.Context, e Event) { handledNotifications = append(handledNotifications, e) notificationHandled <- true From a641c91594dcf48c567ae312a7b7db8ccabf121c Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 10 Dec 2024 18:04:22 +0300 Subject: [PATCH 264/591] [#1550] Add CODEOWNERS Signed-off-by: Vitaliy Potyarkin --- CODEOWNERS | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 CODEOWNERS diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..d19c96a5c --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,3 @@ +.* @TrueCloudLab/storage-core-committers @TrueCloudLab/storage-core-developers +.forgejo/.* @potyarkin +Makefile @potyarkin From e9837bbcf9b02ed9218f7ebdb088f0099fd04b86 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 11 Dec 2024 14:39:53 +0300 Subject: [PATCH 265/591] [#1554] morph/event: Remove unused AlphabetUpdate event Refs TrueCloudLab/frostfs-contract#138. 
Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/frostfs/ir_update.go | 54 --------------------- pkg/morph/event/frostfs/ir_update_test.go | 57 ----------------------- 2 files changed, 111 deletions(-) delete mode 100644 pkg/morph/event/frostfs/ir_update.go delete mode 100644 pkg/morph/event/frostfs/ir_update_test.go diff --git a/pkg/morph/event/frostfs/ir_update.go b/pkg/morph/event/frostfs/ir_update.go deleted file mode 100644 index 62203540f..000000000 --- a/pkg/morph/event/frostfs/ir_update.go +++ /dev/null @@ -1,54 +0,0 @@ -package frostfs - -import ( - "crypto/elliptic" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" -) - -type UpdateInnerRing struct { - keys []*keys.PublicKey -} - -// MorphEvent implements Neo:Morph Event interface. -func (UpdateInnerRing) MorphEvent() {} - -func (u UpdateInnerRing) Keys() []*keys.PublicKey { return u.keys } - -func ParseUpdateInnerRing(params []stackitem.Item) (event.Event, error) { - var ( - ev UpdateInnerRing - err error - ) - - if ln := len(params); ln != 1 { - return nil, event.WrongNumberOfParameters(1, ln) - } - - // parse keys - irKeys, err := client.ArrayFromStackItem(params[0]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring keys: %w", err) - } - - ev.keys = make([]*keys.PublicKey, 0, len(irKeys)) - for i := range irKeys { - rawKey, err := client.BytesFromStackItem(irKeys[i]) - if err != nil { - return nil, fmt.Errorf("could not get updated inner ring public key: %w", err) - } - - key, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("could not parse updated inner ring public key: %w", err) - } - - ev.keys = append(ev.keys, key) - } - - return ev, nil -} diff --git a/pkg/morph/event/frostfs/ir_update_test.go b/pkg/morph/event/frostfs/ir_update_test.go deleted file mode 100644 index fae87e5f9..000000000 --- a/pkg/morph/event/frostfs/ir_update_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package frostfs - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/stretchr/testify/require" -) - -func genKey(t *testing.T) *keys.PrivateKey { - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - return priv -} - -func TestParseUpdateInnerRing(t *testing.T) { - publicKeys := []*keys.PublicKey{ - genKey(t).PublicKey(), - genKey(t).PublicKey(), - genKey(t).PublicKey(), - } - - t.Run("wrong number of parameters", func(t *testing.T) { - prms := []stackitem.Item{ - stackitem.NewMap(), - stackitem.NewMap(), - } - - _, err := ParseUpdateInnerRing(prms) - require.EqualError(t, err, event.WrongNumberOfParameters(1, len(prms)).Error()) - }) - - t.Run("wrong first parameter", func(t *testing.T) { - _, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewMap(), - }) - - require.Error(t, err) - }) - - t.Run("correct", func(t *testing.T) { - ev, err := ParseUpdateInnerRing([]stackitem.Item{ - stackitem.NewArray([]stackitem.Item{ - stackitem.NewByteArray(publicKeys[0].Bytes()), - stackitem.NewByteArray(publicKeys[1].Bytes()), - stackitem.NewByteArray(publicKeys[2].Bytes()), - }), - }) - require.NoError(t, err) - - require.Equal(t, UpdateInnerRing{ - keys: publicKeys, - }, ev) - }) -} From 72470d6b4884780978768add9c3496768cb6ee9c Mon Sep 17 
00:00:00 2001 From: Aleksey Savchuk Date: Wed, 11 Dec 2024 14:43:36 +0300 Subject: [PATCH 266/591] [#1555] local_object_storage: Rename method `GetLocked` -> `GetLocks` Renamed to better reflect the method's purpose of returning locks for the specified object. Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/inhume.go | 8 ++++---- pkg/local_object_storage/engine/put.go | 2 +- pkg/local_object_storage/metabase/lock.go | 12 ++++++------ pkg/local_object_storage/shard/lock.go | 8 ++++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 80c77af54..0eb9d05c9 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -254,9 +254,9 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e return locked, outErr } -// GetLocked return lock id's if object is locked according to StorageEngine's state. -func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocked", +// GetLocks return lock id's if object is locked according to StorageEngine's state. +func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) @@ -266,7 +266,7 @@ func (e *StorageEngine) GetLocked(ctx context.Context, addr oid.Address) ([]oid. var outErr error e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - ld, err := h.Shard.GetLocked(ctx, addr) + ld, err := h.Shard.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index ba4a144d1..62671f433 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -85,7 +85,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } if !existed && locked { - lockers, err := e.GetLocked(ctx, ecParent) + lockers, err := e.GetLocks(ctx, ecParent) if err != nil { return err } diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index 6b78ef392..b930a0141 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -176,7 +176,7 @@ func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { } // return `LOCK` id's if specified object is locked in the specified container. -func getLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { +func getLocks(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) ([]oid.ID, error) { var lockers []oid.ID bucketLocked := tx.Bucket(bucketNameLocked) if bucketLocked != nil { @@ -351,20 +351,20 @@ func (db *DB) IsLocked(ctx context.Context, prm IsLockedPrm) (res IsLockedRes, e return res, err } -// GetLocked return `LOCK` id's if provided object is locked by any `LOCK`. Not found +// GetLocks return `LOCK` id's if provided object is locked by any `LOCK`. Not found // object is considered as non-locked. // // Returns only non-logical errors related to underlying database. 
-func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { +func (db *DB) GetLocks(ctx context.Context, addr oid.Address) (res []oid.ID, err error) { var ( startedAt = time.Now() success = false ) defer func() { - db.metrics.AddMethodDuration("GetLocked", time.Since(startedAt), success) + db.metrics.AddMethodDuration("GetLocks", time.Since(startedAt), success) }() - _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocked", + _, span := tracing.StartSpanFromContext(ctx, "metabase.GetLocks", trace.WithAttributes( attribute.String("address", addr.EncodeToString()), )) @@ -377,7 +377,7 @@ func (db *DB) GetLocked(ctx context.Context, addr oid.Address) (res []oid.ID, er return res, ErrDegradedMode } err = metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error { - res, err = getLocked(tx, addr.Container(), addr.Object()) + res, err = getLocks(tx, addr.Container(), addr.Object()) return nil })) success = err == nil diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 4a8d89d63..31ca16aa1 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -72,10 +72,10 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return res.Locked(), nil } -// GetLocked return lock id's of the provided object. Not found object is +// GetLocks return lock id's of the provided object. Not found object is // considered as not locked. Requires healthy metabase, returns ErrDegradedMode otherwise. -func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocked", +func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) { + ctx, span := tracing.StartSpanFromContext(ctx, "Shard.GetLocks", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), attribute.String("address", addr.EncodeToString()), @@ -86,5 +86,5 @@ func (s *Shard) GetLocked(ctx context.Context, addr oid.Address) ([]oid.ID, erro if m.NoMetabase() { return nil, ErrDegradedMode } - return s.metaBase.GetLocked(ctx, addr) + return s.metaBase.GetLocks(ctx, addr) } From 3821645085986c116b2f19c721775e6921ad76a0 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 11 Dec 2024 14:47:18 +0300 Subject: [PATCH 267/591] [#1555] engine: Refactor `(*StorageEngine).GetLocks` Refactored after renaming the method to replace the confusing `locked` variable with `locks`. Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/inhume.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 0eb9d05c9..bae784064 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -262,23 +262,23 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I )) defer span.End() - var locked []oid.ID + var allLocks []oid.ID var outErr error e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - ld, err := h.Shard.GetLocks(ctx, addr) + locks, err := h.Shard.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) outErr = err } - locked = append(locked, ld...) + allLocks = append(allLocks, locks...) 
return false }) - if len(locked) > 0 { - return locked, nil + if len(allLocks) > 0 { + return allLocks, nil } - return locked, outErr + return allLocks, outErr } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { From 7853dbc315a7a9d7d450d92e4342b59c155a514c Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 11 Dec 2024 15:20:59 +0300 Subject: [PATCH 268/591] [#1557] morph/event: Remove embedded structs from scriptHashWithValue Also, make them public, because otherwise `unused` linter complains. ``` pkg/morph/event/utils.go:25:2 unused field `typ` is unused ``` This complain is wrong, though: we _use_ `typ` field because the whole struct is used as a map key. Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/listener.go | 12 ++++-------- pkg/morph/event/utils.go | 18 ++---------------- 2 files changed, 6 insertions(+), 24 deletions(-) diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 9852a3d62..7a16eb991 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -180,7 +180,7 @@ func (l *listener) subscribe(errCh chan error) { // fill the list with the contracts with set event parsers. l.mtx.RLock() for hashType := range l.notificationParsers { - scHash := hashType.ScriptHash() + scHash := hashType.Hash // prevent repetitions for _, hash := range hashes { @@ -189,7 +189,7 @@ func (l *listener) subscribe(errCh chan error) { } } - hashes = append(hashes, hashType.ScriptHash()) + hashes = append(hashes, hashType.Hash) } l.mtx.RUnlock() @@ -326,9 +326,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent * ) // get the event parser - keyEvent := scriptHashWithType{} - keyEvent.SetScriptHash(notifyEvent.ScriptHash) - keyEvent.SetType(typEvent) + keyEvent := scriptHashWithType{Hash: notifyEvent.ScriptHash, Type: typEvent} l.mtx.RLock() parser, ok := l.notificationParsers[keyEvent] @@ -451,9 +449,7 @@ func (l *listener) RegisterNotificationHandler(hi NotificationHandlerInfo) { l.mtx.Lock() defer l.mtx.Unlock() - var k scriptHashWithType - k.hash = hi.Contract - k.typ = hi.Type + k := scriptHashWithType{Hash: hi.Contract, Type: hi.Type} l.notificationParsers[k] = hi.Parser l.notificationHandlers[k] = append( diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 99ea9a7f0..058959c63 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -20,13 +20,9 @@ type scriptHashValue struct { hash util.Uint160 } -type typeValue struct { - typ Type -} - type scriptHashWithType struct { - scriptHashValue - typeValue + Hash util.Uint160 + Type Type } type notaryRequestTypes struct { @@ -73,16 +69,6 @@ func (s scriptHashValue) ScriptHash() util.Uint160 { return s.hash } -// SetType is an event type setter. -func (s *typeValue) SetType(v Type) { - s.typ = v -} - -// GetType is an event type getter. -func (s typeValue) GetType() Type { - return s.typ -} - // WorkerPoolHandler sets closure over worker pool w with passed handler h. 
func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handler { return func(ctx context.Context, e Event) { From 91d9dc2676fa7739ae5ba6763995ae92fab81d29 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 12 Dec 2024 14:59:49 +0300 Subject: [PATCH 269/591] [#1558] morph/event: Remove "could not" from error messages Signed-off-by: Evgenii Stratonikov --- pkg/morph/event/listener.go | 10 +++++----- pkg/morph/event/netmap/update_peer_notary.go | 4 ++-- pkg/morph/event/notary_preparator.go | 18 +++++++++--------- pkg/morph/event/rolemanagement/designate.go | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 7a16eb991..ed77352ec 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -111,7 +111,7 @@ type listener struct { pool *ants.Pool } -const newListenerFailMsg = "could not instantiate Listener" +const newListenerFailMsg = "instantiate Listener" var ( errNilLogger = errors.New("nil logger") @@ -195,20 +195,20 @@ func (l *listener) subscribe(errCh chan error) { err := l.subscriber.SubscribeForNotification(hashes...) if err != nil { - errCh <- fmt.Errorf("could not subscribe for notifications: %w", err) + errCh <- fmt.Errorf("subscribe for notifications: %w", err) return } if len(l.blockHandlers) > 0 { if err = l.subscriber.BlockNotifications(); err != nil { - errCh <- fmt.Errorf("could not subscribe for blocks: %w", err) + errCh <- fmt.Errorf("subscribe for blocks: %w", err) return } } if l.listenNotary { if err = l.subscriber.SubscribeForNotaryRequests(l.notaryMainTXSigner); err != nil { - errCh <- fmt.Errorf("could not subscribe for notary requests: %w", err) + errCh <- fmt.Errorf("subscribe for notary requests: %w", err) return } } @@ -566,7 +566,7 @@ func NewListener(p ListenerParams) (Listener, error) { // The default capacity is 0, which means "infinite". 
pool, err := ants.NewPool(p.WorkerPoolCapacity) if err != nil { - return nil, fmt.Errorf("could not init worker pool: %w", err) + return nil, fmt.Errorf("init worker pool: %w", err) } return &listener{ diff --git a/pkg/morph/event/netmap/update_peer_notary.go b/pkg/morph/event/netmap/update_peer_notary.go index 0260810b8..993182ab4 100644 --- a/pkg/morph/event/netmap/update_peer_notary.go +++ b/pkg/morph/event/netmap/update_peer_notary.go @@ -10,7 +10,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/vm/opcode" ) -var errNilPubKey = errors.New("could not parse public key: public key is nil") +var errNilPubKey = errors.New("public key is nil") func (s *UpdatePeer) setPublicKey(v []byte) (err error) { if v == nil { @@ -19,7 +19,7 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) { s.PubKey, err = keys.NewPublicKeyFromBytes(v, elliptic.P256()) if err != nil { - return fmt.Errorf("could not parse public key: %w", err) + return fmt.Errorf("parse public key: %w", err) } return diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 37091f768..40f5984a9 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -127,7 +127,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { for { opCode, param, err = ctx.Next() if err != nil { - return nil, fmt.Errorf("could not get next opcode in script: %w", err) + return nil, fmt.Errorf("get next opcode in script: %w", err) } if opCode == opcode.RET { @@ -147,7 +147,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { // retrieve contract's script hash contractHash, err := util.Uint160DecodeBytesBE(ops[opsLen-2].param) if err != nil { - return nil, fmt.Errorf("could not decode contract hash: %w", err) + return nil, fmt.Errorf("decode contract hash: %w", err) } // retrieve contract's method @@ -164,7 +164,7 @@ func (p Preparator) Prepare(nr *payload.P2PNotaryRequest) (NotaryEvent, error) { if len(args) != 0 { err = p.validateParameterOpcodes(args) if err != nil { - return nil, fmt.Errorf("could not validate arguments: %w", err) + return nil, fmt.Errorf("validate arguments: %w", err) } // without args packing opcodes @@ -206,7 +206,7 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { currentAlphabet, err := p.alphaKeys() if err != nil { - return fmt.Errorf("could not fetch Alphabet public keys: %w", err) + return fmt.Errorf("fetch Alphabet public keys: %w", err) } err = p.validateCosigners(ln, nr.MainTransaction.Signers, currentAlphabet) @@ -239,7 +239,7 @@ func (p Preparator) validateParameterOpcodes(ops []Op) error { argsLen, err := IntFromOpcode(ops[l-2]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } err = validateNestedArgs(argsLen, ops[:l-2]) @@ -273,7 +273,7 @@ func validateNestedArgs(expArgLen int64, ops []Op) error { argsLen, err := IntFromOpcode(ops[i-1]) if err != nil { - return fmt.Errorf("could not parse argument len: %w", err) + return fmt.Errorf("parse argument len: %w", err) } expArgLen += argsLen + 1 @@ -307,7 +307,7 @@ func (p Preparator) validateExpiration(fbTX *transaction.Transaction) error { currBlock, err := p.blockCounter.BlockCount() if err != nil { - return fmt.Errorf("could not fetch current chain height: %w", err) + return fmt.Errorf("fetch current chain height: %w", err) } if currBlock >= nvb.Height { @@ -327,7 +327,7 @@ func (p Preparator) validateCosigners(expected int, s 
[]transaction.Signer, alph alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } if !s[1].Account.Equals(hash.Hash160(alphaVerificationScript)) { @@ -346,7 +346,7 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu alphaVerificationScript, err := smartcontract.CreateMultiSigRedeemScript(len(alphaKeys)*2/3+1, alphaKeys) if err != nil { - return fmt.Errorf("could not get Alphabet verification script: %w", err) + return fmt.Errorf("get Alphabet verification script: %w", err) } // the second one must be witness of the current diff --git a/pkg/morph/event/rolemanagement/designate.go b/pkg/morph/event/rolemanagement/designate.go index 28c968046..b384e436b 100644 --- a/pkg/morph/event/rolemanagement/designate.go +++ b/pkg/morph/event/rolemanagement/designate.go @@ -26,7 +26,7 @@ func (Designate) MorphEvent() {} func ParseDesignate(e *state.ContainedNotificationEvent) (event.Event, error) { params, err := event.ParseStackArray(e) if err != nil { - return nil, fmt.Errorf("could not parse stack items from notify event: %w", err) + return nil, fmt.Errorf("parse stack items from notify event: %w", err) } if len(params) != 2 { From 7151c71d51615648233acda1479feb7f4ab86ce6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 12 Dec 2024 15:06:20 +0300 Subject: [PATCH 270/591] [#1558] morph/client: Remove "could not"/"can't"/"failed to" from error messages Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/balance/balanceOf.go | 2 +- pkg/morph/client/balance/decimals.go | 2 +- pkg/morph/client/balance/transfer.go | 2 +- pkg/morph/client/client.go | 6 ++-- pkg/morph/client/container/containers_of.go | 2 +- pkg/morph/client/container/delete.go | 2 +- pkg/morph/client/container/deletion_info.go | 8 ++--- pkg/morph/client/container/get.go | 14 ++++---- pkg/morph/client/container/list.go | 4 +-- pkg/morph/client/container/put.go | 2 +- pkg/morph/client/frostfsid/subject.go | 6 ++-- pkg/morph/client/netmap/epoch.go | 4 +-- pkg/morph/client/netmap/innerring.go | 6 ++-- pkg/morph/client/netmap/new_epoch.go | 4 +-- pkg/morph/client/netmap/peer.go | 2 +- pkg/morph/client/nns.go | 2 +- pkg/morph/client/notary.go | 38 ++++++++++----------- pkg/morph/client/static.go | 2 +- pkg/morph/client/util.go | 2 +- 19 files changed, 55 insertions(+), 55 deletions(-) diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index 12a499ffb..a5fb8e82a 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -29,7 +29,7 @@ func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { amount, err := client.BigIntFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not get integer stack item from stack item (%s): %w", balanceOfMethod, err) + return nil, fmt.Errorf("get integer stack item from stack item (%s): %w", balanceOfMethod, err) } return amount, nil } diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index 28329ee6e..c2a66dded 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -21,7 +21,7 @@ func (c *Client) Decimals() (uint32, error) { decimals, err := client.IntFromStackItem(prms[0]) if err != nil { - return 0, fmt.Errorf("could not get integer stack item from stack item (%s): %w", decimalsMethod, err) 
+ return 0, fmt.Errorf("get integer stack item from stack item (%s): %w", decimalsMethod, err) } return uint32(decimals), nil } diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 16c8f3982..52d69dccb 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -39,7 +39,7 @@ func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { _, err = c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", transferXMethod, err) + return fmt.Errorf("invoke method (%s): %w", transferXMethod, err) } return nil } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index a0c29141b..5a5d24900 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -196,7 +196,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F txHash, vub, err := c.rpcActor.SendTunedCall(contract, method, nil, addFeeCheckerModifier(int64(fee)), args...) if err != nil { - return InvokeRes{}, fmt.Errorf("could not invoke %s: %w", method, err) + return InvokeRes{}, fmt.Errorf("invoke %s: %w", method, err) } c.logger.Debug(ctx, logs.ClientNeoClientInvoke, @@ -509,7 +509,7 @@ func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { list, err := c.roleList(noderoles.NeoFSAlphabet) if err != nil { - return nil, fmt.Errorf("can't get alphabet nodes role list: %w", err) + return nil, fmt.Errorf("get alphabet nodes role list: %w", err) } return list, nil @@ -523,7 +523,7 @@ func (c *Client) GetDesignateHash() util.Uint160 { func (c *Client) roleList(r noderoles.Role) (keys.PublicKeys, error) { height, err := c.rpcActor.GetBlockCount() if err != nil { - return nil, fmt.Errorf("can't get chain height: %w", err) + return nil, fmt.Errorf("get chain height: %w", err) } return c.rolemgmt.GetDesignatedByRole(r, height) diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index c4db0fe6e..5fe15be0d 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -26,7 +26,7 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { cb := func(item stackitem.Item) error { rawID, err := client.BytesFromStackItem(item) if err != nil { - return fmt.Errorf("could not get byte array from stack item (%s): %w", containersOfMethod, err) + return fmt.Errorf("get byte array from stack item (%s): %w", containersOfMethod, err) } var id cid.ID diff --git a/pkg/morph/client/container/delete.go b/pkg/morph/client/container/delete.go index ef5cc8c38..09912efa5 100644 --- a/pkg/morph/client/container/delete.go +++ b/pkg/morph/client/container/delete.go @@ -78,7 +78,7 @@ func (c *Client) Delete(ctx context.Context, p DeletePrm) (uint32, error) { res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", deleteMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", deleteMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index 436ca3c01..b86e0ce9c 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -46,7 +46,7 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", deletionInfoMethod, err) + 
return nil, fmt.Errorf("get item array of container (%s): %w", deletionInfoMethod, err) } if len(arr) != 2 { @@ -55,17 +55,17 @@ func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { rawOwner, err := client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", deletionInfoMethod, err) } var owner user.ID if err := owner.DecodeString(base58.Encode(rawOwner)); err != nil { - return nil, fmt.Errorf("could not decode container owner id (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("decode container owner id (%s): %w", deletionInfoMethod, err) } epoch, err := client.BigIntFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", deletionInfoMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", deletionInfoMethod, err) } return &containercore.DelInfo{ diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 1d84e9109..2ab58bf01 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -60,7 +60,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { arr, err := client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get item array of container (%s): %w", getMethod, err) } if len(arr) != 4 { @@ -69,29 +69,29 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { cnrBytes, err := client.BytesFromStackItem(arr[0]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container (%s): %w", getMethod, err) } sigBytes, err := client.BytesFromStackItem(arr[1]) if err != nil { - return nil, fmt.Errorf("could not get byte array of container signature (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of container signature (%s): %w", getMethod, err) } pub, err := client.BytesFromStackItem(arr[2]) if err != nil { - return nil, fmt.Errorf("could not get byte array of public key (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of public key (%s): %w", getMethod, err) } tokBytes, err := client.BytesFromStackItem(arr[3]) if err != nil { - return nil, fmt.Errorf("could not get byte array of session token (%s): %w", getMethod, err) + return nil, fmt.Errorf("get byte array of session token (%s): %w", getMethod, err) } var cnr containercore.Container if err := cnr.Value.Unmarshal(cnrBytes); err != nil { // use other major version if there any - return nil, fmt.Errorf("can't unmarshal container: %w", err) + return nil, fmt.Errorf("unmarshal container: %w", err) } if len(tokBytes) > 0 { @@ -99,7 +99,7 @@ func (c *Client) Get(cid []byte) (*containercore.Container, error) { err = cnr.Session.Unmarshal(tokBytes) if err != nil { - return nil, fmt.Errorf("could not unmarshal session token: %w", err) + return nil, fmt.Errorf("unmarshal session token: %w", err) } } diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index 55317375a..d9719aedd 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -34,14 +34,14 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { res, err = client.ArrayFromStackItem(res[0]) if err != nil { - 
return nil, fmt.Errorf("could not get stack item array from stack item (%s): %w", listMethod, err) + return nil, fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) } cidList := make([]cid.ID, 0, len(res)) for i := range res { rawID, err := client.BytesFromStackItem(res[i]) if err != nil { - return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", listMethod, err) + return nil, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) } var id cid.ID diff --git a/pkg/morph/client/container/put.go b/pkg/morph/client/container/put.go index 1eb03694f..3bb84eb87 100644 --- a/pkg/morph/client/container/put.go +++ b/pkg/morph/client/container/put.go @@ -117,7 +117,7 @@ func (c *Client) Put(ctx context.Context, p PutPrm) error { _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 7c93f4922..305f3ce09 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -31,7 +31,7 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) subj, err := frostfsidclient.ParseSubject(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) } return subj, nil @@ -54,7 +54,7 @@ func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.Subject subj, err := frostfsidclient.ParseSubjectExtended(structArr) if err != nil { - return nil, fmt.Errorf("could not parse test invocation result (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("parse test invocation result (%s): %w", methodGetSubject, err) } return subj, nil @@ -67,7 +67,7 @@ func checkStackItem(res []stackitem.Item) (structArr []stackitem.Item, err error structArr, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("could not get item array of container (%s): %w", methodGetSubject, err) + return nil, fmt.Errorf("get item array of container (%s): %w", methodGetSubject, err) } return } diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 6d909a7a1..6d7394998 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -25,7 +25,7 @@ func (c *Client) Epoch() (uint64, error) { num, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", epochMethod, err) + return 0, fmt.Errorf("get number from stack item (%s): %w", epochMethod, err) } return uint64(num), nil } @@ -49,7 +49,7 @@ func (c *Client) LastEpochBlock() (uint32, error) { block, err := client.IntFromStackItem(items[0]) if err != nil { - return 0, fmt.Errorf("could not get number from stack item (%s): %w", + return 0, fmt.Errorf("get number from stack item (%s): %w", lastEpochBlockMethod, err) } return uint32(block), nil diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index 824827d6f..0cfad4c82 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -59,7 +59,7 @@ func irKeysFromStackItem(stack []stackitem.Item, method string) (keys.PublicKeys irs, err := client.ArrayFromStackItem(stack[0]) if err != nil { - return nil, 
fmt.Errorf("could not get stack item array from stack item (%s): %w", method, err) + return nil, fmt.Errorf("get stack item array from stack item (%s): %w", method, err) } irKeys := make(keys.PublicKeys, len(irs)) @@ -79,7 +79,7 @@ const irNodeFixedPrmNumber = 1 func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { prms, err := client.ArrayFromStackItem(prm) if err != nil { - return nil, fmt.Errorf("could not get stack item array (IRNode): %w", err) + return nil, fmt.Errorf("get stack item array (IRNode): %w", err) } else if ln := len(prms); ln != irNodeFixedPrmNumber { return nil, fmt.Errorf( "unexpected stack item count (IRNode): expected %d, has %d", @@ -90,7 +90,7 @@ func irKeyFromStackItem(prm stackitem.Item) (*keys.PublicKey, error) { byteKey, err := client.BytesFromStackItem(prms[0]) if err != nil { - return nil, fmt.Errorf("could not parse bytes from stack item (IRNode): %w", err) + return nil, fmt.Errorf("parse bytes from stack item (IRNode): %w", err) } return keys.NewPublicKeyFromBytes(byteKey, elliptic.P256()) diff --git a/pkg/morph/client/netmap/new_epoch.go b/pkg/morph/client/netmap/new_epoch.go index efcdfd7b6..341b20935 100644 --- a/pkg/morph/client/netmap/new_epoch.go +++ b/pkg/morph/client/netmap/new_epoch.go @@ -16,7 +16,7 @@ func (c *Client) NewEpoch(ctx context.Context, epoch uint64) error { _, err := c.client.Invoke(ctx, prm) if err != nil { - return fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return nil } @@ -34,7 +34,7 @@ func (c *Client) NewEpochControl(ctx context.Context, epoch uint64, vub uint32) res, err := c.client.Invoke(ctx, prm) if err != nil { - return 0, fmt.Errorf("could not invoke method (%s): %w", newEpochMethod, err) + return 0, fmt.Errorf("invoke method (%s): %w", newEpochMethod, err) } return res.VUB, nil } diff --git a/pkg/morph/client/netmap/peer.go b/pkg/morph/client/netmap/peer.go index 949e8cb63..e83acde39 100644 --- a/pkg/morph/client/netmap/peer.go +++ b/pkg/morph/client/netmap/peer.go @@ -41,7 +41,7 @@ func (c *Client) AddPeer(ctx context.Context, p AddPeerPrm) error { prm.InvokePrmOptional = p.InvokePrmOptional if _, err := c.client.Invoke(ctx, prm); err != nil { - return fmt.Errorf("could not invoke method (%s): %w", method, err) + return fmt.Errorf("invoke method (%s): %w", method, err) } return nil } diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index 218f7ad8e..f292dccf1 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -107,7 +107,7 @@ func (c *Client) NNSHash() (util.Uint160, error) { func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { found, err := exists(c, nnsHash, domain) if err != nil { - return nil, fmt.Errorf("could not check presence in NNS contract for %s: %w", domain, err) + return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } if !found { diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 8516c89a9..1e80be921 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -62,7 +62,7 @@ const ( notaryExpirationOfMethod = "expirationOf" setDesignateMethod = "designateAsRole" - notaryBalanceErrMsg = "can't fetch notary balance" + notaryBalanceErrMsg = "fetch notary balance" notaryNotEnabledPanicMsg = "notary support was not enabled on this client" ) @@ -155,12 +155,12 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta bc, err := 
c.rpcActor.GetBlockCount() if err != nil { - return util.Uint256{}, fmt.Errorf("can't get blockchain height: %w", err) + return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err) } currentTill, err := c.depositExpirationOf() if err != nil { - return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err) + return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err) } till := max(int64(bc+delta), currentTill) @@ -197,7 +197,7 @@ func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till i []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { if !errors.Is(err, neorpc.ErrAlreadyExists) { - return util.Uint256{}, 0, fmt.Errorf("can't make notary deposit: %w", err) + return util.Uint256{}, 0, fmt.Errorf("make notary deposit: %w", err) } // Transaction is already in mempool waiting to be processed. @@ -289,7 +289,7 @@ func (c *Client) UpdateNotaryList(ctx context.Context, prm UpdateNotaryListPrm) nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( @@ -338,7 +338,7 @@ func (c *Client) UpdateNeoFSAlphabetList(ctx context.Context, prm UpdateAlphabet nonce, vub, err := c.CalculateNonceAndVUB(&prm.hash) if err != nil { - return fmt.Errorf("could not calculate nonce and `valicUntilBlock` values: %w", err) + return fmt.Errorf("calculate nonce and `valicUntilBlock` values: %w", err) } return c.notaryInvokeAsCommittee( @@ -407,7 +407,7 @@ func (c *Client) NotarySignAndInvokeTX(mainTx *transaction.Transaction) error { alphabetList, err := c.notary.alphabetSource() if err != nil { - return fmt.Errorf("could not fetch current alphabet keys: %w", err) + return fmt.Errorf("fetch current alphabet keys: %w", err) } cosigners, err := c.notaryCosignersFromTx(mainTx, alphabetList) @@ -529,24 +529,24 @@ func (c *Client) notaryCosignersFromTx(mainTx *transaction.Transaction, alphabet if ok { pub, err := keys.NewPublicKeyFromBytes(pubBytes, elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key: %w", err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key: %w", err) } acc = notary.FakeSimpleAccount(pub) } else { m, pubsBytes, ok := vm.ParseMultiSigContract(script) if !ok { - return nil, errors.New("failed to parse verification script of signer #2: unknown witness type") + return nil, errors.New("parse verification script of signer #2: unknown witness type") } pubs := make(keys.PublicKeys, len(pubsBytes)) for i := range pubs { pubs[i], err = keys.NewPublicKeyFromBytes(pubsBytes[i], elliptic.P256()) if err != nil { - return nil, fmt.Errorf("failed to parse verification script of signer #2: invalid public key #%d: %w", i, err) + return nil, fmt.Errorf("parse verification script of signer #2: invalid public key #%d: %w", i, err) } } acc, err = notary.FakeMultisigAccount(m, pubs) if err != nil { - return nil, fmt.Errorf("failed to create fake account for signer #2: %w", err) + return nil, fmt.Errorf("create fake account for signer #2: %w", err) } } } @@ -623,7 +623,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB err := multisigAccount.ConvertMultisig(m, ir) if err != nil { // wrap error as FrostFS-specific since the call is not related to any client - return nil, 
wrapFrostFSError(fmt.Errorf("can't convert account to inner ring multisig wallet: %w", err)) + return nil, wrapFrostFSError(fmt.Errorf("convert account to inner ring multisig wallet: %w", err)) } } else { // alphabet multisig redeem script is @@ -632,7 +632,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("can't make inner ring multisig wallet: %w", err)) + return nil, wrapFrostFSError(fmt.Errorf("make inner ring multisig wallet: %w", err)) } } @@ -642,7 +642,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB func (c *Client) notaryTxValidationLimit() (uint32, error) { bc, err := c.rpcActor.GetBlockCount() if err != nil { - return 0, fmt.Errorf("can't get current blockchain height: %w", err) + return 0, fmt.Errorf("get current blockchain height: %w", err) } minTime := bc + c.notary.txValidTime @@ -663,7 +663,7 @@ func (c *Client) depositExpirationOf() (int64, error) { currentTillBig, err := expirationRes[0].TryInteger() if err != nil { - return 0, fmt.Errorf("can't parse deposit till value: %w", err) + return 0, fmt.Errorf("parse deposit till value: %w", err) } return currentTillBig.Int64(), nil @@ -742,12 +742,12 @@ func alreadyOnChainError(err error) bool { func CalculateNotaryDepositAmount(c *Client, gasMul, gasDiv int64) (fixedn.Fixed8, error) { notaryBalance, err := c.GetNotaryDeposit() if err != nil { - return 0, fmt.Errorf("could not get notary balance: %w", err) + return 0, fmt.Errorf("get notary balance: %w", err) } gasBalance, err := c.GasBalance() if err != nil { - return 0, fmt.Errorf("could not get GAS balance: %w", err) + return 0, fmt.Errorf("get GAS balance: %w", err) } if gasBalance == 0 { @@ -796,12 +796,12 @@ func (c *Client) calculateNonceAndVUB(hash *util.Uint256, roundBlockHeight bool) if hash != nil { height, err = c.getTransactionHeight(*hash) if err != nil { - return 0, 0, fmt.Errorf("could not get transaction height: %w", err) + return 0, 0, fmt.Errorf("get transaction height: %w", err) } } else { height, err = c.rpcActor.GetBlockCount() if err != nil { - return 0, 0, fmt.Errorf("could not get chain height: %w", err) + return 0, 0, fmt.Errorf("get chain height: %w", err) } } diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index be4c09182..21adebd9e 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -159,7 +159,7 @@ func (s StaticClient) Invoke(ctx context.Context, prm InvokePrm) (InvokeRes, err nonce, vub, err = s.client.CalculateNonceAndVUB(prm.hash) } if err != nil { - return InvokeRes{}, fmt.Errorf("could not calculate nonce and VUB for notary alphabet invoke: %w", err) + return InvokeRes{}, fmt.Errorf("calculate nonce and VUB for notary alphabet invoke: %w", err) } vubP = &vub diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index cd55d6bd2..f68d39beb 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -53,7 +53,7 @@ func BytesFromStackItem(param stackitem.Item) ([]byte, error) { case stackitem.IntegerT: n, err := param.TryInteger() if err != nil { - return nil, fmt.Errorf("can't parse integer bytes: %w", err) + return nil, fmt.Errorf("parse integer bytes: %w", err) } return n.Bytes(), nil From d165ac042cf9f34a54117ea002d789fae0b0905b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 12 Dec 2024 15:27:17 
+0300 Subject: [PATCH 271/591] [#1558] morph/client: Reuse notary rpcclient wrapper Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/notary.go | 44 ++++++-------------------------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 1e80be921..dbd58a53a 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -58,16 +58,11 @@ const ( defaultNotaryValidTime = 50 defaultNotaryRoundTime = 100 - notaryBalanceOfMethod = "balanceOf" - notaryExpirationOfMethod = "expirationOf" - setDesignateMethod = "designateAsRole" + setDesignateMethod = "designateAsRole" - notaryBalanceErrMsg = "fetch notary balance" notaryNotEnabledPanicMsg = "notary support was not enabled on this client" ) -var errUnexpectedItems = errors.New("invalid number of NEO VM arguments on stack") - func defaultNotaryConfig(c *Client) *notaryCfg { return ¬aryCfg{ txValidTime: defaultNotaryValidTime, @@ -158,12 +153,13 @@ func (c *Client) DepositNotary(ctx context.Context, amount fixedn.Fixed8, delta return util.Uint256{}, fmt.Errorf("get blockchain height: %w", err) } - currentTill, err := c.depositExpirationOf() + r := notary.NewReader(c.rpcActor) + currentTill, err := r.ExpirationOf(c.acc.PrivateKey().GetScriptHash()) if err != nil { return util.Uint256{}, fmt.Errorf("get previous expiration value: %w", err) } - till := max(int64(bc+delta), currentTill) + till := max(int64(bc+delta), int64(currentTill)) res, _, err := c.depositNotary(ctx, amount, till) return res, err } @@ -237,18 +233,10 @@ func (c *Client) GetNotaryDeposit() (res int64, err error) { sh := c.acc.PrivateKey().PublicKey().GetScriptHash() - items, err := c.TestInvoke(c.notary.notary, notaryBalanceOfMethod, sh) + r := notary.NewReader(c.rpcActor) + bigIntDeposit, err := r.BalanceOf(sh) if err != nil { - return 0, fmt.Errorf("test invoke (%s): %w", notaryBalanceOfMethod, err) - } - - if len(items) != 1 { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, errUnexpectedItems)) - } - - bigIntDeposit, err := items[0].TryInteger() - if err != nil { - return 0, wrapFrostFSError(fmt.Errorf("%v: %w", notaryBalanceErrMsg, err)) + return 0, fmt.Errorf("get notary deposit: %w", err) } return bigIntDeposit.Int64(), nil @@ -651,24 +639,6 @@ func (c *Client) notaryTxValidationLimit() (uint32, error) { return rounded, nil } -func (c *Client) depositExpirationOf() (int64, error) { - expirationRes, err := c.TestInvoke(c.notary.notary, notaryExpirationOfMethod, c.acc.PrivateKey().GetScriptHash()) - if err != nil { - return 0, fmt.Errorf("test invoke (%s): %w", notaryExpirationOfMethod, err) - } - - if len(expirationRes) != 1 { - return 0, fmt.Errorf("method returned unexpected item count: %d", len(expirationRes)) - } - - currentTillBig, err := expirationRes[0].TryInteger() - if err != nil { - return 0, fmt.Errorf("parse deposit till value: %w", err) - } - - return currentTillBig.Int64(), nil -} - // sigCount returns the number of required signature. // For FrostFS Alphabet M is a 2/3+1 of it (like in dBFT). // If committee is true, returns M as N/2+1. 
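The patch above (PATCH 271) drops the hand-rolled `TestInvoke` calls and stack-item parsing in favor of neo-go's typed notary reader (`notary.NewReader`), which already returns parsed values for `balanceOf` and `expirationOf`. The following is a minimal, self-contained sketch of that reader API in isolation, not part of the patch series: the RPC endpoint and the script hash are placeholders, and depending on the neo-go version an explicit `Init()` on the client may also be required.

```go
package main

import (
	"context"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/rpcclient"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker"
	"github.com/nspcc-dev/neo-go/pkg/rpcclient/notary"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func main() {
	// Placeholder endpoint; any N3 RPC node with the Notary native contract works.
	cli, err := rpcclient.New(context.Background(), "http://localhost:30333", rpcclient.Options{})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// The reader wraps read-only calls to the native Notary contract,
	// so there is no manual TestInvoke plus stack-item parsing.
	r := notary.NewReader(invoker.New(cli, nil))

	// Placeholder depositor script hash.
	acc, err := util.Uint160DecodeStringLE("1aada0032aba1ef6d1f07bbd8bec1d85f5380fb3")
	if err != nil {
		panic(err)
	}

	deposit, err := r.BalanceOf(acc)
	if err != nil {
		panic(fmt.Errorf("get notary deposit: %w", err))
	}
	till, err := r.ExpirationOf(acc)
	if err != nil {
		panic(fmt.Errorf("get deposit expiration: %w", err))
	}

	fmt.Printf("notary deposit: %v, locked until block %d\n", deposit, till)
}
```

The typed reader also removes the ad-hoc result validation the patch deletes (`errUnexpectedItems`, the `notaryBalanceErrMsg` wrapping): item-count and integer-conversion checks now live inside the wrapper rather than in every caller.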
From 2af3409d39f173e749fb6b1396cc2f4c03d1b179 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 13 Dec 2024 12:59:35 +0300 Subject: [PATCH 272/591] [#1510] metabase/test: Fix `BenchmarkGet` Fix misplaced `(*DB).Close` (broken after 47dcfa20f3) Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/metabase/get_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index c93d2c992..98c428410 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -219,7 +219,6 @@ func benchmarkGet(b *testing.B, numOfObj int) { meta.WithMaxBatchSize(batchSize), meta.WithMaxBatchDelay(10*time.Millisecond), ) - defer func() { require.NoError(b, db.Close(context.Background())) }() addrs := make([]oid.Address, 0, numOfObj) for range numOfObj { @@ -234,6 +233,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { } db, addrs := prepareDb(runtime.NumCPU()) + defer func() { require.NoError(b, db.Close(context.Background())) }() b.Run("parallel", func(b *testing.B) { b.ReportAllocs() From 8ba9f31fca82ca8c0ccabdbb7254b2ad67a2f6fe Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 13 Dec 2024 13:04:12 +0300 Subject: [PATCH 273/591] [#1510] metabase/test: Fix `BenchmarkListWithCursor` - Fix misplaced `(*DB).Close` (broken after 47dcfa20f3) - Use `errors.Is` for error checking (broken after fcdbf5e509) Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/metabase/list_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 6f6463071..1d8beb175 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -18,6 +18,8 @@ import ( func BenchmarkListWithCursor(b *testing.B) { db := listWithCursorPrepareDB(b) + defer func() { require.NoError(b, db.Close(context.Background())) }() + b.Run("1 item", func(b *testing.B) { benchmarkListWithCursor(b, db, 1) }) @@ -33,7 +35,6 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{ NoSync: true, })) // faster single-thread generation - defer func() { require.NoError(b, db.Close(context.Background())) }() obj := testutil.GenerateObject() for i := range 100_000 { // should be a multiple of all batch sizes @@ -55,7 +56,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if err != meta.ErrEndOfListing { + if errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) From f0c43c8d80e79b12111e8e8d1574e847f5162ac1 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 13 Dec 2024 11:44:56 +0300 Subject: [PATCH 274/591] [#1502] Use `zap.Error` for logging errors Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like `zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`. Leave similar expressions with other messages unchanged, for example, `zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`. 
This change was made by applying the following patch: ```diff @@ var err expression @@ -zap.String("error", err.Error()) +zap.Error(err) @@ var err expression @@ -zap.String("err", err.Error()) +zap.Error(err) ``` Signed-off-by: Aleksey Savchuk --- cmd/frostfs-ir/httpcomponent.go | 2 +- cmd/frostfs-ir/main.go | 4 ++-- cmd/frostfs-ir/pprof.go | 2 +- cmd/frostfs-node/config.go | 4 ++-- cmd/frostfs-node/main.go | 2 +- cmd/frostfs-node/morph.go | 4 ++-- cmd/frostfs-node/netmap.go | 2 +- cmd/frostfs-node/object.go | 4 ++-- cmd/frostfs-node/tree.go | 2 +- pkg/core/object/sender_classifier.go | 4 ++-- pkg/innerring/initialization.go | 4 ++-- pkg/innerring/innerring.go | 4 ++-- .../processors/alphabet/process_emit.go | 10 +++++----- .../processors/container/process_container.go | 8 ++++---- .../processors/frostfs/process_assets.go | 2 +- .../processors/governance/process_update.go | 18 ++++++++--------- .../processors/netmap/process_cleanup.go | 2 +- .../processors/netmap/process_epoch.go | 8 ++++---- .../processors/netmap/process_peers.go | 2 +- pkg/innerring/state.go | 8 ++++---- .../blobstor/blobovniczatree/delete.go | 2 +- .../blobstor/blobovniczatree/exists.go | 2 +- .../blobstor/blobovniczatree/get.go | 2 +- .../blobstor/blobovniczatree/get_range.go | 2 +- .../blobstor/blobovniczatree/iterate.go | 4 ++-- .../blobstor/blobovniczatree/manager.go | 4 ++-- .../blobstor/blobovniczatree/put.go | 4 ++-- pkg/local_object_storage/blobstor/control.go | 2 +- pkg/local_object_storage/blobstor/exists.go | 2 +- .../blobstor/fstree/fstree.go | 4 ++-- pkg/local_object_storage/blobstor/iterate.go | 2 +- pkg/local_object_storage/engine/control.go | 2 +- pkg/local_object_storage/engine/delete.go | 6 +++--- pkg/local_object_storage/engine/engine.go | 4 ++-- pkg/local_object_storage/engine/get.go | 2 +- pkg/local_object_storage/engine/put.go | 6 +++--- pkg/local_object_storage/engine/range.go | 2 +- pkg/local_object_storage/shard/control.go | 2 +- pkg/local_object_storage/shard/delete.go | 4 ++-- pkg/local_object_storage/shard/gc.go | 20 +++++++++---------- pkg/local_object_storage/shard/inhume.go | 2 +- pkg/local_object_storage/shard/list.go | 2 +- pkg/local_object_storage/shard/put.go | 2 +- pkg/morph/client/client.go | 4 ++-- pkg/morph/event/listener.go | 12 +++++------ pkg/morph/event/utils.go | 2 +- pkg/services/object/delete/delete.go | 2 +- pkg/services/object/search/container.go | 8 ++++---- pkg/services/object/search/local.go | 2 +- pkg/services/object/search/search.go | 2 +- pkg/services/object/util/log.go | 4 ++-- pkg/services/policer/check.go | 2 +- pkg/services/policer/ec.go | 2 +- pkg/services/policer/process.go | 2 +- pkg/services/replicator/process.go | 2 +- pkg/services/tree/replicator.go | 4 ++-- 56 files changed, 114 insertions(+), 114 deletions(-) diff --git a/cmd/frostfs-ir/httpcomponent.go b/cmd/frostfs-ir/httpcomponent.go index a8eef6010..dd70fc91c 100644 --- a/cmd/frostfs-ir/httpcomponent.go +++ b/cmd/frostfs-ir/httpcomponent.go @@ -77,7 +77,7 @@ func (c *httpComponent) reload(ctx context.Context) { log.Info(ctx, c.name+" config updated") if err := c.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } else { c.init(ctx) diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index e86c04b9e..ade64ba84 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -119,12 +119,12 @@ func shutdown(ctx context.Context) { innerRing.Stop(ctx) if err := metricsCmp.shutdown(ctx); err != nil { 
log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } if err := pprofCmp.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/frostfs-ir/pprof.go b/cmd/frostfs-ir/pprof.go index 8e81d8b85..2aebcde7f 100644 --- a/cmd/frostfs-ir/pprof.go +++ b/cmd/frostfs-ir/pprof.go @@ -58,7 +58,7 @@ func (c *pprofComponent) reload(ctx context.Context) { log.Info(ctx, c.name+" config updated") if err := c.shutdown(ctx); err != nil { log.Debug(ctx, logs.FrostFSIRCouldNotShutdownHTTPServer, - zap.String("error", err.Error())) + zap.Error(err)) return } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 9b727e41a..6950c9e24 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1119,7 +1119,7 @@ func initLocalStorage(ctx context.Context, c *cfg) { err := ls.Close(context.WithoutCancel(ctx)) if err != nil { c.log.Info(ctx, logs.FrostFSNodeStorageEngineClosingFailure, - zap.String("error", err.Error()), + zap.Error(err), ) } else { c.log.Info(ctx, logs.FrostFSNodeAllComponentsOfTheStorageEngineClosedSuccessfully) @@ -1209,7 +1209,7 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) return } diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index f8854ab3c..3c15dc439 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -134,7 +134,7 @@ func stopAndLog(ctx context.Context, c *cfg, name string, stopper func(context.C err := stopper(ctx) if err != nil { c.log.Debug(ctx, fmt.Sprintf("could not shutdown %s server", name), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 0d4e8a1be..5415da12a 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -96,7 +96,7 @@ func initMorphClient(ctx context.Context, c *cfg) { if err != nil { c.log.Info(ctx, logs.FrostFSNodeFailedToCreateNeoRPCClient, zap.Any("endpoints", addresses), - zap.String("error", err.Error()), + zap.Error(err), ) fatalOnErr(err) @@ -168,7 +168,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + c.log.Warn(ctx, logs.FrostFSNodeCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } subs, err = subscriber.New(ctx, &subscriber.Params{ diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index e94428fcb..2eb4cd132 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -197,7 +197,7 @@ func addNewEpochNotificationHandlers(c *cfg) { _, _, err := makeNotaryDeposit(ctx, c) if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotMakeNotaryDeposit, - zap.String("error", err.Error()), + zap.Error(err), ) } }) diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 6804aae59..f82a8e533 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -58,7 +58,7 @@ func (c *cfg) MaxObjectSize() uint64 { sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { c.log.Error(context.Background(), 
logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -269,7 +269,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl _, err := ls.Inhume(ctx, inhumePrm) if err != nil { c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, - zap.String("error", err.Error()), + zap.Error(err), ) } }), diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index c423c0660..f3ddc8cbe 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -113,7 +113,7 @@ func initTreeService(c *cfg) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. c.log.Error(ctx, logs.FrostFSNodeContainerRemovalEventReceivedButTreesWerentRemoved, zap.Stringer("cid", ev.ID), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index 3b3650134..a1a5fcac1 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -67,7 +67,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK if err != nil { // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, - zap.String("error", err.Error())) + zap.Error(err)) } else if isInnerRingNode { return &ClassifyResult{ Role: acl.RoleInnerRing, @@ -84,7 +84,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK // is not possible for previous epoch, so // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode, - zap.String("error", err.Error())) + zap.Error(err)) } else if isContainerNode { return &ClassifyResult{ Role: acl.RoleContainer, diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index 5481354e1..ecaf8ae86 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -100,7 +100,7 @@ func (s *Server) initMainnet(ctx context.Context, cfg *viper.Viper, morphChain * fromMainChainBlock, err := s.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedMainChainBlockNumber, zap.Error(err)) } mainnetChain.from = fromMainChainBlock @@ -456,7 +456,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := s.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.InnerringCantGetLastProcessedSideChainBlockNumber, zap.Error(err)) } morphChain := &chainParams{ diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 4fe9cc084..0b9e83443 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -177,7 +177,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { if err != nil { // we don't stop inner ring execution on this error s.log.Warn(ctx, logs.InnerringCantVoteForPreparedValidators, - zap.String("error", err.Error())) + zap.Error(err)) } s.tickInitialExpoch(ctx) @@ -308,7 +308,7 @@ func (s *Server) Stop(ctx context.Context) { for _, c := range s.closers { if err := c(); err != nil { s.log.Warn(ctx, 
logs.InnerringCloserError, - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 229261250..8e11d2d61 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -33,7 +33,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool { // there is no signature collecting, so we don't need extra fee _, err := ap.morphClient.Invoke(ctx, contract, 0, emitMethod) if err != nil { - ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.String("error", err.Error())) + ap.log.Warn(ctx, logs.AlphabetCantInvokeAlphabetEmitMethod, zap.Error(err)) return false } @@ -47,7 +47,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool { networkMap, err := ap.netmapClient.NetMap() if err != nil { ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, - zap.String("error", err.Error())) + zap.Error(err)) return false } @@ -83,7 +83,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { ap.log.Warn(ctx, logs.AlphabetCantParseNodePublicKey, - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -93,7 +93,7 @@ func (ap *Processor) transferGasToNetmapNodes(ctx context.Context, nmNodes []net ap.log.Warn(ctx, logs.AlphabetCantTransferGas, zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -110,7 +110,7 @@ func (ap *Processor) transferGasToExtraNodes(ctx context.Context, pw []util.Uint ap.log.Warn(ctx, logs.AlphabetCantTransferGasToWallet, zap.Strings("receivers", receiversLog), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 16c450166..ffaea653a 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -50,7 +50,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool err := cp.checkPutContainer(pctx) if err != nil { cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -58,7 +58,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool if err := cp.morphClient.NotarySignAndInvokeTX(pctx.e.NotaryRequest().MainTransaction); err != nil { cp.log.Error(ctx, logs.ContainerCouldNotApprovePutContainer, - zap.String("error", err.Error()), + zap.Error(err), ) return false } @@ -113,7 +113,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven err := cp.checkDeleteContainer(e) if err != nil { cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return false @@ -121,7 +121,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven if err := cp.morphClient.NotarySignAndInvokeTX(e.NotaryRequest().MainTransaction); err != nil { cp.log.Error(ctx, logs.ContainerCouldNotApproveDeleteContainer, - zap.String("error", err.Error()), + zap.Error(err), ) return false diff --git a/pkg/innerring/processors/frostfs/process_assets.go b/pkg/innerring/processors/frostfs/process_assets.go index 
ee824ea31..d10eb9660 100644 --- a/pkg/innerring/processors/frostfs/process_assets.go +++ b/pkg/innerring/processors/frostfs/process_assets.go @@ -73,7 +73,7 @@ func (np *Processor) processDeposit(ctx context.Context, deposit frostfsEvent.De err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { np.log.Error(ctx, logs.FrostFSCantTransferNativeGasToReceiver, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 73d21a7d2..245679656 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -28,21 +28,21 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, - zap.String("error", err.Error())) + zap.Error(err)) return false } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromSideChain, - zap.String("error", err.Error())) + zap.Error(err)) return false } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { gp.log.Error(ctx, logs.GovernanceCantMergeAlphabetListsFromMainNetAndSideChain, - zap.String("error", err.Error())) + zap.Error(err)) return false } @@ -65,7 +65,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 err = gp.voter.VoteForSidechainValidator(ctx, votePrm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantVoteForSideChainCommittee, - zap.String("error", err.Error())) + zap.Error(err)) } // 2. Update NeoFSAlphabet role in the sidechain. 
@@ -98,14 +98,14 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchInnerRingListFromSideChain, - zap.String("error", err.Error())) + zap.Error(err)) return } newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { gp.log.Error(ctx, logs.GovernanceCantCreateNewInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -122,7 +122,7 @@ func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sid if err = gp.morphClient.UpdateNeoFSAlphabetList(ctx, updPrm); err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateInnerRingListWithNewAlphabetKeys, - zap.String("error", err.Error())) + zap.Error(err)) } } @@ -135,7 +135,7 @@ func (gp *Processor) updateNotaryRoleInSidechain(ctx context.Context, newAlphabe err := gp.morphClient.UpdateNotaryList(ctx, updPrm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateListOfNotaryNodesInSideChain, - zap.String("error", err.Error())) + zap.Error(err)) } } @@ -155,6 +155,6 @@ func (gp *Processor) updateFrostFSContractInMainnet(ctx context.Context, newAlph err := gp.frostfsClient.AlphabetUpdate(ctx, prm) if err != nil { gp.log.Error(ctx, logs.GovernanceCantUpdateListOfAlphabetNodesInFrostfsContract, - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index a43005ffb..8f8cc17ff 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -49,7 +49,7 @@ func (np *Processor) processNetmapCleanupTick(ctx context.Context, ev netmapClea }) if err != nil { np.log.Warn(ctx, logs.NetmapCantIterateOnNetmapCleanerCache, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 237c4e512..93e00bbaa 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc epochDuration, err := np.netmapClient.EpochDuration() if err != nil { np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, - zap.String("error", err.Error())) + zap.Error(err)) } else { np.epochState.SetEpochDuration(epochDuration) } @@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc if err != nil { np.log.Warn(ctx, logs.NetmapCantGetTransactionHeight, zap.String("hash", ev.TxHash().StringLE()), - zap.String("error", err.Error())) + zap.Error(err)) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { np.log.Warn(ctx, logs.NetmapCantResetEpochTimer, - zap.String("error", err.Error())) + zap.Error(err)) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, - zap.String("error", err.Error())) + zap.Error(err)) return false } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 72aa08f76..5b565ffd1 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -42,7 +42,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev 
netmapEvent.AddPeer) err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) if err != nil { np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, - zap.String("error", err.Error()), + zap.Error(err), ) return false diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 77c2af2ce..3e9880e70 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -62,7 +62,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool { func (s *Server) InnerRingIndex(ctx context.Context) int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err)) return -1 } @@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int { func (s *Server) InnerRingSize(ctx context.Context) int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err)) return 0 } @@ -86,7 +86,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int { func (s *Server) AlphabetIndex(ctx context.Context) int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.String("error", err.Error())) + s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err)) return -1 } @@ -132,7 +132,7 @@ func (s *Server) voteForSidechainValidator(ctx context.Context, prm governance.V s.log.Warn(ctx, logs.InnerringCantInvokeVoteMethodInAlphabetContract, zap.Int8("alphabet_index", int8(letter)), zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 8c2d7aa67..47e12bafb 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -82,7 +82,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index 63d2f21e1..d2c99945f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -57,7 +57,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index b7ef8d8a5..1a4f11c29 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -69,7 +69,7 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G if !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, 
logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index b24f1b881..6d06b8e6f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -71,7 +71,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if !outOfBounds && !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } if outOfBounds { diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index b120c22f7..a710cf988 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -44,7 +44,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm if prm.IgnoreErrors { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", elem.Address()), - zap.String("err", err.Error()), + zap.Error(err), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) return nil @@ -77,7 +77,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo if err != nil { if ignoreErrors { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.String("err", err.Error()), + zap.Error(err), zap.String("storage_id", p), zap.String("root_path", b.rootPath)) return false, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index b35e052cf..7d44aa5c6 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -97,7 +97,7 @@ func (b *sharedDB) Close(ctx context.Context) { if err := b.blcza.Close(ctx); err != nil { b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), - zap.String("error", err.Error()), + zap.Error(err), ) } b.blcza = nil @@ -125,7 +125,7 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { if err := b.blcza.Close(ctx); err != nil { b.log.Error(ctx, logs.BlobovniczatreeCouldNotCloseBlobovnicza, zap.String("id", b.path), - zap.String("error", err.Error()), + zap.Error(err), ) return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 1678e578c..8276a25ef 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -83,7 +83,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } @@ -106,7 +106,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) } else { 
i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } if errors.Is(err, blobovnicza.ErrNoSpace) { diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 44685524f..93316be02 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -74,7 +74,7 @@ func (b *BlobStor) Close(ctx context.Context) error { for i := range b.storage { err := b.storage[i].Storage.Close(ctx) if err != nil { - b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.String("error", err.Error())) + b.log.Info(ctx, logs.BlobstorCouldntCloseStorage, zap.Error(err)) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 556f53e12..f1e45fe10 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -75,7 +75,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi for _, err := range errors[:len(errors)-1] { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 53eb0395a..a77ad2f93 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -153,7 +153,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr if err != nil { if prm.IgnoreErrors { t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, - zap.String("err", err.Error()), + zap.Error(err), zap.String("directory_path", dirPath)) return nil } @@ -202,7 +202,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr if prm.IgnoreErrors { t.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Stringer("address", addr), - zap.String("err", err.Error()), + zap.Error(err), zap.String("path", path)) continue } diff --git a/pkg/local_object_storage/blobstor/iterate.go b/pkg/local_object_storage/blobstor/iterate.go index 1ba835a95..ff1aa9d64 100644 --- a/pkg/local_object_storage/blobstor/iterate.go +++ b/pkg/local_object_storage/blobstor/iterate.go @@ -45,7 +45,7 @@ func (b *BlobStor) Iterate(ctx context.Context, prm common.IteratePrm) (common.I b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.String("storage_path", b.storage[i].Storage.Path()), zap.String("storage_type", b.storage[i].Storage.Type()), - zap.String("err", err.Error())) + zap.Error(err)) continue } return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err) diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index a5c53dcad..bd9eb1021 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -167,7 +167,7 @@ func (e *StorageEngine) close(ctx context.Context, releasePools bool) error { if err := sh.Close(ctx); err != nil { e.log.Debug(ctx, logs.EngineCouldNotCloseShard, zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/local_object_storage/engine/delete.go 
b/pkg/local_object_storage/engine/delete.go index 7164ff21f..65ccbdb9e 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -154,7 +154,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return false } @@ -166,7 +166,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue } @@ -196,7 +196,7 @@ func (e *StorageEngine) deleteChunks( if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.String("err", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue } diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 029904046..f82268d1d 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -140,7 +140,7 @@ func (e *StorageEngine) reportShardError( if isLogical(err) { e.log.Warn(ctx, msg, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -151,7 +151,7 @@ func (e *StorageEngine) reportShardError( e.log.Warn(ctx, msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), - zap.String("error", err.Error()), + zap.Error(err), }, fields...)...) if e.errorsThreshold == 0 || errCount < e.errorsThreshold { diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index c7145889b..81b027c26 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -106,7 +106,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { if it.ShardWithMeta.Shard != nil && it.MetaError != nil { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), + zap.Error(it.MetaError), zap.Stringer("address", prm.addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 62671f433..c79b6e251 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -143,7 +143,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti } else { e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } @@ -165,14 +165,14 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return } if client.IsErrObjectAlreadyRemoved(err) { e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error()), + 
zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) res.status = putToShardRemoved res.err = err diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 0c9cea903..600e7266c 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -118,7 +118,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error if it.ShardWithMeta.Shard != nil && it.MetaError != nil { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), - zap.String("error", it.MetaError.Error()), + zap.Error(it.MetaError), zap.Stringer("address", prm.addr), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 5a9e26155..78ce241fe 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -272,7 +272,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, if err := obj.Unmarshal(data); err != nil { s.log.Warn(ctx, logs.ShardCouldNotUnmarshalObject, zap.Stringer("address", addr), - zap.String("err", err.Error())) + zap.Error(err)) return nil } diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index f62cecd56..fb6769b51 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -112,7 +112,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil { s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } @@ -132,7 +132,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil && !client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, zap.Stringer("object_address", addr), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) return err } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index c212f8c36..1b218a372 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -205,7 +205,7 @@ func (gc *gc) handleEvent(ctx context.Context, event Event) { }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, - zap.String("error", err.Error()), + zap.Error(err), ) v.prevGroup.Done() @@ -313,7 +313,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { err := s.metaBase.IterateOverGarbage(ctx, iterPrm) if err != nil { s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -334,7 +334,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { if err != nil { s.log.Warn(ctx, logs.ShardCouldNotDeleteTheObjects, - zap.String("error", err.Error()), + zap.Error(err), ) result.success = false } @@ -396,7 +396,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredObjectsFailed, zap.Error(err)) } } @@ -429,7 +429,7 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, 
expired []oid.Address) res, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -584,7 +584,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { }) if err = errGroup.Wait(); err != nil { - s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.String("error", err.Error())) + s.log.Warn(ctx, logs.ShardIteratorOverExpiredLocksFailed, zap.Error(err)) } } @@ -637,7 +637,7 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston res, err := s.metaBase.InhumeTombstones(ctx, tss) if err != nil { s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -665,7 +665,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -678,7 +678,7 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] res, err := s.metaBase.Inhume(ctx, pInhume) if err != nil { s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -722,7 +722,7 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { _, err := s.metaBase.FreeLockedBy(lockers) if err != nil { s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 984c54fbc..d46400869 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -110,7 +110,7 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { } s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 7b267d2e4..f583ef5d9 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -124,7 +124,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { if err != nil { s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), - zap.String("error", err.Error()), + zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) continue diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 50125a88d..1e4643db5 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -76,7 +76,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { if err != nil || !tryCache { if err != nil { s.log.Debug(ctx, logs.ShardCantPutObjectToTheWritecacheTryingBlobstor, - zap.String("err", err.Error())) + zap.Error(err)) } res, err = s.blobStor.Put(ctx, putPrm) diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 5a5d24900..37599e696 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -390,7 +390,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { height, err = c.rpcActor.GetBlockCount() if err != nil { c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight, - zap.String("error", err.Error())) 
+ zap.Error(err)) return nil } @@ -404,7 +404,7 @@ func (c *Client) Wait(ctx context.Context, n uint32) error { newHeight, err = c.rpcActor.GetBlockCount() if err != nil { c.logger.Error(ctx, logs.ClientCantGetBlockchainHeight243, - zap.String("error", err.Error())) + zap.Error(err)) return nil } diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index ed77352ec..83f8bee07 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -136,7 +136,7 @@ func (l *listener) Listen(ctx context.Context) { defer l.wg.Done() if err := l.listen(ctx, nil); err != nil { l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), + zap.Error(err), ) } }) @@ -154,7 +154,7 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { defer l.wg.Done() if err := l.listen(ctx, intError); err != nil { l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.String("error", err.Error()), + zap.Error(err), ) l.sendError(ctx, intError, err) } @@ -342,7 +342,7 @@ func (l *listener) parseAndHandleNotification(ctx context.Context, notifyEvent * event, err := parser(notifyEvent) if err != nil { log.Warn(ctx, logs.EventCouldNotParseNotificationEvent, - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -375,13 +375,13 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe case errors.Is(err, ErrTXAlreadyHandled): case errors.As(err, &expErr): l.log.Warn(ctx, logs.EventSkipExpiredMainTXNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), zap.Uint32("current_block_height", expErr.CurrentBlockHeight), zap.Uint32("fallback_tx_not_valid_before_height", expErr.FallbackTXNotValidBeforeHeight), ) default: l.log.Warn(ctx, logs.EventCouldNotPrepareAndValidateNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -413,7 +413,7 @@ func (l *listener) parseAndHandleNotary(ctx context.Context, nr *result.NotaryRe event, err := parser(notaryEvent) if err != nil { log.Warn(ctx, logs.EventCouldNotParseNotaryEvent, - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 058959c63..0088be400 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -77,7 +77,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *logger.Logger) Handle }) if err != nil { log.Warn(ctx, logs.EventCouldNotSubmitHandlerToWorkerPool, - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go index 8aaff670c..57e33fde7 100644 --- a/pkg/services/object/delete/delete.go +++ b/pkg/services/object/delete/delete.go @@ -36,7 +36,7 @@ func (exec *execCtx) execute(ctx context.Context) error { exec.log.Debug(ctx, logs.ServingRequest) if err := exec.executeLocal(ctx); err != nil { - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) return err } diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index 999a3cc9e..e82f999cf 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -73,7 +73,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { select { case <-ctx.Done(): exec.log.Debug(ctx, logs.InterruptPlacementIterationByContext, - zap.String("error", ctx.Err().Error())) + zap.Error(ctx.Err())) return 
default: } @@ -86,14 +86,14 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { c, err := exec.svc.clientConstructor.get(info) if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotConstructRemoteNodeClient, zap.Error(err)) return } ids, err := c.searchObjects(ctx, exec, info) if err != nil { exec.log.Debug(ctx, logs.SearchRemoteOperationFailed, - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -102,7 +102,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { err = exec.writeIDList(ids) mtx.Unlock() if err != nil { - exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchCouldNotWriteObjectIdentifiers, zap.Error(err)) return } }(i) diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go index bc59d0394..ec65ab06a 100644 --- a/pkg/services/object/search/local.go +++ b/pkg/services/object/search/local.go @@ -11,7 +11,7 @@ import ( func (exec *execCtx) executeLocal(ctx context.Context) error { ids, err := exec.svc.localStorage.search(ctx, exec) if err != nil { - exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.SearchLocalOperationFailed, zap.Error(err)) return err } diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go index e24da975d..76c091f85 100644 --- a/pkg/services/object/search/search.go +++ b/pkg/services/object/search/search.go @@ -38,7 +38,7 @@ func (exec *execCtx) execute(ctx context.Context) error { func (exec *execCtx) logResult(ctx context.Context, err error) { switch { default: - exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.String("error", err.Error())) + exec.log.Debug(ctx, logs.OperationFinishedWithError, zap.Error(err)) case err == nil: exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) } diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index a9f875d8d..2c1e053ac 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -14,7 +14,7 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net l.Error(ctx, logs.UtilObjectServiceError, zap.String("node", network.StringifyGroup(node)), zap.String("request", req), - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -22,6 +22,6 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, zap.String("request", req), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 2e5e54dfd..bdfc4344b 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -153,7 +153,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe } else { p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", addr), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index db640e323..f6d3b9ea1 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -131,7 +131,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n // consider 
maintenance mode has object, but do not drop local copy p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode))) } else { - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.String("error", err.Error())) + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, zap.Stringer("object", objInfo.Address), zap.Error(err)) } return ecChunkProcessResult{ diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index 80a87ade9..bd830d04e 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -61,7 +61,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) { if err != nil && !skipMap.addSeenError(addr.Address.Container(), err) { p.log.Error(ctx, logs.PolicerUnableToProcessObj, zap.Stringer("object", addr.Address), - zap.String("error", err.Error())) + zap.Error(err)) } p.cache.Add(addr.Address, time.Now()) p.objsInWork.remove(addr.Address) diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 2120312f6..69395bb02 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -76,7 +76,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T if err != nil { log.Error(ctx, logs.ReplicatorCouldNotReplicateObject, - zap.String("error", err.Error()), + zap.Error(err), ) } else { log.Debug(ctx, logs.ReplicatorObjectSuccessfullyReplicated) diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 0c5bde078..e0085d73a 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -59,7 +59,7 @@ func (s *Service) localReplicationWorker(ctx context.Context) { err := s.forest.TreeApply(ctx, op.cid, op.treeID, &op.Move, false) if err != nil { s.log.Error(ctx, logs.TreeFailedToApplyReplicatedOperation, - zap.String("err", err.Error())) + zap.Error(err)) } span.End() } @@ -155,7 +155,7 @@ func (s *Service) replicateLoop(ctx context.Context) { err := s.replicate(op) if err != nil { s.log.Error(ctx, logs.TreeErrorDuringReplication, - zap.String("err", err.Error()), + zap.Error(err), zap.Stringer("cid", op.cid), zap.String("treeID", op.treeID)) } From 7ac354271462ec6ec59d5d02f2e4cfad0c693572 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 16 Dec 2024 13:59:08 +0300 Subject: [PATCH 275/591] [#1563] ape: Introduce `ChainRouterError` error type Signed-off-by: Airat Arifullin --- pkg/services/common/ape/checker.go | 2 +- pkg/services/common/ape/error.go | 33 ++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 pkg/services/common/ape/error.go diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index 30580da12..86021c3db 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -103,7 +103,7 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { if found && status == apechain.Allow { return nil } - return fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String()) + return newChainRouterError(prm.Request.Operation(), status) } // isValidBearer checks whether bearer token was correctly signed by authorized diff --git a/pkg/services/common/ape/error.go b/pkg/services/common/ape/error.go new file mode 100644 index 000000000..d3c381de7 --- /dev/null +++ b/pkg/services/common/ape/error.go @@ 
-0,0 +1,33 @@ +package ape + +import ( + "fmt" + + apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" +) + +// ChainRouterError is returned when chain router validation prevents +// the APE request from being processed (no rule found, access denied, etc.). +type ChainRouterError struct { + operation string + status apechain.Status +} + +func (e *ChainRouterError) Error() string { + return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.Operation(), e.Status()) +} + +func (e *ChainRouterError) Operation() string { + return e.operation +} + +func (e *ChainRouterError) Status() apechain.Status { + return e.status +} + +func newChainRouterError(operation string, status apechain.Status) *ChainRouterError { + return &ChainRouterError{ + operation: operation, + status: status, + } +} From 1a091ea7bbc2c1f01bd3026b7be338261338a4d9 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 16 Dec 2024 14:10:18 +0300 Subject: [PATCH 276/591] [#1563] object: Wrap only `ChainRouterError` erros with `ObjectAccessDenied` * Such wrapping helps to differentiate logical check errors and server internal errors. Signed-off-by: Airat Arifullin --- pkg/services/object/ape/errors.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go index 1b2024ed5..6e458b384 100644 --- a/pkg/services/object/ape/errors.go +++ b/pkg/services/object/ape/errors.go @@ -1,10 +1,19 @@ package ape import ( + "errors" + + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) func toStatusErr(err error) error { + var chRouterErr *checkercore.ChainRouterError + if !errors.As(err, &chRouterErr) { + errServerInternal := &apistatus.ServerInternal{} + apistatus.WriteInternalServerErr(errServerInternal, err) + return errServerInternal + } errAccessDenied := &apistatus.ObjectAccessDenied{} errAccessDenied.WriteReason("ape denied request: " + err.Error()) return errAccessDenied From 6e82661c3556fa64ee11310258c7b980870a649e Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 16 Dec 2024 14:17:37 +0300 Subject: [PATCH 277/591] [#1563] tree: Wrap only `ChainRouterError` erros with `ObjectAccessDenied` * Such wrapping helps to differentiate logical check errors and server internal errors. 
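For illustration, the rule this commit (and the previous object-service commit) applies can be
reduced to the following self-contained sketch. It is not the service code: `chainRouterError`
and the plain status strings below stand in for `checkercore.ChainRouterError` and the SDK
`apistatus` types used in the diffs.

```go
package main

import (
	"errors"
	"fmt"
)

// chainRouterError stands in for checkercore.ChainRouterError: a logical
// "denied by policy" verdict, as opposed to an unexpected backend failure.
type chainRouterError struct{ op, status string }

func (e *chainRouterError) Error() string {
	return fmt.Sprintf("access to operation %s is denied by access policy engine: %s", e.op, e.status)
}

// toStatus mirrors the decision added to toStatusErr/apeErr: only a chain
// router verdict is reported as access denied, everything else is internal.
func toStatus(err error) string {
	var chErr *chainRouterError
	if !errors.As(err, &chErr) {
		return "INTERNAL: " + err.Error()
	}
	return "ACCESS_DENIED: ape denied request: " + err.Error()
}

func main() {
	fmt.Println(toStatus(&chainRouterError{op: "PutObject", status: "Access denied"}))
	fmt.Println(toStatus(errors.New("morph: connection lost")))
}
```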
Signed-off-by: Airat Arifullin --- pkg/services/tree/signature.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 80f5b3590..b0f00615a 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -9,6 +9,7 @@ import ( "fmt" core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -70,6 +71,12 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, } func apeErr(err error) error { + var chRouterErr *checkercore.ChainRouterError + if !errors.As(err, &chRouterErr) { + errServerInternal := &apistatus.ServerInternal{} + apistatus.WriteInternalServerErr(errServerInternal, err) + return errServerInternal + } errAccessDenied := &apistatus.ObjectAccessDenied{} errAccessDenied.WriteReason(err.Error()) return errAccessDenied From b6c8ebf4930174367a08ff546fb2354162fc6d4f Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Thu, 12 Dec 2024 17:46:30 +0300 Subject: [PATCH 278/591] [#1453] container: Replace `sort.Slice` with `slices.SortFunc` * Replaced `sort.Slice` with `slices.SortFunc` in `ListContainersRes.SortedIDList()` as it is a bit faster, according to 15102e6dfd. Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/internal/client/client.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 948d61f36..851cf5ccc 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -9,7 +9,6 @@ import ( "io" "os" "slices" - "sort" "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" @@ -78,9 +77,8 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - sort.Slice(list, func(i, j int) bool { - lhs, rhs := list[i].EncodeToString(), list[j].EncodeToString() - return strings.Compare(lhs, rhs) < 0 + slices.SortFunc(list, func(lhs, rhs cid.ID) int { + return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString()) }) return list } From df05057ed46632e7746fcaa26731987a9070b2e5 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 28 Oct 2024 18:10:07 +0300 Subject: [PATCH 279/591] [#1452] container: Add ListStream method * Added new method for listing containers to container service. It opens stream and sends containers in batches. * Added TransportSplitter wrapper around ExecutionService to split container ID list read from contract in parts that are smaller than grpc max message size. Batch size can be changed in node configuration file (as in example config file). * Changed `container list` implementaion in cli: now ListStream is called by default. Old List is called only if ListStream is not implemented. * Changed `internalclient.ListContainersPrm`.`Account` to `OwnerID` since `client.PrmContainerList`.`Account` was renamed to `OwnerID` in sdk. 
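For illustration, the batching performed on the server side can be reduced to the following
simplified sketch. It is not the actual `TransportSplitter` implementation: string IDs and a
callback stand in for `cid.ID` values and the stream's `Send` method.

```go
package main

import "fmt"

// sendBatches sends a (possibly huge) ID list in fixed-size chunks so that
// no single stream message exceeds the configured batch size.
func sendBatches(ids []string, batchSize int, send func(batch []string) error) error {
	for start := 0; start < len(ids); start += batchSize {
		end := start + batchSize
		if end > len(ids) {
			end = len(ids)
		}
		if err := send(ids[start:end]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ids := []string{"c1", "c2", "c3", "c4", "c5"}
	_ = sendBatches(ids, 2, func(batch []string) error {
		fmt.Println("stream message:", batch)
		return nil
	})
}
```

With the default `batch_size` of 1000 (500 in the example configs), a listing of N containers
arrives in ceil(N / batch_size) stream messages instead of one potentially oversized response.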
Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/internal/client/client.go | 19 ++++ cmd/frostfs-cli/modules/container/list.go | 77 ++++++++++------ cmd/frostfs-node/config.go | 7 +- .../config/container/container.go | 27 ++++++ .../config/container/container_test.go | 27 ++++++ cmd/frostfs-node/container.go | 6 +- config/example/node.env | 3 + config/example/node.json | 5 + config/example/node.yaml | 4 + go.mod | 2 +- go.sum | 4 +- .../transport/container/grpc/service.go | 23 +++++ pkg/services/container/ape.go | 73 +++++++++++++++ pkg/services/container/ape_test.go | 5 + pkg/services/container/audit.go | 11 +++ pkg/services/container/executor.go | 9 ++ pkg/services/container/morph/executor.go | 32 +++++++ pkg/services/container/server.go | 8 ++ pkg/services/container/sign.go | 37 ++++++++ pkg/services/container/transport_splitter.go | 92 +++++++++++++++++++ 20 files changed, 435 insertions(+), 36 deletions(-) create mode 100644 cmd/frostfs-node/config/container/container.go create mode 100644 cmd/frostfs-node/config/container/container_test.go create mode 100644 pkg/services/container/transport_splitter.go diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 851cf5ccc..ceae36ae7 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -83,6 +83,25 @@ func (x ListContainersRes) SortedIDList() []cid.ID { return list } +func ListContainersStream(ctx context.Context, prm ListContainersPrm, processCnr func(id cid.ID) bool) (err error) { + cliPrm := &client.PrmContainerListStream{ + XHeaders: prm.XHeaders, + OwnerID: prm.OwnerID, + Session: prm.Session, + } + rdr, err := prm.cli.ContainerListInit(ctx, *cliPrm) + if err != nil { + return fmt.Errorf("init container list: %w", err) + } + + err = rdr.Iterate(processCnr) + if err != nil { + return fmt.Errorf("read container list: %w", err) + } + + return +} + // PutContainerPrm groups parameters of PutContainer operation. type PutContainerPrm struct { Client *client.Client diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index f01e4db4d..bbb8da840 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -6,8 +6,11 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // flags of list command. 
@@ -51,44 +54,60 @@ var listContainersCmd = &cobra.Command{ var prm internalclient.ListContainersPrm prm.SetClient(cli) - prm.Account = idUser - - res, err := internalclient.ListContainers(cmd.Context(), prm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - + prm.OwnerID = idUser prmGet := internalclient.GetContainerPrm{ Client: cli, } + var containerIDs []cid.ID + + err := internalclient.ListContainersStream(cmd.Context(), prm, func(id cid.ID) bool { + printContainer(cmd, prmGet, id) + return false + }) + if err == nil { + return + } + + if e, ok := status.FromError(err); ok && e.Code() == codes.Unimplemented { + res, err := internalclient.ListContainers(cmd.Context(), prm) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + containerIDs = res.SortedIDList() + } else { + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + } - containerIDs := res.SortedIDList() for _, cnrID := range containerIDs { - if flagVarListName == "" && !flagVarListPrintAttr { - cmd.Println(cnrID.String()) - continue - } - - prmGet.ClientParams.ContainerID = &cnrID - res, err := internalclient.GetContainer(cmd.Context(), prmGet) - if err != nil { - cmd.Printf(" failed to read attributes: %v\n", err) - continue - } - - cnr := res.Container() - if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { - continue - } - cmd.Println(cnrID.String()) - - if flagVarListPrintAttr { - cnr.IterateUserAttributes(func(key, val string) { - cmd.Printf(" %s: %s\n", key, val) - }) - } + printContainer(cmd, prmGet, cnrID) } }, } +func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, id cid.ID) { + if flagVarListName == "" && !flagVarListPrintAttr { + cmd.Println(id.String()) + return + } + + prmGet.ClientParams.ContainerID = &id + res, err := internalclient.GetContainer(cmd.Context(), prmGet) + if err != nil { + cmd.Printf(" failed to read attributes: %v\n", err) + return + } + + cnr := res.Container() + if cnrName := containerSDK.Name(cnr); flagVarListName != "" && cnrName != flagVarListName { + return + } + cmd.Println(id.String()) + + if flagVarListPrintAttr { + cnr.IterateUserAttributes(func(key, val string) { + cmd.Printf(" %s: %s\n", key, val) + }) + } +} + func initContainerListContainersCmd() { commonflags.Init(listContainersCmd) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 6950c9e24..40be8f45f 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -606,9 +606,10 @@ type cfgAccounting struct { type cfgContainer struct { scriptHash neogoutil.Uint160 - parsers map[event.Type]event.NotificationParser - subscribers map[event.Type][]event.Handler - workerPool util.WorkerPool // pool for asynchronous handlers + parsers map[event.Type]event.NotificationParser + subscribers map[event.Type][]event.Handler + workerPool util.WorkerPool // pool for asynchronous handlers + containerBatchSize uint32 } type cfgFrostfsID struct { diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go new file mode 100644 index 000000000..b0b8043d6 --- /dev/null +++ b/cmd/frostfs-node/config/container/container.go @@ -0,0 +1,27 @@ +package containerconfig + +import "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + +const ( + subsection = "container" + listStreamSubsection = "list_stream" + + // ContainerBatchSizeDefault represents he maximum amount of containers to send via stream at once. 
+ ContainerBatchSizeDefault = 1000 +) + +// ContainerBatchSize returns the value of "batch_size" config parameter +// from "list_stream" subsection of "container" section. +// +// Returns ContainerBatchSizeDefault if the value is missing or if +// the value is not positive integer. +func ContainerBatchSize(c *config.Config) uint32 { + if c.Sub(subsection).Sub(listStreamSubsection).Value("batch_size") == nil { + return ContainerBatchSizeDefault + } + size := config.Uint32Safe(c.Sub(subsection).Sub(listStreamSubsection), "batch_size") + if size == 0 { + return ContainerBatchSizeDefault + } + return size +} diff --git a/cmd/frostfs-node/config/container/container_test.go b/cmd/frostfs-node/config/container/container_test.go new file mode 100644 index 000000000..744cd3295 --- /dev/null +++ b/cmd/frostfs-node/config/container/container_test.go @@ -0,0 +1,27 @@ +package containerconfig_test + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestContainerSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + require.Equal(t, uint32(containerconfig.ContainerBatchSizeDefault), containerconfig.ContainerBatchSize(empty)) + }) + + const path = "../../../../config/example/node" + fileConfigTest := func(c *config.Config) { + require.Equal(t, uint32(500), containerconfig.ContainerBatchSize(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 7d558dacb..fb2550a03 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -5,6 +5,7 @@ import ( "context" "net" + containerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/container" morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" @@ -47,6 +48,7 @@ func initContainerService(_ context.Context, c *cfg) { } c.shared.frostfsidClient = frostfsIDSubjectProvider + c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), @@ -56,7 +58,9 @@ func initContainerService(_ context.Context, c *cfg) { &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, - containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc), + containerService.NewSplitterService( + c.cfgContainer.containerBatchSize, c.respSvc, + containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), ), ) service = containerService.NewAuditService(service, c.log, c.audit) diff --git a/config/example/node.env b/config/example/node.env index f470acf3e..b2a0633a9 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -83,6 +83,9 @@ FROSTFS_POLICER_HEAD_TIMEOUT=15s FROSTFS_REPLICATOR_PUT_TIMEOUT=15s FROSTFS_REPLICATOR_POOL_SIZE=10 +# Container service 
section +FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 + # Object service section FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 diff --git a/config/example/node.json b/config/example/node.json index dba3bad8b..f3192ac2f 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -124,6 +124,11 @@ "pool_size": 10, "put_timeout": "15s" }, + "container": { + "list_stream": { + "batch_size": "500" + } + }, "object": { "delete": { "tombstone_lifetime": 10 diff --git a/config/example/node.yaml b/config/example/node.yaml index 8f9300b4a..a179b4704 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -108,6 +108,10 @@ replicator: put_timeout: 15s # timeout for the Replicator PUT remote operation pool_size: 10 # maximum amount of concurrent replications +container: + list_stream: + batch_size: 500 # container_batch_size is the maximum amount of containers to send via stream at once + object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs diff --git a/go.mod b/go.mod index 6ac37d343..8f4053872 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index e084c2445..d63396202 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d h1:FpXI+mOrmJk3t2MKQFZuhLjCHDyDeo5rtP1WXl7gUWc= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241206094944-81c423e7094d/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 h1:MH9uHZFZNyUCL+YChiDcVeXPjhTDcFDeoGr8Mc8NY9M= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go index 49d083a90..8cbf8d9c3 100644 --- a/pkg/network/transport/container/grpc/service.go +++ b/pkg/network/transport/container/grpc/service.go @@ -80,3 +80,26 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con return 
resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil } + +type containerStreamerV2 struct { + containerGRPC.ContainerService_ListStreamServer +} + +func (s *containerStreamerV2) Send(resp *container.ListStreamResponse) error { + return s.ContainerService_ListStreamServer.Send( + resp.ToGRPCMessage().(*containerGRPC.ListStreamResponse), + ) +} + +// ListStream converts gRPC ListRequest message and server-side stream and overtakes its data +// to gRPC stream. +func (s *Server) ListStream(req *containerGRPC.ListStreamRequest, gStream containerGRPC.ContainerService_ListStreamServer) error { + listReq := new(container.ListStreamRequest) + if err := listReq.FromGRPCMessage(req); err != nil { + return err + } + + return s.srv.ListStream(listReq, &containerStreamerV2{ + ContainerService_ListStreamServer: gStream, + }) +} diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 2cdb30b45..493452fa6 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -175,6 +175,79 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co return nil, apeErr(nativeschema.MethodListContainers, s) } +func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListStream) error { + ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") + defer span.End() + + role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + if err != nil { + return err + } + + reqProps := map[string]string{ + nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), + nativeschema.PropertyKeyActorRole: role, + } + + reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + if err != nil { + return err + } + if p, ok := peer.FromContext(ctx); ok { + if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok { + reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String() + } + } + + namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) + if err != nil { + return fmt.Errorf("could not get owner namespace: %w", err) + } + if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { + return err + } + + request := aperequest.NewRequest( + nativeschema.MethodListContainers, + aperequest.NewResource( + resourceName(namespace, ""), + make(map[string]string), + ), + reqProps, + ) + + groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + if err != nil { + return fmt.Errorf("failed to get group ids: %w", err) + } + + // Policy contract keeps group related chains as namespace-group pair. 
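+	// Each group ID is therefore prefixed with its namespace before it is added to the request target below.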
+ for i := range groups { + groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i]) + } + + rt := policyengine.NewRequestTargetWithNamespace(namespace) + rt.User = &policyengine.Target{ + Type: policyengine.User, + Name: fmt.Sprintf("%s:%s", namespace, pk.Address()), + } + rt.Groups = make([]policyengine.Target, len(groups)) + for i := range groups { + rt.Groups[i] = policyengine.GroupTarget(groups[i]) + } + + s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request) + if err != nil { + return err + } + + if found && s == apechain.Allow { + return ac.next.ListStream(req, stream) + } + + return apeErr(nativeschema.MethodListContainers, s) +} + func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") defer span.End() diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index b6b42a559..513ffff02 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -1079,6 +1079,11 @@ func (s *srvStub) List(context.Context, *container.ListRequest) (*container.List return &container.ListResponse{}, nil } +func (s *srvStub) ListStream(*container.ListStreamRequest, ListStream) error { + s.calls["ListStream"]++ + return nil +} + func (s *srvStub) Put(context.Context, *container.PutRequest) (*container.PutResponse, error) { s.calls["Put"]++ return &container.PutResponse{}, nil diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go index 411eb4863..b235efa3c 100644 --- a/pkg/services/container/audit.go +++ b/pkg/services/container/audit.go @@ -63,6 +63,17 @@ func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*c return res, err } +// ListStream implements Server. +func (a *auditService) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := a.next.ListStream(req, stream) + if !a.enabled.Load() { + return err + } + audit.LogRequest(stream.Context(), a.log, container_grpc.ContainerService_ListStream_FullMethodName, req, + audit.TargetFromRef(req.GetBody().GetOwnerID(), &user.ID{}), err == nil) + return err +} + // Put implements Server. 
func (a *auditService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { res, err := a.next.Put(ctx, req) diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go index 70234d3de..cdd0d2514 100644 --- a/pkg/services/container/executor.go +++ b/pkg/services/container/executor.go @@ -14,6 +14,7 @@ type ServiceExecutor interface { Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error) Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error) List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error) + ListStream(context.Context, *container.ListStreamRequest, ListStream) error } type executorSvc struct { @@ -93,3 +94,11 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co s.respSvc.SetMeta(resp) return resp, nil } + +func (s *executorSvc) ListStream(req *container.ListStreamRequest, stream ListStream) error { + err := s.exec.ListStream(stream.Context(), req, stream) + if err != nil { + return fmt.Errorf("could not execute ListStream request: %w", err) + } + return nil +} diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index 211f469f3..e9d1606f1 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -200,3 +200,35 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return res, nil } + +func (s *morphExecutor) ListStream(_ context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { + body := req.GetBody() + idV2 := body.GetOwnerID() + if idV2 == nil { + return errMissingUserID + } + + var id user.ID + + err := id.ReadFromV2(*idV2) + if err != nil { + return fmt.Errorf("invalid user ID: %w", err) + } + + cnrs, err := s.rdr.ContainersOf(&id) + if err != nil { + return err + } + + cidList := make([]refs.ContainerID, len(cnrs)) + for i := range cnrs { + cnrs[i].WriteToV2(&cidList[i]) + } + + resBody := new(container.ListStreamResponseBody) + resBody.SetContainerIDs(cidList) + r := new(container.ListStreamResponse) + r.SetBody(resBody) + + return stream.Send(r) +} diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go index 78fd3d34c..d9208077d 100644 --- a/pkg/services/container/server.go +++ b/pkg/services/container/server.go @@ -3,6 +3,7 @@ package container import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" ) @@ -12,4 +13,11 @@ type Server interface { Get(context.Context, *container.GetRequest) (*container.GetResponse, error) Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error) List(context.Context, *container.ListRequest) (*container.ListResponse, error) + ListStream(*container.ListStreamRequest, ListStream) error +} + +// ListStream is an interface of FrostFS API v2 compatible search streamer. 
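+// For the container service it carries batches of container IDs to the client as ListStreamResponse messages.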
+type ListStream interface { + util.ServerStream + Send(*container.ListStreamResponse) error } diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go index c478c0e1c..85fe7ae87 100644 --- a/pkg/services/container/sign.go +++ b/pkg/services/container/sign.go @@ -56,3 +56,40 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req)) return resp, s.sigSvc.SignResponse(resp, err) } + +func (s *signService) ListStream(req *container.ListStreamRequest, stream ListStream) error { + if err := s.sigSvc.VerifyRequest(req); err != nil { + resp := new(container.ListStreamResponse) + _ = s.sigSvc.SignResponse(resp, err) + return stream.Send(resp) + } + + ss := &listStreamSigner{ + ListStream: stream, + sigSvc: s.sigSvc, + } + err := s.svc.ListStream(req, ss) + if err != nil || !ss.nonEmptyResp { + return ss.send(new(container.ListStreamResponse), err) + } + return nil +} + +type listStreamSigner struct { + ListStream + sigSvc *util.SignService + + nonEmptyResp bool // set on first Send call +} + +func (s *listStreamSigner) Send(resp *container.ListStreamResponse) error { + s.nonEmptyResp = true + return s.send(resp, nil) +} + +func (s *listStreamSigner) send(resp *container.ListStreamResponse, err error) error { + if err := s.sigSvc.SignResponse(resp, err); err != nil { + return err + } + return s.ListStream.Send(resp) +} diff --git a/pkg/services/container/transport_splitter.go b/pkg/services/container/transport_splitter.go new file mode 100644 index 000000000..4f8708da7 --- /dev/null +++ b/pkg/services/container/transport_splitter.go @@ -0,0 +1,92 @@ +package container + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" +) + +type ( + TransportSplitter struct { + next Server + + respSvc *response.Service + cnrAmount uint32 + } + + listStreamMsgSizeCtrl struct { + util.ServerStream + stream ListStream + respSvc *response.Service + cnrAmount uint32 + } +) + +func NewSplitterService(cnrAmount uint32, respSvc *response.Service, next Server) Server { + return &TransportSplitter{ + next: next, + respSvc: respSvc, + cnrAmount: cnrAmount, + } +} + +func (s *TransportSplitter) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { + return s.next.Put(ctx, req) +} + +func (s *TransportSplitter) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { + return s.next.Delete(ctx, req) +} + +func (s *TransportSplitter) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { + return s.next.Get(ctx, req) +} + +func (s *TransportSplitter) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { + return s.next.List(ctx, req) +} + +func (s *TransportSplitter) ListStream(req *container.ListStreamRequest, stream ListStream) error { + return s.next.ListStream(req, &listStreamMsgSizeCtrl{ + ServerStream: stream, + stream: stream, + respSvc: s.respSvc, + cnrAmount: s.cnrAmount, + }) +} + +func (s *listStreamMsgSizeCtrl) Send(resp *container.ListStreamResponse) error { + s.respSvc.SetMeta(resp) + body := resp.GetBody() + ids := body.GetContainerIDs() + + var newResp *container.ListStreamResponse + + for { + if newResp == nil { + newResp = new(container.ListStreamResponse) + newResp.SetBody(body) + } + + cut 
:= min(s.cnrAmount, uint32(len(ids))) + + body.SetContainerIDs(ids[:cut]) + newResp.SetMetaHeader(resp.GetMetaHeader()) + newResp.SetVerificationHeader(resp.GetVerificationHeader()) + + if err := s.stream.Send(newResp); err != nil { + return fmt.Errorf("TransportSplitter: %w", err) + } + + ids = ids[cut:] + + if len(ids) == 0 { + break + } + } + + return nil +} From bed49e6ace1b975e0c0d4a53d82540ad35e90bf3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 18 Dec 2024 10:01:19 +0300 Subject: [PATCH 280/591] [#1569] cli: Make `--range` flag required in `object hash` Previously, `object head` was used if no range was provided. This is wrong on multiple levels: 1. We print an error if the checksum is missing in header, even though taking hash is possible. 2. We silently ignore --salt parameter. 3. `--range` is required for Object.RANGEHASH RPC, custom logic for one specific usecase has no value. So we make it required and make CLI command follow more closely the FrostFS API. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/object/hash.go | 31 ++------------------------ 1 file changed, 2 insertions(+), 29 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index 26243e7e7..b18fab5e0 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/spf13/cobra" @@ -43,6 +42,8 @@ func initObjectHashCmd() { _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) flags.String("range", "", "Range to take hash from in the form offset1:length1,...") + _ = objectHashCmd.MarkFlagRequired("range") + flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'") flags.String(getRangeHashSaltFlag, "", "Salt in hex format") } @@ -67,34 +68,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) { cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) tz := typ == hashTz - fullHash := len(ranges) == 0 - if fullHash { - var headPrm internalclient.HeadObjectPrm - headPrm.SetClient(cli) - Prepare(cmd, &headPrm) - headPrm.SetAddress(objAddr) - - // get hash of full payload through HEAD (may be user can do it through dedicated command?) 
- res, err := internalclient.HeadObject(cmd.Context(), headPrm) - commonCmd.ExitOnErr(cmd, "rpc error: %w", err) - - var cs checksum.Checksum - var csSet bool - - if tz { - cs, csSet = res.Header().PayloadHomomorphicHash() - } else { - cs, csSet = res.Header().PayloadChecksum() - } - - if csSet { - cmd.Println(hex.EncodeToString(cs.Value())) - } else { - cmd.Println("Missing checksum in object header.") - } - - return - } var hashPrm internalclient.HashPayloadRangesPrm hashPrm.SetClient(cli) From e44b84c18c428a691b36ec100a41cfa100e4ad8a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 18 Dec 2024 10:07:22 +0300 Subject: [PATCH 281/591] [#1569] cli: Remove unnecessary variable after refactoring Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/object/hash.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index b18fab5e0..461c35f30 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -67,8 +67,6 @@ func getObjectHash(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - tz := typ == hashTz - var hashPrm internalclient.HashPayloadRangesPrm hashPrm.SetClient(cli) Prepare(cmd, &hashPrm) @@ -77,7 +75,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { hashPrm.SetSalt(salt) hashPrm.SetRanges(ranges) - if tz { + if typ == hashTz { hashPrm.TZ() } From bd0197eaa8579ac4aa54bc2ebc7b38a610a38aa2 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 18 Dec 2024 09:38:14 +0300 Subject: [PATCH 282/591] [#1568] storage: Remove "could not/can't/failed to" from error messages Signed-off-by: Evgenii Stratonikov --- .../blobovnicza/control.go | 4 ++-- .../blobovnicza/iterate.go | 2 +- .../blobstor/blobovniczatree/get.go | 4 ++-- .../blobstor/blobovniczatree/get_range.go | 4 ++-- .../blobstor/blobovniczatree/iterate.go | 4 ++-- .../blobstor/blobovniczatree/manager.go | 6 ++--- .../blobstor/fstree/fstree.go | 4 ++-- .../blobstor/memstore/memstore.go | 4 ++-- pkg/local_object_storage/blobstor/mode.go | 2 +- pkg/local_object_storage/blobstor/put.go | 2 +- pkg/local_object_storage/engine/control.go | 10 ++++---- pkg/local_object_storage/engine/evacuate.go | 2 +- pkg/local_object_storage/engine/shards.go | 12 +++++----- pkg/local_object_storage/metabase/control.go | 12 +++++----- pkg/local_object_storage/metabase/counter.go | 22 ++++++++--------- pkg/local_object_storage/metabase/delete.go | 24 +++++++++---------- pkg/local_object_storage/metabase/exists.go | 2 +- pkg/local_object_storage/metabase/get.go | 2 +- .../metabase/graveyard.go | 6 ++--- pkg/local_object_storage/metabase/inhume.go | 2 +- pkg/local_object_storage/metabase/mode.go | 4 ++-- pkg/local_object_storage/metabase/put.go | 20 ++++++++-------- pkg/local_object_storage/metabase/select.go | 2 +- pkg/local_object_storage/metabase/shard_id.go | 8 +++---- pkg/local_object_storage/metabase/upgrade.go | 8 +++---- pkg/local_object_storage/metabase/util.go | 4 ++-- pkg/local_object_storage/metabase/version.go | 2 +- pkg/local_object_storage/pilorama/boltdb.go | 10 ++++---- pkg/local_object_storage/shard/container.go | 4 ++-- pkg/local_object_storage/shard/control.go | 22 ++++++++--------- pkg/local_object_storage/shard/get.go | 2 +- pkg/local_object_storage/shard/id.go | 4 ++-- pkg/local_object_storage/shard/list.go | 12 +++++----- pkg/local_object_storage/shard/put.go | 4 ++-- 
pkg/local_object_storage/shard/select.go | 2 +- .../writecache/iterate.go | 2 +- pkg/local_object_storage/writecache/mode.go | 8 +++---- .../writecache/storage.go | 4 ++-- .../writecache/upgrade.go | 4 ++-- 39 files changed, 128 insertions(+), 128 deletions(-) diff --git a/pkg/local_object_storage/blobovnicza/control.go b/pkg/local_object_storage/blobovnicza/control.go index d0e71a876..4947512cc 100644 --- a/pkg/local_object_storage/blobovnicza/control.go +++ b/pkg/local_object_storage/blobovnicza/control.go @@ -129,7 +129,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error { }) }) if err != nil { - return fmt.Errorf("can't determine DB size: %w", err) + return fmt.Errorf("determine DB size: %w", err) } if (!sizeExists || !itemsCountExists) && !b.boltOptions.ReadOnly { b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMeta, zap.Uint64("size", size), zap.Uint64("items", items)) @@ -140,7 +140,7 @@ func (b *Blobovnicza) initializeCounters(ctx context.Context) error { return saveItemsCount(tx, items) }); err != nil { b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaFailed, zap.Uint64("size", size), zap.Uint64("items", items)) - return fmt.Errorf("can't save blobovnicza's size and items count: %w", err) + return fmt.Errorf("save blobovnicza's size and items count: %w", err) } b.log.Debug(ctx, logs.BlobovniczaSavingCountersToMetaSuccess, zap.Uint64("size", size), zap.Uint64("items", items)) } diff --git a/pkg/local_object_storage/blobovnicza/iterate.go b/pkg/local_object_storage/blobovnicza/iterate.go index 01e5529da..cd33b263c 100644 --- a/pkg/local_object_storage/blobovnicza/iterate.go +++ b/pkg/local_object_storage/blobovnicza/iterate.go @@ -146,7 +146,7 @@ func (b *Blobovnicza) Iterate(ctx context.Context, prm IteratePrm) (IterateRes, if prm.ignoreErrors { return nil } - return fmt.Errorf("could not decode address key: %w", err) + return fmt.Errorf("decode address key: %w", err) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index 1a4f11c29..5d158644e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -115,13 +115,13 @@ func (b *Blobovniczas) getObject(ctx context.Context, blz *blobovnicza.Blobovnic // decompress the data data, err := b.compression.Decompress(res.Object()) if err != nil { - return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) } // unmarshal the object obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) } return common.GetRes{Object: obj, RawData: data}, nil diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 6d06b8e6f..84b9bc55f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -130,13 +130,13 @@ func (b *Blobovniczas) getObjectRange(ctx context.Context, blz *blobovnicza.Blob // decompress the data data, err := b.compression.Decompress(res.Object()) if err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRangeRes{}, fmt.Errorf("decompress object data: %w", err) } // 
unmarshal the object obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRangeRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRangeRes{}, fmt.Errorf("unmarshal the object: %w", err) } from := prm.Range.GetOffset() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index a710cf988..5c2d58ca1 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -49,7 +49,7 @@ func (b *Blobovniczas) Iterate(ctx context.Context, prm common.IteratePrm) (comm zap.String("root_path", b.rootPath)) return nil } - return fmt.Errorf("could not decompress object data: %w", err) + return fmt.Errorf("decompress object data: %w", err) } if prm.Handler != nil { @@ -82,7 +82,7 @@ func (b *Blobovniczas) iterateBlobovniczas(ctx context.Context, ignoreErrors boo zap.String("root_path", b.rootPath)) return false, nil } - return false, fmt.Errorf("could not open blobovnicza %s: %w", p, err) + return false, fmt.Errorf("open blobovnicza %s: %w", p, err) } defer shBlz.Close(ctx) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index 7d44aa5c6..f2f9509ad 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -69,10 +69,10 @@ func (b *sharedDB) Open(ctx context.Context) (*blobovnicza.Blobovnicza, error) { )...) if err := blz.Open(ctx); err != nil { - return nil, fmt.Errorf("could not open blobovnicza %s: %w", b.path, err) + return nil, fmt.Errorf("open blobovnicza %s: %w", b.path, err) } if err := blz.Init(ctx); err != nil { - return nil, fmt.Errorf("could not init blobovnicza %s: %w", b.path, err) + return nil, fmt.Errorf("init blobovnicza %s: %w", b.path, err) } b.refCount++ @@ -127,7 +127,7 @@ func (b *sharedDB) CloseAndRemoveFile(ctx context.Context) error { zap.String("id", b.path), zap.Error(err), ) - return fmt.Errorf("failed to close blobovnicza (path = %s): %w", b.path, err) + return fmt.Errorf("close blobovnicza (path = %s): %w", b.path, err) } b.refCount = 0 diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index a77ad2f93..031b385b2 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -538,7 +538,7 @@ func (t *FSTree) countFiles() (uint64, uint64, error) { }, ) if err != nil { - return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err) + return 0, 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) } return count, size, nil @@ -577,7 +577,7 @@ func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) { }, ) if err != nil { - return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err) + return 0, fmt.Errorf("walk through %s directory: %w", t.RootPath, err) } success = true return result, nil diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 0252c7983..3afef7d18 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -47,13 +47,13 @@ func (s *memstoreImpl) Get(_ context.Context, req common.GetPrm) (common.GetRes, // Decompress the data. 
var err error if data, err = s.compression.Decompress(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not decompress object data: %w", err) + return common.GetRes{}, fmt.Errorf("decompress object data: %w", err) } // Unmarshal the SDK object. obj := objectSDK.New() if err := obj.Unmarshal(data); err != nil { - return common.GetRes{}, fmt.Errorf("could not unmarshal the object: %w", err) + return common.GetRes{}, fmt.Errorf("unmarshal the object: %w", err) } return common.GetRes{Object: obj, RawData: data}, nil diff --git a/pkg/local_object_storage/blobstor/mode.go b/pkg/local_object_storage/blobstor/mode.go index af19e398e..80268fa7a 100644 --- a/pkg/local_object_storage/blobstor/mode.go +++ b/pkg/local_object_storage/blobstor/mode.go @@ -27,7 +27,7 @@ func (b *BlobStor) SetMode(ctx context.Context, m mode.Mode) error { } } if err != nil { - return fmt.Errorf("can't set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) + return fmt.Errorf("set blobstor mode (old=%s, new=%s): %w", b.mode, m, err) } b.mode = m diff --git a/pkg/local_object_storage/blobstor/put.go b/pkg/local_object_storage/blobstor/put.go index 342da28bf..fe9c109dd 100644 --- a/pkg/local_object_storage/blobstor/put.go +++ b/pkg/local_object_storage/blobstor/put.go @@ -52,7 +52,7 @@ func (b *BlobStor) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, e // marshal object data, err := prm.Object.Marshal() if err != nil { - return common.PutRes{}, fmt.Errorf("could not marshal the object: %w", err) + return common.PutRes{}, fmt.Errorf("marshal the object: %w", err) } prm.RawData = data } diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index bd9eb1021..6a416cfd9 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -95,7 +95,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { err := eg.Wait() close(errCh) if err != nil { - return fmt.Errorf("failed to initialize shards: %w", err) + return fmt.Errorf("initialize shards: %w", err) } for res := range errCh { @@ -117,7 +117,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { continue } - return fmt.Errorf("could not initialize shard %s: %w", res.id, res.err) + return fmt.Errorf("initialize shard %s: %w", res.id, res.err) } } @@ -320,7 +320,7 @@ loop: for _, newID := range shardsToAdd { sh, err := e.createShard(ctx, rcfg.shards[newID]) if err != nil { - return fmt.Errorf("could not add new shard with '%s' metabase path: %w", newID, err) + return fmt.Errorf("add new shard with '%s' metabase path: %w", newID, err) } idStr := sh.ID().String() @@ -331,13 +331,13 @@ loop: } if err != nil { _ = sh.Close(ctx) - return fmt.Errorf("could not init %s shard: %w", idStr, err) + return fmt.Errorf("init %s shard: %w", idStr, err) } err = e.addShard(sh) if err != nil { _ = sh.Close(ctx) - return fmt.Errorf("could not add %s shard: %w", idStr, err) + return fmt.Errorf("add %s shard: %w", idStr, err) } e.log.Info(ctx, logs.EngineAddedNewShard, zap.String("id", idStr)) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 2e0344bfb..623f5c941 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -578,7 +578,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, prm EvacuateShardPrm) (bool, 
string, error) { if prm.TreeHandler == nil { - return false, "", fmt.Errorf("failed to evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) + return false, "", fmt.Errorf("evacuate tree '%s' for container %s from shard %s: local evacuation failed, but no remote evacuation available", tree.TreeID, tree.CID, sh.ID()) } return prm.TreeHandler(ctx, tree.CID, tree.TreeID, sh) diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 898f685ec..6d4844b75 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -108,12 +108,12 @@ func (m *metricsWithID) SetEvacuationInProgress(value bool) { func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*shard.ID, error) { sh, err := e.createShard(ctx, opts) if err != nil { - return nil, fmt.Errorf("could not create a shard: %w", err) + return nil, fmt.Errorf("create a shard: %w", err) } err = e.addShard(sh) if err != nil { - return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err) + return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) @@ -124,7 +124,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh func (e *StorageEngine) createShard(ctx context.Context, opts []shard.Option) (*shard.Shard, error) { id, err := generateShardID() if err != nil { - return nil, fmt.Errorf("could not generate shard ID: %w", err) + return nil, fmt.Errorf("generate shard ID: %w", err) } opts = e.appendMetrics(id, opts) @@ -180,7 +180,7 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) if err != nil { - return fmt.Errorf("could not create pool: %w", err) + return fmt.Errorf("create pool: %w", err) } strID := sh.ID().String() @@ -374,7 +374,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not change shard (id:%s) mode to disabled: %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("change shard (id:%s) mode to disabled: %w", sh.ID(), err)) multiErrGuard.Unlock() } @@ -385,7 +385,7 @@ func (e *StorageEngine) closeShards(ctx context.Context, deletedShards []hashedS zap.Error(err), ) multiErrGuard.Lock() - multiErr = errors.Join(multiErr, fmt.Errorf("could not close removed shard (id:%s): %w", sh.ID(), err)) + multiErr = errors.Join(multiErr, fmt.Errorf("close removed shard (id:%s): %w", sh.ID(), err)) multiErrGuard.Unlock() } return nil diff --git a/pkg/local_object_storage/metabase/control.go b/pkg/local_object_storage/metabase/control.go index 07fa7e9cf..c19c65224 100644 --- a/pkg/local_object_storage/metabase/control.go +++ b/pkg/local_object_storage/metabase/control.go @@ -54,7 +54,7 @@ func (db *DB) Open(ctx context.Context, m mode.Mode) error { func (db *DB) openDB(ctx context.Context, mode mode.Mode) error { err := util.MkdirAllX(filepath.Dir(db.info.Path), db.info.Permission) if err != nil { - return fmt.Errorf("can't create dir %s for metabase: %w", db.info.Path, err) + return fmt.Errorf("create dir %s for metabase: %w", db.info.Path, err) } db.log.Debug(ctx, logs.MetabaseCreatedDirectoryForMetabase, zap.String("path", db.info.Path)) @@ -73,7 +73,7 @@ func (db *DB) openBolt(ctx context.Context) error { db.boltDB, err = 
bbolt.Open(db.info.Path, db.info.Permission, db.boltOptions) if err != nil { - return fmt.Errorf("can't open boltDB database: %w", err) + return fmt.Errorf("open boltDB database: %w", err) } db.boltDB.MaxBatchDelay = db.boltBatchDelay db.boltDB.MaxBatchSize = db.boltBatchSize @@ -145,27 +145,27 @@ func (db *DB) init(reset bool) error { if reset { err := tx.DeleteBucket(name) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete static bucket %s: %w", k, err) + return fmt.Errorf("delete static bucket %s: %w", k, err) } } _, err := tx.CreateBucketIfNotExists(name) if err != nil { - return fmt.Errorf("could not create static bucket %s: %w", k, err) + return fmt.Errorf("create static bucket %s: %w", k, err) } } for _, b := range deprecatedBuckets { err := tx.DeleteBucket(b) if err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { - return fmt.Errorf("could not delete deprecated bucket %s: %w", string(b), err) + return fmt.Errorf("delete deprecated bucket %s: %w", string(b), err) } } if !reset { // counters will be recalculated by refill metabase err = syncCounter(tx, false) if err != nil { - return fmt.Errorf("could not sync object counter: %w", err) + return fmt.Errorf("sync object counter: %w", err) } return nil diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index 3ead0d9a0..f29dafe77 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -238,14 +238,14 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { } if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil { - return fmt.Errorf("could not increase phy object counter: %w", err) + return fmt.Errorf("increase phy object counter: %w", err) } if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil { - return fmt.Errorf("could not increase logical object counter: %w", err) + return fmt.Errorf("increase logical object counter: %w", err) } if isUserObject { if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil { - return fmt.Errorf("could not increase user object counter: %w", err) + return fmt.Errorf("increase user object counter: %w", err) } } return db.incContainerObjectCounter(tx, cnrID, isUserObject) @@ -362,7 +362,7 @@ func (db *DB) incContainerObjectCounter(tx *bbolt.Tx, cnrID cid.ID, isUserObject func syncCounter(tx *bbolt.Tx, force bool) error { shardInfoB, err := createBucketLikelyExists(tx, shardInfoBucket) if err != nil { - return fmt.Errorf("could not get shard info bucket: %w", err) + return fmt.Errorf("get shard info bucket: %w", err) } shardObjectCounterInitialized := len(shardInfoB.Get(objectPhyCounterKey)) == 8 && len(shardInfoB.Get(objectLogicCounterKey)) == 8 && @@ -375,7 +375,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { containerCounterB, err := createBucketLikelyExists(tx, containerCounterBucketName) if err != nil { - return fmt.Errorf("could not get container counter bucket: %w", err) + return fmt.Errorf("get container counter bucket: %w", err) } var addr oid.Address @@ -428,7 +428,7 @@ func syncCounter(tx *bbolt.Tx, force bool) error { return nil }) if err != nil { - return fmt.Errorf("could not iterate objects: %w", err) + return fmt.Errorf("iterate objects: %w", err) } return setObjectCounters(counters, shardInfoB, containerCounterB) @@ -448,7 +448,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container value := 
containerCounterValue(count) err := containerCounterB.Put(key, value) if err != nil { - return fmt.Errorf("could not update phy container object counter: %w", err) + return fmt.Errorf("update phy container object counter: %w", err) } } phyData := make([]byte, 8) @@ -456,7 +456,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err := shardInfoB.Put(objectPhyCounterKey, phyData) if err != nil { - return fmt.Errorf("could not update phy object counter: %w", err) + return fmt.Errorf("update phy object counter: %w", err) } logData := make([]byte, 8) @@ -464,7 +464,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectLogicCounterKey, logData) if err != nil { - return fmt.Errorf("could not update logic object counter: %w", err) + return fmt.Errorf("update logic object counter: %w", err) } userData := make([]byte, 8) @@ -472,7 +472,7 @@ func setObjectCounters(counters map[cid.ID]ObjectCounters, shardInfoB, container err = shardInfoB.Put(objectUserCounterKey, userData) if err != nil { - return fmt.Errorf("could not update user object counter: %w", err) + return fmt.Errorf("update user object counter: %w", err) } return nil @@ -492,7 +492,7 @@ func parseContainerCounterKey(buf []byte) (cid.ID, error) { } var cnrID cid.ID if err := cnrID.Decode(buf); err != nil { - return cid.ID{}, fmt.Errorf("failed to decode container ID: %w", err) + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) } return cnrID, nil } diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 62ab1056d..00ee2baa3 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -163,26 +163,26 @@ func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error { if res.phyCount > 0 { err := db.updateShardObjectCounter(tx, phy, res.phyCount, false) if err != nil { - return fmt.Errorf("could not decrease phy object counter: %w", err) + return fmt.Errorf("decrease phy object counter: %w", err) } } if res.logicCount > 0 { err := db.updateShardObjectCounter(tx, logical, res.logicCount, false) if err != nil { - return fmt.Errorf("could not decrease logical object counter: %w", err) + return fmt.Errorf("decrease logical object counter: %w", err) } } if res.userCount > 0 { err := db.updateShardObjectCounter(tx, user, res.userCount, false) if err != nil { - return fmt.Errorf("could not decrease user object counter: %w", err) + return fmt.Errorf("decrease user object counter: %w", err) } } if err := db.updateContainerCounter(tx, res.removedByCnrID, false); err != nil { - return fmt.Errorf("could not decrease container object counter: %w", err) + return fmt.Errorf("decrease container object counter: %w", err) } return nil } @@ -259,7 +259,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) } } return deleteSingleResult{}, nil @@ -280,7 +280,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter if garbageBKT != nil { err := garbageBKT.Delete(addrKey) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove from garbage bucket: %w", err) 
} } @@ -308,7 +308,7 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter // remove object err = db.deleteObject(tx, obj, false) if err != nil { - return deleteSingleResult{}, fmt.Errorf("could not remove object: %w", err) + return deleteSingleResult{}, fmt.Errorf("remove object: %w", err) } if err := deleteECRelatedInfo(tx, garbageBKT, obj, addr.Container(), refCounter); err != nil { @@ -335,12 +335,12 @@ func (db *DB) deleteObject( err = updateListIndexes(tx, obj, delListIndexItem) if err != nil { - return fmt.Errorf("can't remove list indexes: %w", err) + return fmt.Errorf("remove list indexes: %w", err) } err = updateFKBTIndexes(tx, obj, delFKBTIndexItem) if err != nil { - return fmt.Errorf("can't remove fake bucket tree indexes: %w", err) + return fmt.Errorf("remove fake bucket tree indexes: %w", err) } if isParent { @@ -351,7 +351,7 @@ func (db *DB) deleteObject( addrKey := addressKey(object.AddressOf(obj), key) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove from garbage bucket: %w", err) + return fmt.Errorf("remove from garbage bucket: %w", err) } } } @@ -529,7 +529,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. addrKey := addressKey(ecParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } @@ -567,7 +567,7 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. addrKey := addressKey(splitParentAddress, make([]byte, addressKeySize)) err := garbageBKT.Delete(addrKey) if err != nil { - return fmt.Errorf("could not remove EC parent from garbage bucket: %w", err) + return fmt.Errorf("remove EC parent from garbage bucket: %w", err) } } diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 411beb6b3..3133c5480 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -229,7 +229,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo)) if err != nil { - return nil, fmt.Errorf("can't unmarshal split info from root index: %w", err) + return nil, fmt.Errorf("unmarshal split info from root index: %w", err) } return splitInfo, nil diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 1cbf78ab2..af274b245 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -187,7 +187,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD err = child.Unmarshal(bytes.Clone(data)) if err != nil { - return nil, fmt.Errorf("can't unmarshal child with parent: %w", err) + return nil, fmt.Errorf("unmarshal child with parent: %w", err) } par := child.Parent() diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go index b0db952b2..2f23d424c 100644 --- a/pkg/local_object_storage/metabase/graveyard.go +++ b/pkg/local_object_storage/metabase/graveyard.go @@ -177,7 +177,7 @@ type gcHandler struct { func (g gcHandler) handleKV(k, _ []byte) error { o, err := garbageFromKV(k) if err != nil { - return fmt.Errorf("could not parse garbage object: %w", err) + return fmt.Errorf("parse garbage object: %w", err) } return g.h(o) @@ -190,7 
+190,7 @@ type graveyardHandler struct { func (g graveyardHandler) handleKV(k, v []byte) error { o, err := graveFromKV(k, v) if err != nil { - return fmt.Errorf("could not parse grave: %w", err) + return fmt.Errorf("parse grave: %w", err) } return g.h(o) @@ -240,7 +240,7 @@ func (db *DB) iterateDeletedObj(tx *bbolt.Tx, h kvHandler, offset *oid.Address) func garbageFromKV(k []byte) (res GarbageObject, err error) { err = decodeAddressFromKey(&res.addr, k) if err != nil { - err = fmt.Errorf("could not parse address: %w", err) + err = fmt.Errorf("parse address: %w", err) } return diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 5ac0c0be5..99fdec310 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -373,7 +373,7 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck if data != nil { err := targetBucket.Delete(tombKey) if err != nil { - return nil, nil, fmt.Errorf("could not remove grave with tombstone key: %w", err) + return nil, nil, fmt.Errorf("remove grave with tombstone key: %w", err) } } diff --git a/pkg/local_object_storage/metabase/mode.go b/pkg/local_object_storage/metabase/mode.go index ce6ae1004..7edb96384 100644 --- a/pkg/local_object_storage/metabase/mode.go +++ b/pkg/local_object_storage/metabase/mode.go @@ -19,7 +19,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { if !db.mode.NoMetabase() { if err := db.Close(ctx); err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } @@ -31,7 +31,7 @@ func (db *DB) SetMode(ctx context.Context, m mode.Mode) error { err = db.Init(ctx) } if err != nil { - return fmt.Errorf("can't set metabase mode (old=%s, new=%s): %w", db.mode, m, err) + return fmt.Errorf("set metabase mode (old=%s, new=%s): %w", db.mode, m, err) } } diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 6f9dc1bf0..16918c4d9 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -180,18 +180,18 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o err := putUniqueIndexes(tx, obj, si, id) if err != nil { - return fmt.Errorf("can't put unique indexes: %w", err) + return fmt.Errorf("put unique indexes: %w", err) } err = updateListIndexes(tx, obj, putListIndexItem) if err != nil { - return fmt.Errorf("can't put list indexes: %w", err) + return fmt.Errorf("put list indexes: %w", err) } if indexAttributes { err = updateFKBTIndexes(tx, obj, putFKBTIndexItem) if err != nil { - return fmt.Errorf("can't put fake bucket tree indexes: %w", err) + return fmt.Errorf("put fake bucket tree indexes: %w", err) } } @@ -250,7 +250,7 @@ func putRawObjectData(tx *bbolt.Tx, obj *objectSDK.Object, bucketName []byte, ad } rawObject, err := obj.CutPayload().Marshal() if err != nil { - return fmt.Errorf("can't marshal object header: %w", err) + return fmt.Errorf("marshal object header: %w", err) } return putUniqueIndexItem(tx, namedBucketItem{ name: bucketName, @@ -475,7 +475,7 @@ func createBucketLikelyExists[T bucketContainer](tx T, name []byte) (*bbolt.Buck func updateUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem, update func(oldData, newData []byte) ([]byte, error)) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: 
%w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } data, err := update(bkt.Get(item.key), item.val) @@ -492,12 +492,12 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } fkbtRoot, err := createBucketLikelyExists(bkt, item.key) if err != nil { - return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err) + return fmt.Errorf("create fake bucket tree index %v: %w", item.key, err) } return fkbtRoot.Put(item.val, zeroValue) @@ -506,19 +506,19 @@ func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt, err := createBucketLikelyExists(tx, item.name) if err != nil { - return fmt.Errorf("can't create index %v: %w", item.name, err) + return fmt.Errorf("create index %v: %w", item.name, err) } lst, err := decodeList(bkt.Get(item.key)) if err != nil { - return fmt.Errorf("can't decode leaf list %v: %w", item.key, err) + return fmt.Errorf("decode leaf list %v: %w", item.key, err) } lst = append(lst, item.val) encodedLst, err := encodeList(lst) if err != nil { - return fmt.Errorf("can't encode leaf list %v: %w", item.key, err) + return fmt.Errorf("encode leaf list %v: %w", item.key, err) } return bkt.Put(item.key, encodedLst) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index f802036be..9f1b8b060 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -565,7 +565,7 @@ func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filt case v2object.FilterHeaderContainerID: // support deprecated field err := res.cnr.DecodeString(filters[i].Value()) if err != nil { - return filterGroup{}, fmt.Errorf("can't parse container id: %w", err) + return filterGroup{}, fmt.Errorf("parse container id: %w", err) } res.withCnrFilter = true diff --git a/pkg/local_object_storage/metabase/shard_id.go b/pkg/local_object_storage/metabase/shard_id.go index e58115bc8..72618b1a0 100644 --- a/pkg/local_object_storage/metabase/shard_id.go +++ b/pkg/local_object_storage/metabase/shard_id.go @@ -32,13 +32,13 @@ func (db *DB) GetShardID(ctx context.Context, mode metamode.Mode) ([]byte, error } if err := db.openDB(ctx, mode); err != nil { - return nil, fmt.Errorf("failed to open metabase: %w", err) + return nil, fmt.Errorf("open metabase: %w", err) } id, err := db.readShardID() if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return id, metaerr.Wrap(err) @@ -70,7 +70,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err } if err := db.openDB(ctx, mode); err != nil { - return fmt.Errorf("failed to open metabase: %w", err) + return fmt.Errorf("open metabase: %w", err) } err := db.writeShardID(id) @@ -79,7 +79,7 @@ func (db *DB) SetShardID(ctx context.Context, id []byte, mode metamode.Mode) err } if cErr := db.close(); cErr != nil { - err = errors.Join(err, fmt.Errorf("failed to close metabase: %w", cErr)) + err = errors.Join(err, fmt.Errorf("close metabase: %w", cErr)) } return metaerr.Wrap(err) diff --git 
a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index bcf72f440..6eba58c69 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -95,7 +95,7 @@ func compactDB(db *bbolt.DB) error { NoSync: true, }) if err != nil { - return fmt.Errorf("can't open new metabase to compact: %w", err) + return fmt.Errorf("open new metabase to compact: %w", err) } if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil { return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName))) @@ -292,7 +292,7 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i } expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64) if err != nil { - return fmt.Errorf("could not parse expiration epoch: %w", err) + return fmt.Errorf("parse expiration epoch: %w", err) } expirationEpochBucket := b.Bucket(attrValue) attrKeyValueC := expirationEpochBucket.Cursor() @@ -399,7 +399,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([] for _, key := range keys { attr, ok := attributeFromAttributeBucket(key) if !ok { - return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) + return nil, fmt.Errorf("parse attribute key from user attribute bucket key %s", hex.EncodeToString(key)) } if !IsAtrributeIndexed(attr) { keysToDrop = append(keysToDrop, key) @@ -407,7 +407,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([] } contID, ok := cidFromAttributeBucket(key) if !ok { - return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) + return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) } info, err := cs.Info(contID) if err != nil { diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 0a2f91a47..80851f1c4 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -231,11 +231,11 @@ func parseExpirationEpochKey(key []byte) (uint64, cid.ID, oid.ID, error) { epoch := binary.BigEndian.Uint64(key) var cnr cid.ID if err := cnr.Decode(key[epochSize : epochSize+cidSize]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (container ID): %w", err) + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (container ID): %w", err) } var obj oid.ID if err := obj.Decode(key[epochSize+cidSize:]); err != nil { - return 0, cid.ID{}, oid.ID{}, fmt.Errorf("failed to decode expiration epoch to object key (object ID): %w", err) + return 0, cid.ID{}, oid.ID{}, fmt.Errorf("decode expiration epoch to object key (object ID): %w", err) } return epoch, cnr, obj, nil } diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go index 048bb9af6..fbc0f1ad9 100644 --- a/pkg/local_object_storage/metabase/version.go +++ b/pkg/local_object_storage/metabase/version.go @@ -67,7 +67,7 @@ func updateVersion(tx *bbolt.Tx, version uint64) error { b, err := tx.CreateBucketIfNotExists(shardInfoBucket) if err != nil { - return fmt.Errorf("can't create auxiliary bucket: %w", err) + return fmt.Errorf("create auxiliary bucket: %w", err) } return b.Put(versionKey, data) } diff --git a/pkg/local_object_storage/pilorama/boltdb.go 
b/pkg/local_object_storage/pilorama/boltdb.go index 6e68e9986..fecf96f66 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -106,7 +106,7 @@ func (t *boltForest) SetMode(ctx context.Context, m mode.Mode) error { } } if err != nil { - return fmt.Errorf("can't set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) + return fmt.Errorf("set pilorama mode (old=%s, new=%s): %w", t.mode, m, err) } t.mode = m @@ -128,7 +128,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { readOnly := m.ReadOnly() err := util.MkdirAllX(filepath.Dir(t.path), t.perm) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't create dir %s for the pilorama: %w", t.path, err)) + return metaerr.Wrap(fmt.Errorf("create dir %s for the pilorama: %w", t.path, err)) } opts := *bbolt.DefaultOptions @@ -139,7 +139,7 @@ func (t *boltForest) openBolt(m mode.Mode) error { t.db, err = bbolt.Open(t.path, t.perm, &opts) if err != nil { - return metaerr.Wrap(fmt.Errorf("can't open the pilorama DB: %w", err)) + return metaerr.Wrap(fmt.Errorf("open the pilorama DB: %w", err)) } t.db.MaxBatchSize = t.maxBatchSize @@ -1360,7 +1360,7 @@ func (t *boltForest) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, err return nil }) if err != nil { - return nil, metaerr.Wrap(fmt.Errorf("could not list trees: %w", err)) + return nil, metaerr.Wrap(fmt.Errorf("list trees: %w", err)) } success = true return ids, nil @@ -1504,7 +1504,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* var contID cidSDK.ID if err := contID.Decode(k[:32]); err != nil { - return fmt.Errorf("failed to decode containerID: %w", err) + return fmt.Errorf("decode containerID: %w", err) } res.Items = append(res.Items, ContainerIDTreeID{ CID: contID, diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 364649b50..0309f0c81 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -36,7 +36,7 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { - return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err) + return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) } return ContainerSizeRes{ @@ -71,7 +71,7 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { - return ContainerCountRes{}, fmt.Errorf("could not get container counters: %w", err) + return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) } return ContainerCountRes{ diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 78ce241fe..1c1933af5 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { - return fmt.Errorf("could not switch to mode %s", mode.Mode(mode.DegradedReadOnly)) + return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly)) } return nil } @@ -72,7 +72,7 @@ func (s *Shard) Open(ctx context.Context) error { for j := i + 1; j < len(components); j++ { if err := components[j].Open(ctx, m); err != nil { // Other components must be opened, fail. 
- return fmt.Errorf("could not open %T: %w", components[j], err) + return fmt.Errorf("open %T: %w", components[j], err) } } err = s.handleMetabaseFailure(ctx, "open", err) @@ -83,7 +83,7 @@ func (s *Shard) Open(ctx context.Context) error { break } - return fmt.Errorf("could not open %T: %w", component, err) + return fmt.Errorf("open %T: %w", component, err) } } return nil @@ -184,7 +184,7 @@ func (s *Shard) initializeComponents(ctx context.Context, m mode.Mode) error { break } - return fmt.Errorf("could not initialize %T: %w", component, err) + return fmt.Errorf("initialize %T: %w", component, err) } } return nil @@ -205,7 +205,7 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err := s.metaBase.Reset() if err != nil { - return fmt.Errorf("could not reset metabase: %w", err) + return fmt.Errorf("reset metabase: %w", err) } withCount := true @@ -254,12 +254,12 @@ func (s *Shard) refillMetabase(ctx context.Context) error { err = errors.Join(egErr, itErr) if err != nil { - return fmt.Errorf("could not put objects to the meta: %w", err) + return fmt.Errorf("put objects to the meta: %w", err) } err = s.metaBase.SyncCounters() if err != nil { - return fmt.Errorf("could not sync object counters: %w", err) + return fmt.Errorf("sync object counters: %w", err) } success = true @@ -318,7 +318,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) error { var lock objectSDK.Lock if err := lock.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal lock content: %w", err) + return fmt.Errorf("unmarshal lock content: %w", err) } locked := make([]oid.ID, lock.NumberOfMembers()) @@ -328,7 +328,7 @@ func (s *Shard) refillLockObject(ctx context.Context, obj *objectSDK.Object) err id, _ := obj.ID() err := s.metaBase.Lock(ctx, cnr, id, locked) if err != nil { - return fmt.Errorf("could not lock objects: %w", err) + return fmt.Errorf("lock objects: %w", err) } return nil } @@ -337,7 +337,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object tombstone := objectSDK.NewTombstone() if err := tombstone.Unmarshal(obj.Payload()); err != nil { - return fmt.Errorf("could not unmarshal tombstone content: %w", err) + return fmt.Errorf("unmarshal tombstone content: %w", err) } tombAddr := object.AddressOf(obj) @@ -358,7 +358,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object _, err := s.metaBase.Inhume(ctx, inhumePrm) if err != nil { - return fmt.Errorf("could not inhume objects: %w", err) + return fmt.Errorf("inhume objects: %w", err) } return nil } diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 7a31a705e..15d1eb6ba 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -175,7 +175,7 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta mExRes, err := s.metaBase.StorageID(ctx, mPrm) if err != nil { - return nil, true, fmt.Errorf("can't fetch blobovnicza id from metabase: %w", err) + return nil, true, fmt.Errorf("fetch blobovnicza id from metabase: %w", err) } storageID := mExRes.StorageID() diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 6ccae3f53..26492cf01 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -36,7 +36,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { modeDegraded := 
s.GetMode().NoMetabase() if !modeDegraded { if idFromMetabase, err = s.metaBase.GetShardID(ctx, mode.ReadOnly); err != nil { - err = fmt.Errorf("failed to read shard id from metabase: %w", err) + err = fmt.Errorf("read shard id from metabase: %w", err) } } @@ -64,7 +64,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { if len(idFromMetabase) == 0 && !modeDegraded { if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { - err = errors.Join(err, fmt.Errorf("failed to write shard id to metabase: %w", setErr)) + err = errors.Join(err, fmt.Errorf("write shard id to metabase: %w", setErr)) } } return diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index f583ef5d9..c5275dafd 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -109,7 +109,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { lst, err := s.metaBase.Containers(ctx) if err != nil { - return res, fmt.Errorf("can't list stored containers: %w", err) + return res, fmt.Errorf("list stored containers: %w", err) } filters := objectSDK.NewSearchFilters() @@ -149,7 +149,7 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo containers, err := s.metaBase.Containers(ctx) if err != nil { - return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err) + return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) } return ListContainersRes{ @@ -180,7 +180,7 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List metaPrm.SetCursor(prm.cursor) res, err := s.metaBase.ListWithCursor(ctx, metaPrm) if err != nil { - return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err) + return ListWithCursorRes{}, fmt.Errorf("get list of objects: %w", err) } return ListWithCursorRes{ @@ -208,7 +208,7 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai metaPrm.Handler = prm.Handler err := s.metaBase.IterateOverContainers(ctx, metaPrm) if err != nil { - return fmt.Errorf("could not iterate over containers: %w", err) + return fmt.Errorf("iterate over containers: %w", err) } return nil @@ -235,7 +235,7 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv metaPrm.Handler = prm.Handler err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { - return fmt.Errorf("could not iterate over objects: %w", err) + return fmt.Errorf("iterate over objects: %w", err) } return nil @@ -258,7 +258,7 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive metaPrm.ContainerID = prm.ContainerID count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm) if err != nil { - return 0, fmt.Errorf("could not count alive objects in bucket: %w", err) + return 0, fmt.Errorf("count alive objects in bucket: %w", err) } return count, nil diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 1e4643db5..3f23111af 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -81,7 +81,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { res, err = s.blobStor.Put(ctx, putPrm) if err != nil { - return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err) + return PutRes{}, fmt.Errorf("put object to BLOB storage: %w", err) } } @@ -94,7 +94,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) 
(PutRes, error) { if err != nil { // may we need to handle this case in a special way // since the object has been successfully written to BlobStor - return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err) + return PutRes{}, fmt.Errorf("put object to metabase: %w", err) } if res.Inserted { diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 184ca9b71..c7c7e11c2 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -67,7 +67,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { mRes, err := s.metaBase.Select(ctx, selectPrm) if err != nil { - return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err) + return SelectRes{}, fmt.Errorf("select objects from metabase: %w", err) } return SelectRes{ diff --git a/pkg/local_object_storage/writecache/iterate.go b/pkg/local_object_storage/writecache/iterate.go index 9ec039f91..e369fbd50 100644 --- a/pkg/local_object_storage/writecache/iterate.go +++ b/pkg/local_object_storage/writecache/iterate.go @@ -30,7 +30,7 @@ func IterateDB(db *bbolt.DB, f func(oid.Address) error) error { return b.ForEach(func(k, _ []byte) error { err := addr.DecodeString(string(k)) if err != nil { - return fmt.Errorf("could not parse object address: %w", err) + return fmt.Errorf("parse object address: %w", err) } return f(addr) diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go index 73d12fd33..c491be60b 100644 --- a/pkg/local_object_storage/writecache/mode.go +++ b/pkg/local_object_storage/writecache/mode.go @@ -83,7 +83,7 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { } if !shrink { if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("can't close write-cache storage: %w", err) + return fmt.Errorf("close write-cache storage: %w", err) } return nil } @@ -98,16 +98,16 @@ func (c *cache) closeStorage(ctx context.Context, shrink bool) error { if errors.Is(err, errIterationCompleted) { empty = false } else { - return fmt.Errorf("failed to check write-cache items: %w", err) + return fmt.Errorf("check write-cache items: %w", err) } } if err := c.fsTree.Close(ctx); err != nil { - return fmt.Errorf("can't close write-cache storage: %w", err) + return fmt.Errorf("close write-cache storage: %w", err) } if empty { err := os.RemoveAll(c.path) if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to remove write-cache files: %w", err) + return fmt.Errorf("remove write-cache files: %w", err) } } else { c.log.Info(ctx, logs.WritecacheShrinkSkippedNotEmpty) diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go index a0e236cb7..e88566cdf 100644 --- a/pkg/local_object_storage/writecache/storage.go +++ b/pkg/local_object_storage/writecache/storage.go @@ -31,10 +31,10 @@ func (c *cache) openStore(mod mode.ComponentMode) error { fstree.WithFileCounter(c.counter), ) if err := c.fsTree.Open(mod); err != nil { - return fmt.Errorf("could not open FSTree: %w", err) + return fmt.Errorf("open FSTree: %w", err) } if err := c.fsTree.Init(); err != nil { - return fmt.Errorf("could not init FSTree: %w", err) + return fmt.Errorf("init FSTree: %w", err) } return nil diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go index 3a100f1a3..5eb341ba4 100644 --- a/pkg/local_object_storage/writecache/upgrade.go +++ 
b/pkg/local_object_storage/writecache/upgrade.go @@ -25,11 +25,11 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error { return nil } if err != nil { - return fmt.Errorf("could not check write-cache database existence: %w", err) + return fmt.Errorf("check write-cache database existence: %w", err) } db, err := OpenDB(c.path, true, os.OpenFile) if err != nil { - return fmt.Errorf("could not open write-cache database: %w", err) + return fmt.Errorf("open write-cache database: %w", err) } defer func() { _ = db.Close() From 226dd25dd0f464f440248d0d4c7180db0ca4be56 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 18 Dec 2024 09:40:19 +0300 Subject: [PATCH 283/591] [#1568] pilorama: Replace "containerID" with "container ID" in the error message It is "container ID" in every other place. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/pilorama/boltdb.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index fecf96f66..86b19e3af 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1504,7 +1504,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* var contID cidSDK.ID if err := contID.Decode(k[:32]); err != nil { - return fmt.Errorf("decode containerID: %w", err) + return fmt.Errorf("decode container ID: %w", err) } res.Items = append(res.Items, ContainerIDTreeID{ CID: contID, From 51ee132ea311b6227d1f0642ab736b5b8e21a0f7 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 18 Dec 2024 19:27:14 +0300 Subject: [PATCH 284/591] [#1342] network/cache: Add node address to error multiClient Signed-off-by: Alexander Chuprov --- pkg/network/cache/multi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 481d1ea4a..1bcb83259 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -155,7 +155,7 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie group.IterateAddresses(func(addr network.Address) bool { select { case <-ctx.Done(): - firstErr = context.Canceled + firstErr = fmt.Errorf("try %v: %w", addr, context.Canceled) return true default: } From 148d68933bb1f3fda17db2c7f4fe073336554f67 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 19 Dec 2024 16:07:22 +0300 Subject: [PATCH 285/591] [#1573] node: Simplify bootstrapWithState() After #1382 we have no need to use lambdas. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/config.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 40be8f45f..18d3e2454 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1220,9 +1220,9 @@ func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { // bootstrapWithState calls "addPeer" method of the Sidechain Netmap contract // with the binary-encoded information from the current node's configuration. // The state is set using the provided setter which MUST NOT be nil. 
-func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.NodeInfo)) error { +func (c *cfg) bootstrapWithState(ctx context.Context, state netmap.NodeState) error { ni := c.cfgNodeInfo.localInfo - stateSetter(&ni) + ni.SetStatus(state) prm := nmClient.AddPeerPrm{} prm.SetNodeInfo(ni) @@ -1232,9 +1232,7 @@ func (c *cfg) bootstrapWithState(ctx context.Context, stateSetter func(*netmap.N // bootstrapOnline calls cfg.bootstrapWithState with "online" state. func bootstrapOnline(ctx context.Context, c *cfg) error { - return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { - ni.SetStatus(netmap.Online) - }) + return c.bootstrapWithState(ctx, netmap.Online) } // bootstrap calls bootstrapWithState with: @@ -1245,9 +1243,7 @@ func (c *cfg) bootstrap(ctx context.Context) error { st := c.cfgNetmap.state.controlNetmapStatus() if st == control.NetmapStatus_MAINTENANCE { c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithTheMaintenanceState) - return c.bootstrapWithState(ctx, func(ni *netmap.NodeInfo) { - ni.SetStatus(netmap.Maintenance) - }) + return c.bootstrapWithState(ctx, netmap.Maintenance) } c.log.Info(ctx, logs.FrostFSNodeBootstrappingWithOnlineState, From db03742d332681ab3d4626d50e85f5da6c482629 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 20 Dec 2024 15:04:17 +0300 Subject: [PATCH 286/591] [#1578] adm: Reword help message for `morph refill-gas` Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/generate/generate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go index 7af776797..37c5d4a4a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go @@ -159,7 +159,7 @@ func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error } } else { if storageWalletPath == "" { - return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) + return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) } var w *wallet.Wallet From bb9ba1bce2101e44999aeae10815688aec8d7a01 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 23 Dec 2024 09:55:09 +0300 Subject: [PATCH 287/591] [#1578] adm: Remove bool flag from refillGas() Signed-off-by: Evgenii Stratonikov --- .../modules/morph/generate/generate.go | 74 ++++++------------- .../internal/modules/morph/generate/root.go | 32 +++++++- 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go index 37c5d4a4a..388d5c060 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" "github.com/nspcc-dev/neo-go/pkg/smartcontract" @@ -141,60 +140,29 @@ func addMultisigAccount(w *wallet.Wallet, m int, name, password string, pubs key } func generateStorageCreds(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, storageGasConfigFlag, true) -} - 
-func refillGas(cmd *cobra.Command, gasFlag string, createWallet bool) (err error) { - // storage wallet path is not part of the config - storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - // wallet address is not part of the config - walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) - - var gasReceiver util.Uint160 - - if len(walletAddress) != 0 { - gasReceiver, err = address.StringToUint160(walletAddress) - if err != nil { - return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) - } - } else { - if storageWalletPath == "" { - return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) - } - - var w *wallet.Wallet - - if createWallet { - w, err = wallet.NewWallet(storageWalletPath) - } else { - w, err = wallet.NewWalletFromFile(storageWalletPath) - } - - if err != nil { - return fmt.Errorf("can't create wallet: %w", err) - } - - if createWallet { - var password string - - label, _ := cmd.Flags().GetString(storageWalletLabelFlag) - password, err := config.GetStoragePassword(viper.GetViper(), label) - if err != nil { - return fmt.Errorf("can't fetch password: %w", err) - } - - if label == "" { - label = constants.SingleAccountName - } - - if err := w.CreateAccount(label, password); err != nil { - return fmt.Errorf("can't create account: %w", err) - } - } - - gasReceiver = w.Accounts[0].Contract.ScriptHash() + walletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) + w, err := wallet.NewWallet(walletPath) + if err != nil { + return fmt.Errorf("create wallet: %w", err) } + label, _ := cmd.Flags().GetString(storageWalletLabelFlag) + password, err := config.GetStoragePassword(viper.GetViper(), label) + if err != nil { + return fmt.Errorf("can't fetch password: %w", err) + } + + if label == "" { + label = constants.SingleAccountName + } + + if err := w.CreateAccount(label, password); err != nil { + return fmt.Errorf("can't create account: %w", err) + } + return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) +} + +func refillGas(cmd *cobra.Command, gasFlag string, gasReceiver util.Uint160) (err error) { gasStr := viper.GetString(gasFlag) gasAmount, err := helper.ParseGASAmount(gasStr) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go index 3633d9a8e..da9665d22 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go @@ -1,7 +1,12 @@ package generate import ( + "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/nspcc-dev/neo-go/pkg/encoding/address" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -33,7 +38,32 @@ var ( _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) }, RunE: func(cmd *cobra.Command, _ []string) error { - return refillGas(cmd, commonflags.RefillGasAmountFlag, false) + // storage wallet path is not part of the config + storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) + // wallet address is not part of the config + walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) + + var gasReceiver util.Uint160 + + if len(walletAddress) != 0 { + var err error + gasReceiver, err = address.StringToUint160(walletAddress) + if err != nil { + return fmt.Errorf("invalid wallet address %s: %w", walletAddress, 
err) + } + } else { + if storageWalletPath == "" { + return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) + } + + w, err := wallet.NewWalletFromFile(storageWalletPath) + if err != nil { + return fmt.Errorf("can't create wallet: %w", err) + } + + gasReceiver = w.Accounts[0].Contract.ScriptHash() + } + return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceiver) }, } GenerateAlphabetCmd = &cobra.Command{ From 303cd35a019f110f1f47d264ac30827977643b91 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 23 Dec 2024 09:55:54 +0300 Subject: [PATCH 288/591] [#1578] adm: Remove unnecessary comments in RefillGasCmd Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/generate/root.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go index da9665d22..bdf4dc3d8 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go @@ -38,9 +38,7 @@ var ( _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) }, RunE: func(cmd *cobra.Command, _ []string) error { - // storage wallet path is not part of the config storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - // wallet address is not part of the config walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) var gasReceiver util.Uint160 From e64871c3fdcd1180b673ac7eb96e2ff6dffe6b87 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 23 Dec 2024 10:20:26 +0300 Subject: [PATCH 289/591] [#1578] adm: Allow to transfer GAS to multiple recepients Signed-off-by: Evgenii Stratonikov --- .../modules/morph/generate/generate.go | 10 ++++--- .../internal/modules/morph/generate/root.go | 29 +++++++++---------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go index 388d5c060..78f8617f1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go +++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go @@ -162,7 +162,7 @@ func generateStorageCreds(cmd *cobra.Command, _ []string) error { return refillGas(cmd, storageGasConfigFlag, w.Accounts[0].ScriptHash()) } -func refillGas(cmd *cobra.Command, gasFlag string, gasReceiver util.Uint160) (err error) { +func refillGas(cmd *cobra.Command, gasFlag string, gasReceivers ...util.Uint160) (err error) { gasStr := viper.GetString(gasFlag) gasAmount, err := helper.ParseGASAmount(gasStr) @@ -176,9 +176,11 @@ func refillGas(cmd *cobra.Command, gasFlag string, gasReceiver util.Uint160) (er } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, - wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) - emit.Opcodes(bw.BinWriter, opcode.ASSERT) + for _, gasReceiver := range gasReceivers { + emit.AppCall(bw.BinWriter, gas.Hash, "transfer", callflag.All, + wCtx.CommitteeAcc.Contract.ScriptHash(), gasReceiver, int64(gasAmount), nil) + emit.Opcodes(bw.BinWriter, opcode.ASSERT) + } if bw.Err != nil { return fmt.Errorf("BUG: invalid transfer arguments: %w", bw.Err) } diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/root.go b/cmd/frostfs-adm/internal/modules/morph/generate/root.go index bdf4dc3d8..73c986713 100644 --- a/cmd/frostfs-adm/internal/modules/morph/generate/root.go 
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/root.go @@ -38,30 +38,27 @@ var ( _ = viper.BindPFlag(commonflags.RefillGasAmountFlag, cmd.Flags().Lookup(commonflags.RefillGasAmountFlag)) }, RunE: func(cmd *cobra.Command, _ []string) error { - storageWalletPath, _ := cmd.Flags().GetString(commonflags.StorageWalletFlag) - walletAddress, _ := cmd.Flags().GetString(walletAddressFlag) + storageWalletPaths, _ := cmd.Flags().GetStringArray(commonflags.StorageWalletFlag) + walletAddresses, _ := cmd.Flags().GetStringArray(walletAddressFlag) - var gasReceiver util.Uint160 - - if len(walletAddress) != 0 { - var err error - gasReceiver, err = address.StringToUint160(walletAddress) + var gasReceivers []util.Uint160 + for _, walletAddress := range walletAddresses { + addr, err := address.StringToUint160(walletAddress) if err != nil { return fmt.Errorf("invalid wallet address %s: %w", walletAddress, err) } - } else { - if storageWalletPath == "" { - return fmt.Errorf("missing wallet path (use '--%s ')", commonflags.StorageWalletFlag) - } + gasReceivers = append(gasReceivers, addr) + } + for _, storageWalletPath := range storageWalletPaths { w, err := wallet.NewWalletFromFile(storageWalletPath) if err != nil { return fmt.Errorf("can't create wallet: %w", err) } - gasReceiver = w.Accounts[0].Contract.ScriptHash() + gasReceivers = append(gasReceivers, w.Accounts[0].Contract.ScriptHash()) } - return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceiver) + return refillGas(cmd, commonflags.RefillGasAmountFlag, gasReceivers...) }, } GenerateAlphabetCmd = &cobra.Command{ @@ -78,10 +75,10 @@ var ( func initRefillGasCmd() { RefillGasCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) RefillGasCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RefillGasCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") - RefillGasCmd.Flags().String(walletAddressFlag, "", "Address of wallet") + RefillGasCmd.Flags().StringArray(commonflags.StorageWalletFlag, nil, "Path to storage node wallet") + RefillGasCmd.Flags().StringArray(walletAddressFlag, nil, "Address of wallet") RefillGasCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Additional amount of GAS to transfer") - RefillGasCmd.MarkFlagsMutuallyExclusive(walletAddressFlag, commonflags.StorageWalletFlag) + RefillGasCmd.MarkFlagsOneRequired(walletAddressFlag, commonflags.StorageWalletFlag) } func initGenerateStorageCmd() { From 7c3bcb0f44cca381ae9c430d032f685265ed6938 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 23 Dec 2024 10:22:09 +0300 Subject: [PATCH 290/591] [#1578] Makefile: Refill GAS with a single command in env-up Signed-off-by: Evgenii Stratonikov --- Makefile | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index ecac760e9..f0cdc273c 100755 --- a/Makefile +++ b/Makefile @@ -270,10 +270,12 @@ env-up: all echo "Frostfs contracts not found"; exit 1; \ fi ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph init --contracts ${FROSTFS_CONTRACTS_PATH} - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet01.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet02.json --gas 10.0 - ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet03.json --gas 10.0 - 
${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --storage-wallet ./dev/storage/wallet04.json --gas 10.0 + ${BIN}/frostfs-adm --config ./dev/adm/frostfs-adm.yml morph refill-gas --gas 10.0 \ + --storage-wallet ./dev/storage/wallet01.json \ + --storage-wallet ./dev/storage/wallet02.json \ + --storage-wallet ./dev/storage/wallet03.json \ + --storage-wallet ./dev/storage/wallet04.json + @if [ ! -f "$(LOCODE_DB_PATH)" ]; then \ make locode-download; \ fi From 2832f4443750fb11ee827be5491f34f749c9122a Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 20 Dec 2024 12:30:06 +0300 Subject: [PATCH 291/591] [#1531] metrics: Rename `app_info` metric Signed-off-by: Ekaterina Lebedeva --- internal/metrics/application.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/metrics/application.go b/internal/metrics/application.go index 8bc408ab6..53acf9b7f 100644 --- a/internal/metrics/application.go +++ b/internal/metrics/application.go @@ -12,8 +12,9 @@ type ApplicationInfo struct { func NewApplicationInfo(version string) *ApplicationInfo { appInfo := &ApplicationInfo{ versionValue: metrics.NewGaugeVec(prometheus.GaugeOpts{ - Name: "app_info", - Help: "General information about the application.", + Namespace: namespace, + Name: "app_info", + Help: "General information about the application.", }, []string{"version"}), } appInfo.versionValue.With(prometheus.Labels{"version": version}) From b5b4f78b4925f71e6a774a4b0b7753a6c3833cbe Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 25 Dec 2024 14:14:57 +0300 Subject: [PATCH 292/591] [#1582] adm: Allow using the default account in `deposit-notary` It has never worked, actually. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/notary/notary.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index fd42d5a4a..6e159f11e 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -41,7 +41,8 @@ func depositNotary(cmd *cobra.Command, _ []string) error { } accHash := w.GetChangeAddress() - if addr, err := cmd.Flags().GetString(walletAccountFlag); err == nil { + addr, _ := cmd.Flags().GetString(walletAccountFlag) + if addr != "" { accHash, err = address.StringToUint160(addr) if err != nil { return fmt.Errorf("invalid address: %s", addr) From 31d3d299bfd03b9de5b3d3f7e973ea6e3718f98a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 25 Dec 2024 14:17:28 +0300 Subject: [PATCH 293/591] [#1582] adm: Unify promps for reading a password Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/notary/notary.go | 2 +- cmd/frostfs-adm/internal/modules/storagecfg/root.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index 6e159f11e..7058818c0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -54,7 +54,7 @@ func depositNotary(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't find account for %s", accHash) } - prompt := fmt.Sprintf("Enter password for %s >", address.Uint160ToString(accHash)) + prompt := fmt.Sprintf("Enter password for %s > ", address.Uint160ToString(accHash)) pass, err := 
input.ReadPassword(prompt) if err != nil { return fmt.Errorf("can't get password: %v", err) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go index 127272da5..8e6a8354e 100644 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go @@ -105,7 +105,7 @@ func storageConfig(cmd *cobra.Command, args []string) { fatalOnErr(errors.New("can't find account in wallet")) } - c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Account password for %s: ", c.Wallet.Account)) + c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account)) fatalOnErr(err) err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) From ea868e09f84e0be28acb6c403006900f68ad8bf0 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 25 Dec 2024 14:21:36 +0300 Subject: [PATCH 294/591] [#1582] adm: Use int64 type and the default value for `--till` flag Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/notary/notary.go | 14 +++----------- .../internal/modules/morph/notary/root.go | 2 +- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go index 7058818c0..3435926c0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/notary.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/notary.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "math/big" - "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" @@ -74,16 +73,9 @@ func depositNotary(cmd *cobra.Command, _ []string) error { return err } - till := int64(defaultNotaryDepositLifetime) - tillStr, err := cmd.Flags().GetString(notaryDepositTillFlag) - if err != nil { - return err - } - if tillStr != "" { - till, err = strconv.ParseInt(tillStr, 10, 64) - if err != nil || till <= 0 { - return errInvalidNotaryDepositLifetime - } + till, _ := cmd.Flags().GetInt64(notaryDepositTillFlag) + if till <= 0 { + return errInvalidNotaryDepositLifetime } return transferGas(cmd, acc, accHash, gasAmount, till) diff --git a/cmd/frostfs-adm/internal/modules/morph/notary/root.go b/cmd/frostfs-adm/internal/modules/morph/notary/root.go index 497ff8ea1..d7be2e503 100644 --- a/cmd/frostfs-adm/internal/modules/morph/notary/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/notary/root.go @@ -20,7 +20,7 @@ func initDepositoryNotaryCmd() { DepositCmd.Flags().String(commonflags.StorageWalletFlag, "", "Path to storage node wallet") DepositCmd.Flags().String(walletAccountFlag, "", "Wallet account address") DepositCmd.Flags().String(commonflags.RefillGasAmountFlag, "", "Amount of GAS to deposit") - DepositCmd.Flags().String(notaryDepositTillFlag, "", "Notary deposit duration in blocks") + DepositCmd.Flags().Int64(notaryDepositTillFlag, defaultNotaryDepositLifetime, "Notary deposit duration in blocks") } func init() { From f6c5222952ebce32569b4ff3801943895b56258f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 25 Dec 2024 12:15:56 +0300 Subject: [PATCH 295/591] [#1581] services/session: Use user.ID.EncodeToString() where possible gopatch: ``` @@ var id expression @@ -base58.Encode(id.WalletBytes()) +id.EncodeToString() ``` Signed-off-by: Evgenii Stratonikov --- pkg/services/session/storage/temporary/executor.go | 2 +- 
pkg/services/session/storage/temporary/storage.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/services/session/storage/temporary/executor.go b/pkg/services/session/storage/temporary/executor.go index d531b25cb..423e579d7 100644 --- a/pkg/services/session/storage/temporary/executor.go +++ b/pkg/services/session/storage/temporary/executor.go @@ -38,7 +38,7 @@ func (s *TokenStore) Create(_ context.Context, body *session.CreateRequestBody) s.mtx.Lock() s.tokens[key{ tokenID: base58.Encode(uidBytes), - ownerID: base58.Encode(id.WalletBytes()), + ownerID: id.EncodeToString(), }] = storage.NewPrivateToken(&sk.PrivateKey, body.GetExpiration()) s.mtx.Unlock() diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go index 9ae9db9dc..c9da6b842 100644 --- a/pkg/services/session/storage/temporary/storage.go +++ b/pkg/services/session/storage/temporary/storage.go @@ -41,7 +41,7 @@ func (s *TokenStore) Get(ownerID user.ID, tokenID []byte) *storage.PrivateToken s.mtx.RLock() t := s.tokens[key{ tokenID: base58.Encode(tokenID), - ownerID: base58.Encode(ownerID.WalletBytes()), + ownerID: ownerID.EncodeToString(), }] s.mtx.RUnlock() From ca0a33ea0febe0a44f5ca70a61c92a23b6cbd4d5 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 25 Dec 2024 12:04:08 +0300 Subject: [PATCH 296/591] [#465] objsvc: Set NETMAP_EPOCH xheader for auxiliary requests Signed-off-by: Evgenii Stratonikov --- pkg/services/object/internal/client/client.go | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/pkg/services/object/internal/client/client.go b/pkg/services/object/internal/client/client.go index 2c405070d..3e8832640 100644 --- a/pkg/services/object/internal/client/client.go +++ b/pkg/services/object/internal/client/client.go @@ -7,9 +7,11 @@ import ( "errors" "fmt" "io" + "strconv" coreclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + sessionAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -31,6 +33,8 @@ type commonPrm struct { local bool xHeaders []string + + netmapEpoch uint64 } // SetClient sets base client for ForstFS API communication. @@ -73,6 +77,14 @@ func (x *commonPrm) SetXHeaders(hs []string) { x.xHeaders = hs } +func (x *commonPrm) calculateXHeaders() []string { + hs := x.xHeaders + if x.netmapEpoch != 0 { + hs = append(hs, sessionAPI.XHeaderNetmapEpoch, strconv.FormatUint(x.netmapEpoch, 10)) + } + return hs +} + type readPrmCommon struct { commonPrm } @@ -80,8 +92,8 @@ type readPrmCommon struct { // SetNetmapEpoch sets the epoch number to be used to locate the objectSDK. // // By default current epoch on the server will be used. -func (x *readPrmCommon) SetNetmapEpoch(_ uint64) { - // FIXME(@fyrchik): https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/465 +func (x *readPrmCommon) SetNetmapEpoch(epoch uint64) { + x.netmapEpoch = epoch } // GetObjectPrm groups parameters of GetObject operation. 
@@ -139,7 +151,7 @@ func GetObject(ctx context.Context, prm GetObjectPrm) (*GetObjectRes, error) { prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Key = prm.key @@ -233,7 +245,7 @@ func HeadObject(ctx context.Context, prm HeadObjectPrm) (*HeadObjectRes, error) prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() cliRes, err := prm.cli.ObjectHead(ctx, prm.ClientParams) if err == nil { @@ -326,7 +338,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e prm.ClientParams.Session = prm.tokenSession } - prm.ClientParams.XHeaders = prm.xHeaders + prm.ClientParams.XHeaders = prm.calculateXHeaders() prm.ClientParams.BearerToken = prm.tokenBearer prm.ClientParams.Local = prm.local prm.ClientParams.Length = prm.ln @@ -390,7 +402,7 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) { defer span.End() prmCli := client.PrmObjectPutInit{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -437,7 +449,7 @@ func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, erro } prmCli := client.PrmObjectPutSingle{ - XHeaders: prm.xHeaders, + XHeaders: prm.calculateXHeaders(), BearerToken: prm.tokenBearer, Session: prm.tokenSession, Local: true, @@ -496,7 +508,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes prm.cliPrm.Local = prm.local prm.cliPrm.Session = prm.tokenSession prm.cliPrm.BearerToken = prm.tokenBearer - prm.cliPrm.XHeaders = prm.xHeaders + prm.cliPrm.XHeaders = prm.calculateXHeaders() prm.cliPrm.Key = prm.key rdr, err := prm.cli.ObjectSearchInit(ctx, prm.cliPrm) From 9cd1bcef06b1c116910f6d9d2e4895332a628b92 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 23 Dec 2024 14:53:04 +0300 Subject: [PATCH 297/591] [#1512] object: Make raw `PutSingle` check status within response Signed-off-by: Airat Arifullin --- pkg/services/object/put/single.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 36b0bd54c..19d025ff8 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -29,6 +29,7 @@ import ( sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/signature" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/tzhash/tz" @@ -353,6 +354,9 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, err = fmt.Errorf("response verification failed: %w", err) } + st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus()) + err = apistatus.ErrFromStatus(st) + return }) From e44782473a737daac7f05b0b491dc756de31391d Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 24 Dec 2024 21:27:45 +0300 Subject: [PATCH 298/591] [#1512] object: Fix `writePart` for EC-container * Immediatly return after `ObjectAlreadyRemoved` error. 
Signed-off-by: Airat Arifullin --- pkg/services/object/common/writer/ec.go | 3 +++ pkg/services/object/put/single.go | 1 + 2 files changed, 4 insertions(+) diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index fdaa569da..94bcf6a32 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode" @@ -274,6 +275,8 @@ func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx err := e.putECPartToNode(ctx, obj, node) if err == nil { return nil + } else if clientSDK.IsErrObjectAlreadyRemoved(err) { + return err } e.Config.Logger.Warn(ctx, logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)), zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx), diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 19d025ff8..5219e64d5 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -352,6 +352,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, err = signature.VerifyServiceMessage(resp) if err != nil { err = fmt.Errorf("response verification failed: %w", err) + return } st := apistatus.FromStatusV2(resp.GetMetaHeader().GetStatus()) From 0da998ef50472d8de4589bfd9105ce1892636c94 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 25 Dec 2024 22:12:41 +0300 Subject: [PATCH 299/591] [#1583] metabase: Skip expired objects in `ListWithCursor` Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/metabase/list.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index a7ff2222f..375d1cb1a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -87,7 +87,8 @@ type CountAliveObjectsInContainerPrm struct { } // ListWithCursor lists physical objects available in metabase starting from -// cursor. Includes objects of all types. Does not include inhumed objects. +// cursor. Includes objects of all types. Does not include inhumed and expired +// objects. // Use cursor value from response for consecutive requests. 
// // Returns ErrEndOfListing if there are no more objects to return or count @@ -143,6 +144,8 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, rawAddr := make([]byte, cidSize, addressKeySize) + currEpoch := db.epochState.CurrentEpoch() + loop: for ; name != nil; name, _ = c.Next() { cidRaw, prefix := parseContainerIDWithPrefix(&containerID, name) @@ -167,7 +170,7 @@ loop: if bkt != nil { copy(rawAddr, cidRaw) result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, - result, count, cursor, threshold) + result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err } @@ -212,6 +215,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket limit int, // stop listing at `limit` items in result cursor *Cursor, // start from cursor object threshold bool, // ignore cursor and start immediately + currEpoch uint64, ) ([]objectcore.Info, []byte, *Cursor, error) { if cursor == nil { cursor = new(Cursor) @@ -243,13 +247,19 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket continue } + var o objectSDK.Object + if err := o.Unmarshal(bytes.Clone(v)); err != nil { + return nil, nil, nil, err + } + + expEpoch, hasExpEpoch := hasExpirationEpoch(&o) + if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch { + continue + } + var isLinkingObj bool var ecInfo *objectcore.ECInfo if objType == objectSDK.TypeRegular { - var o objectSDK.Object - if err := o.Unmarshal(bytes.Clone(v)); err != nil { - return nil, nil, nil, err - } isLinkingObj = isLinkObject(&o) ecHeader := o.ECHeader() if ecHeader != nil { From fa08bfa553ee013b65076c38146f83134ae2f06c Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 25 Dec 2024 22:16:11 +0300 Subject: [PATCH 300/591] [#1583] metabase/test: Update `TestLisObjectsWithCursor` Update this test following recent changes to ensure that `(*DB).ListWithCursor` skips expired objects. 
Signed-off-by: Aleksey Savchuk --- .../metabase/list_test.go | 33 ++++++++++++++++--- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 1d8beb175..817b22010 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -3,14 +3,17 @@ package meta_test import ( "context" "errors" + "strconv" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" @@ -71,14 +74,16 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { func TestLisObjectsWithCursor(t *testing.T) { t.Parallel() - db := newDB(t) - defer func() { require.NoError(t, db.Close(context.Background())) }() - const ( + currEpoch = 100 + expEpoch = currEpoch - 1 containers = 5 - total = containers * 4 // regular + ts + child + lock + total = containers * 6 // regular + ts + child + lock + non-expired regular + locked expired ) + db := newDB(t, meta.WithEpochState(epochState{currEpoch})) + defer func() { require.NoError(t, db.Close(context.Background())) }() + expected := make([]object.Info, 0, total) // fill metabase with objects @@ -127,6 +132,26 @@ func TestLisObjectsWithCursor(t *testing.T) { err = putBig(db, child) require.NoError(t, err) expected = append(expected, object.Info{Address: object.AddressOf(child), Type: objectSDK.TypeRegular}) + + // add expired object (do not include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + + // add non-expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(currEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) + + // add locked expired object (include into expected) + obj = testutil.GenerateObjectWithCID(containerID) + objID := oidtest.ID() + obj.SetID(objID) + testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.Itoa(expEpoch)) + require.NoError(t, metaPut(db, obj, nil)) + require.NoError(t, db.Lock(context.Background(), containerID, oidtest.ID(), []oid.ID{objID})) + expected = append(expected, object.Info{Address: object.AddressOf(obj), Type: objectSDK.TypeRegular}) } t.Run("success with various count", func(t *testing.T) { From 6fe34d266a2cd8ff35929053610cb36851263cb4 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Wed, 25 Dec 2024 15:32:24 +0300 Subject: [PATCH 301/591] [#1577] morph: Fix typo Signed-off-by: Ekaterina Lebedeva --- pkg/morph/client/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 
37599e696..01fcc98e5 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -210,7 +210,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F // TestInvokeIterator invokes contract method returning an iterator and executes cb on each element. // If cb returns an error, the session is closed and this error is returned as-is. -// If the remove neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. +// If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { From 242f0095d08e54124807f14b30f5ee3adfe9eb6b Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Wed, 25 Dec 2024 14:25:29 +0300 Subject: [PATCH 302/591] [#1577] container: Reduce iterations through container list * Separated iteration through container ids from `ContainersOf()` so that it could be reused. * When listing containers we used to iterate through the the whole list of containers twice: first when reading from a contract, then when sending them. Now we can send batches of containers when reading from the contract. Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-node/container.go | 5 +++ pkg/morph/client/container/containers_of.go | 50 +++++++++++---------- pkg/morph/client/container/list.go | 44 +++++++++++------- pkg/services/container/morph/executor.go | 48 ++++++++++++++------ 4 files changed, 94 insertions(+), 53 deletions(-) diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index fb2550a03..be0acf738 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -222,6 +222,7 @@ type morphContainerReader struct { lister interface { ContainersOf(*user.ID) ([]cid.ID, error) + IterateContainersOf(*user.ID, func(cid.ID) error) error } } @@ -237,6 +238,10 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { return x.lister.ContainersOf(id) } +func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error { + return x.lister.IterateContainersOf(id, processCID) +} + type morphContainerWriter struct { neoClient *cntClient.Client } diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index 5fe15be0d..6381a14c0 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -2,9 +2,7 @@ package container import ( "errors" - "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" @@ -16,27 +14,37 @@ import ( // // If remote RPC does not support neo-go session API, fallback to List() method. 
func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { - var rawID []byte + var cidList []cid.ID + var err error + cb := func(id cid.ID) error { + cidList = append(cidList, id) + return nil + } + if err = c.IterateContainersOf(idUser, cb); err != nil { + return nil, err + } + return cidList, nil +} + +// iterateContainers iterates over a list of container identifiers +// belonging to the specified user of FrostFS system and executes +// `cb` on each element. If idUser is nil, calls it on the list of all containers. +func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error) error { + var rawID []byte if idUser != nil { rawID = idUser.WalletBytes() } - var cidList []cid.ID - cb := func(item stackitem.Item) error { - rawID, err := client.BytesFromStackItem(item) + cnrHash := c.client.ContractAddress() + itemCb := func(item stackitem.Item) error { + id, err := getCIDfromStackItem(item) if err != nil { - return fmt.Errorf("get byte array from stack item (%s): %w", containersOfMethod, err) + return err } - - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) return nil } @@ -49,14 +57,10 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { // 512 is big enough value and it is beautiful. const batchSize = 512 - cnrHash := c.client.ContractAddress() - err := c.client.Morph().TestInvokeIterator(cb, batchSize, cnrHash, containersOfMethod, rawID) - if err != nil { - if errors.Is(err, unwrap.ErrNoSessionID) { - return c.list(idUser) - } - return nil, err + err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) + if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { + return c.iterate(idUser, cb) } - return cidList, nil + return err } diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index d9719aedd..78ea8278f 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -6,15 +6,16 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" ) -// list returns a list of container identifiers belonging +// iterate iterates through a list of container identifiers belonging // to the specified user of FrostFS system. The list is composed // through Container contract call. // -// Returns the identifiers of all FrostFS containers if pointer +// Iterates through the identifiers of all FrostFS containers if pointer // to user identifier is nil. 
-func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { +func (c *Client) iterate(idUser *user.ID, cb func(cid.ID) error) error { var rawID []byte if idUser != nil { @@ -27,32 +28,41 @@ func (c *Client) list(idUser *user.ID) ([]cid.ID, error) { res, err := c.client.TestInvoke(prm) if err != nil { - return nil, fmt.Errorf("test invoke (%s): %w", listMethod, err) + return fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { - return nil, fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) + return fmt.Errorf("unexpected stack item count (%s): %d", listMethod, ln) } res, err = client.ArrayFromStackItem(res[0]) if err != nil { - return nil, fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) + return fmt.Errorf("get stack item array from stack item (%s): %w", listMethod, err) } - cidList := make([]cid.ID, 0, len(res)) for i := range res { - rawID, err := client.BytesFromStackItem(res[i]) + id, err := getCIDfromStackItem(res[i]) if err != nil { - return nil, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) + return err } - var id cid.ID - - err = id.Decode(rawID) - if err != nil { - return nil, fmt.Errorf("decode container ID: %w", err) + if err = cb(id); err != nil { + return err } - - cidList = append(cidList, id) } - return cidList, nil + return nil +} + +func getCIDfromStackItem(item stackitem.Item) (cid.ID, error) { + rawID, err := client.BytesFromStackItem(item) + if err != nil { + return cid.ID{}, fmt.Errorf("get byte array from stack item (%s): %w", listMethod, err) + } + + var id cid.ID + + err = id.Decode(rawID) + if err != nil { + return cid.ID{}, fmt.Errorf("decode container ID: %w", err) + } + return id, nil } diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index e9d1606f1..cadf92e19 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -30,6 +30,7 @@ type Reader interface { // to the specified user of FrostFS system. Returns the identifiers // of all FrostFS containers if pointer to owner identifier is nil. ContainersOf(*user.ID) ([]cid.ID, error) + IterateContainersOf(*user.ID, func(cid.ID) error) error } // Writer is an interface of container storage updater. @@ -201,7 +202,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return res, nil } -func (s *morphExecutor) ListStream(_ context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { +func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStreamRequest, stream containerSvc.ListStream) error { body := req.GetBody() idV2 := body.GetOwnerID() if idV2 == nil { @@ -215,20 +216,41 @@ func (s *morphExecutor) ListStream(_ context.Context, req *container.ListStreamR return fmt.Errorf("invalid user ID: %w", err) } - cnrs, err := s.rdr.ContainersOf(&id) - if err != nil { - return err - } - - cidList := make([]refs.ContainerID, len(cnrs)) - for i := range cnrs { - cnrs[i].WriteToV2(&cidList[i]) - } - resBody := new(container.ListStreamResponseBody) - resBody.SetContainerIDs(cidList) r := new(container.ListStreamResponse) r.SetBody(resBody) - return stream.Send(r) + var cidList []refs.ContainerID + + // Amount of containers to send at once. 
+ const batchSize = 1000 + + processCID := func(id cid.ID) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var refID refs.ContainerID + id.WriteToV2(&refID) + cidList = append(cidList, refID) + if len(cidList) == batchSize { + r.GetBody().SetContainerIDs(cidList) + cidList = cidList[:0] + return stream.Send(r) + } + return nil + } + + if err = s.rdr.IterateContainersOf(&id, processCID); err != nil { + return err + } + + if len(cidList) > 0 { + r.GetBody().SetContainerIDs(cidList) + return stream.Send(r) + } + + return nil } From c0221d76e6534a7c264c24a7b1c6af73dad2b5ed Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 27 Dec 2024 15:31:06 +0300 Subject: [PATCH 303/591] [#1577] node/container: Fix typo Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-node/config/container/container.go | 2 +- pkg/morph/client/container/containers_of.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-node/config/container/container.go b/cmd/frostfs-node/config/container/container.go index b0b8043d6..1cd64a6f8 100644 --- a/cmd/frostfs-node/config/container/container.go +++ b/cmd/frostfs-node/config/container/container.go @@ -6,7 +6,7 @@ const ( subsection = "container" listStreamSubsection = "list_stream" - // ContainerBatchSizeDefault represents he maximum amount of containers to send via stream at once. + // ContainerBatchSizeDefault represents the maximum amount of containers to send via stream at once. ContainerBatchSizeDefault = 1000 ) diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index 6381a14c0..074a586be 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -36,7 +36,6 @@ func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error rawID = idUser.WalletBytes() } - cnrHash := c.client.ContractAddress() itemCb := func(item stackitem.Item) error { id, err := getCIDfromStackItem(item) if err != nil { @@ -57,6 +56,7 @@ func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error // 512 is big enough value and it is beautiful. const batchSize = 512 + cnrHash := c.client.ContractAddress() err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { return c.iterate(idUser, cb) From d7fcc5ce304c0c8e971c4edccf778cf6b8693d52 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 28 Dec 2024 09:03:07 +0300 Subject: [PATCH 304/591] [#1586] objsvc: Allow to send search response in multiple messages Previously, `ln` was only set once, so search has really worked for small number of objects. 
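For illustration, a minimal standalone sketch of the failure mode (a simplified reconstruction: the slice name, sizes and loop shape are illustrative, not copied from the service code). The per-message limit is compared against a length captured once, before the loop, so the final, shorter batch slices past the end of the remaining IDs:

```go
package main

func main() {
	const addrAmount = 43690   // per-message ID limit (illustrative, taken from the trace below)
	ids := make([]int, 100000) // pretend search result

	// Buggy shape: the remaining length is captured once, before the loop.
	for ln := uint64(len(ids)); len(ids) > 0; {
		cut := min(addrAmount, ln) // ln is never refreshed as ids shrinks
		_ = ids[:cut]              // panics on the last, short batch: cut > len(ids)
		ids = ids[cut:]
	}
}
```

The change below instead recomputes the bound from `len(ids)` on every iteration.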
Fix panic: ``` panic: runtime error: slice bounds out of range [:43690] with capacity 21238 goroutine 6859775 [running]: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object.(*searchStreamMsgSizeCtrl).Send(0xc001eec8d0, 0xc005734000) git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/transport_splitter.go:173 +0x1f0 git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/v2.(*streamWriter).WriteIDs(0xc000520320, {0xc00eb1a000, 0x4fd9c, 0x7fd6475a9a68?}) git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/v2/streamer.go:28 +0x155 git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*uniqueIDWriter).WriteIDs(0xc001386420, {0xc00eb1a000?, 0xc0013ea9c0?, 0x113eef3?}) git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/util.go:62 +0x202 git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*execCtx).writeIDList(0xc00011aa38?, {0xc00eb1a000?, 0xc001eec9f0?, 0xc0008f4380?}) git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/exec.go:68 +0x91 git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search.(*execCtx).executeLocal(0xc0008f4380, {0x176c538, 0xc001eec9f0}) git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/search/local.go:18 +0x16b ``` Signed-off-by: Evgenii Stratonikov --- pkg/services/object/transport_splitter.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/services/object/transport_splitter.go b/pkg/services/object/transport_splitter.go index 0b3676edb..b446d3605 100644 --- a/pkg/services/object/transport_splitter.go +++ b/pkg/services/object/transport_splitter.go @@ -162,13 +162,13 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error { var newResp *object.SearchResponse - for ln := uint64(len(ids)); ; { + for { if newResp == nil { newResp = new(object.SearchResponse) newResp.SetBody(body) } - cut := min(s.addrAmount, ln) + cut := min(s.addrAmount, uint64(len(ids))) body.SetIDList(ids[:cut]) newResp.SetMetaHeader(resp.GetMetaHeader()) From cddcd73f04cbb1c1894e45dab62dc2070f3931d5 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 28 Dec 2024 14:36:18 +0300 Subject: [PATCH 305/591] [#1590] adm: Make `--account` flag required in `proxy-*` commands Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/proxy/root.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index 1854c8d2b..5f07e5862 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -30,12 +30,14 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } From 
dc410fca901dc0f4628fb3d64d83be32525414bd Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 28 Dec 2024 14:37:35 +0300 Subject: [PATCH 306/591] [#1590] adm: Accept many accounts in `proxy-*` commands Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/proxy/proxy.go | 31 +++++++++++++------ .../internal/modules/morph/proxy/root.go | 4 +-- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go index cb575b657..24cda45a6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/proxy.go @@ -20,23 +20,32 @@ const ( accountAddressFlag = "account" ) +func parseAddresses(cmd *cobra.Command) []util.Uint160 { + var addrs []util.Uint160 + + accs, _ := cmd.Flags().GetStringArray(accountAddressFlag) + for _, acc := range accs { + addr, err := address.StringToUint160(acc) + commonCmd.ExitOnErr(cmd, "invalid account: %w", err) + + addrs = append(addrs, addr) + } + return addrs +} + func addProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "addAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "addAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } func removeProxyAccount(cmd *cobra.Command, _ []string) { - acc, _ := cmd.Flags().GetString(accountAddressFlag) - addr, err := address.StringToUint160(acc) - commonCmd.ExitOnErr(cmd, "invalid account: %w", err) - err = processAccount(cmd, addr, "removeAccount") + addrs := parseAddresses(cmd) + err := processAccount(cmd, addrs, "removeAccount") commonCmd.ExitOnErr(cmd, "processing error: %w", err) } -func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error { +func processAccount(cmd *cobra.Command, addrs []util.Uint160, method string) error { wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper()) if err != nil { return fmt.Errorf("can't initialize context: %w", err) @@ -54,7 +63,9 @@ func processAccount(cmd *cobra.Command, addr util.Uint160, method string) error } bw := io.NewBufBinWriter() - emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + for _, addr := range addrs { + emit.AppCall(bw.BinWriter, proxyHash, method, callflag.All, addr) + } if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go index 5f07e5862..ad89af2b5 100644 --- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go @@ -29,14 +29,14 @@ var ( func initProxyAddAccount() { AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string") + AddAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } func initProxyRemoveAccount() { RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) - RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet 
address string") + RemoveAccountCmd.Flags().StringArray(accountAddressFlag, nil, "Wallet address string") _ = AddAccountCmd.MarkFlagRequired(accountAddressFlag) RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) } From 5ccb3394b49c29ea0e9186849bfc9a909e74f5b8 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 9 Jan 2025 14:30:17 +0300 Subject: [PATCH 307/591] [#1592] go.mod: Update sdk-go Signed-off-by: Evgenii Stratonikov --- go.mod | 32 +++++++++++++-------------- go.sum | 68 +++++++++++++++++++++++++++++++--------------------------- 2 files changed, 52 insertions(+), 48 deletions(-) diff --git a/go.mod b/go.mod index 8f4053872..267e411fb 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 @@ -27,7 +27,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.12.1 + github.com/multiformats/go-multiaddr v0.14.0 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -40,15 +40,15 @@ require ( github.com/ssgreg/journald v1.0.0 github.com/stretchr/testify v1.9.0 go.etcd.io/bbolt v1.3.10 - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - golang.org/x/term v0.21.0 - google.golang.org/grpc v1.66.2 - google.golang.org/protobuf v1.34.2 + golang.org/x/sync v0.10.0 + golang.org/x/sys v0.28.0 + golang.org/x/term v0.27.0 + google.golang.org/grpc v1.69.2 + google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -119,15 +119,15 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect 
gopkg.in/ini.v1 v1.67.0 // indirect lukechampine.com/blake3 v1.2.1 // indirect rsc.io/tmplfunc v0.0.3 // indirect diff --git a/go.sum b/go.sum index d63396202..935d3c56a 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467 h1:MH9uHZFZNyUCL+YChiDcVeXPjhTDcFDeoGr8Mc8NY9M= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241210104938-c4463df8d467/go.mod h1:eoK7+KZQ9GJxbzIs6vTnoUJqFDppavInLRHaN4MYgZg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76 h1:wzvSJIiS+p9qKfl3eg1oH6qlrjaEWiqTc/iMDKG3Ml4= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= @@ -106,6 +106,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -188,8 +190,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= -github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= +github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= +github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -290,20 +292,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -318,8 +322,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 
h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -339,16 +343,16 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -375,16 +379,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.27.0 
h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -392,8 +396,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -406,12 +410,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -420,8 +424,8 @@ 
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= From 3900b92927bb0343cf7282bf91e817f8cd44f827 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 26 Dec 2024 14:12:35 +0300 Subject: [PATCH 308/591] Revert "[#1492] metabase: Ensure Unmarshal() is called on a cloned slice" This reverts commit 8ed7a676d50e24489e2abeb5269d6eb3332df1f8. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/exists.go | 3 +-- pkg/local_object_storage/metabase/get.go | 11 +++++------ pkg/local_object_storage/metabase/iterators.go | 3 +-- pkg/local_object_storage/metabase/list.go | 4 ++-- pkg/local_object_storage/metabase/put.go | 3 +-- 5 files changed, 10 insertions(+), 14 deletions(-) diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 3133c5480..962108a76 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "fmt" "time" @@ -227,7 +226,7 @@ func getSplitInfo(tx *bbolt.Tx, cnr cid.ID, key []byte) (*objectSDK.SplitInfo, e splitInfo := objectSDK.NewSplitInfo() - err := splitInfo.Unmarshal(bytes.Clone(rawSplitInfo)) + err := splitInfo.Unmarshal(rawSplitInfo) if err != nil { return nil, fmt.Errorf("unmarshal split info from root index: %w", err) } diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index af274b245..615add1af 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "fmt" "time" @@ -112,7 +111,7 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b // check in primary index data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + return obj, obj.Unmarshal(data) } data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) @@ -123,13 +122,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b // if not found then check in tombstone index data = getFromBucket(tx, tombstoneBucketName(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + return obj, obj.Unmarshal(data) } // if not found then check in locker index data = getFromBucket(tx, bucketNameLockers(cnr, bucketName), key) if len(data) != 0 { - return obj, obj.Unmarshal(bytes.Clone(data)) + return obj, obj.Unmarshal(data) } // if not found then check if object is a virtual @@ 
-185,7 +184,7 @@ func getVirtualObject(tx *bbolt.Tx, cnr cid.ID, key []byte, raw bool) (*objectSD child := objectSDK.New() - err = child.Unmarshal(bytes.Clone(data)) + err = child.Unmarshal(data) if err != nil { return nil, fmt.Errorf("unmarshal child with parent: %w", err) } @@ -219,7 +218,7 @@ func getECInfoError(tx *bbolt.Tx, cnr cid.ID, data []byte) error { objData := getFromBucket(tx, primaryBucketName(cnr, make([]byte, bucketKeySize)), key) if len(objData) != 0 { obj := objectSDK.New() - if err := obj.Unmarshal(bytes.Clone(objData)); err != nil { + if err := obj.Unmarshal(objData); err != nil { return err } chunk := objectSDK.ECChunk{} diff --git a/pkg/local_object_storage/metabase/iterators.go b/pkg/local_object_storage/metabase/iterators.go index 0d438e102..9cccd7dad 100644 --- a/pkg/local_object_storage/metabase/iterators.go +++ b/pkg/local_object_storage/metabase/iterators.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "errors" "strconv" @@ -130,7 +129,7 @@ func iteratePhyObjects(tx *bbolt.Tx, f func(cid.ID, oid.ID, *objectSDK.Object) e } return b.ForEach(func(k, v []byte) error { - if oid.Decode(k) == nil && obj.Unmarshal(bytes.Clone(v)) == nil { + if oid.Decode(k) == nil && obj.Unmarshal(v) == nil { return f(cid, oid, obj) } diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 375d1cb1a..f488c3ced 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -248,7 +248,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } var o objectSDK.Object - if err := o.Unmarshal(bytes.Clone(v)); err != nil { + if err := o.Unmarshal(v); err != nil { return nil, nil, nil, err } @@ -423,7 +423,7 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, p var ecInfo *objectcore.ECInfo if prm.ObjectType == objectSDK.TypeRegular { var o objectSDK.Object - if err := o.Unmarshal(bytes.Clone(v)); err != nil { + if err := o.Unmarshal(v); err != nil { return err } isLinkingObj = isLinkObject(&o) diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go index 16918c4d9..5e1bbfe9e 100644 --- a/pkg/local_object_storage/metabase/put.go +++ b/pkg/local_object_storage/metabase/put.go @@ -1,7 +1,6 @@ package meta import ( - "bytes" "context" "encoding/binary" "errors" @@ -320,7 +319,7 @@ func updateSplitInfoIndex(tx *bbolt.Tx, objKey []byte, cnr cid.ID, bucketName [] return si.Marshal() default: oldSI := objectSDK.NewSplitInfo() - if err := oldSI.Unmarshal(bytes.Clone(old)); err != nil { + if err := oldSI.Unmarshal(old); err != nil { return nil, err } si = util.MergeSplitInfo(si, oldSI) From 8a658de0b28835fcf55a7c611eed497dd1e253ee Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 20 Dec 2024 11:40:34 +0300 Subject: [PATCH 309/591] [#1506] ape: Do not create cosigners slice on each contract invocation Signed-off-by: Evgenii Stratonikov --- pkg/ape/contract_storage/proxy.go | 47 +++++++++++++------------------ 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go index 953b91a79..8ae3f5203 100644 --- a/pkg/ape/contract_storage/proxy.go +++ b/pkg/ape/contract_storage/proxy.go @@ -31,9 +31,7 @@ type RPCActorProvider interface { type ProxyVerificationContractStorage struct { rpcActorProvider RPCActorProvider - acc *wallet.Account - - proxyScriptHash util.Uint160 + cosigners []actor.SignerAccount policyScriptHash util.Uint160 } @@ -41,12 
+39,27 @@ type ProxyVerificationContractStorage struct { var _ ProxyAdaptedContractStorage = (*ProxyVerificationContractStorage)(nil) func NewProxyVerificationContractStorage(rpcActorProvider RPCActorProvider, key *keys.PrivateKey, proxyScriptHash, policyScriptHash util.Uint160) *ProxyVerificationContractStorage { + acc := wallet.NewAccountFromPrivateKey(key) return &ProxyVerificationContractStorage{ rpcActorProvider: rpcActorProvider, - acc: wallet.NewAccountFromPrivateKey(key), - - proxyScriptHash: proxyScriptHash, + cosigners: []actor.SignerAccount{ + { + Signer: transaction.Signer{ + Account: proxyScriptHash, + Scopes: transaction.CustomContracts, + AllowedContracts: []util.Uint160{policyScriptHash}, + }, + Account: notary.FakeContractAccount(proxyScriptHash), + }, + { + Signer: transaction.Signer{ + Account: acc.Contract.ScriptHash(), + Scopes: transaction.CalledByEntry, + }, + Account: acc, + }, + }, policyScriptHash: policyScriptHash, } @@ -64,7 +77,7 @@ func (n *contractStorageActorAdapter) GetRPCInvoker() invoker.RPCInvoke { func (contractStorage *ProxyVerificationContractStorage) newContractStorageActor() (policy_morph.ContractStorageActor, error) { rpcActor := contractStorage.rpcActorProvider.GetRPCActor() - act, err := actor.New(rpcActor, cosigners(contractStorage.acc, contractStorage.proxyScriptHash, contractStorage.policyScriptHash)) + act, err := actor.New(rpcActor, contractStorage.cosigners) if err != nil { return nil, err } @@ -106,23 +119,3 @@ func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(nam } return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) } - -func cosigners(acc *wallet.Account, proxyScriptHash, policyScriptHash util.Uint160) []actor.SignerAccount { - return []actor.SignerAccount{ - { - Signer: transaction.Signer{ - Account: proxyScriptHash, - Scopes: transaction.CustomContracts, - AllowedContracts: []util.Uint160{policyScriptHash}, - }, - Account: notary.FakeContractAccount(proxyScriptHash), - }, - { - Signer: transaction.Signer{ - Account: acc.Contract.ScriptHash(), - Scopes: transaction.CalledByEntry, - }, - Account: acc, - }, - } -} From 85af6bcd5c31f43672d88345f730e7edb2b1de2a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 20 Dec 2024 11:47:28 +0300 Subject: [PATCH 310/591] [#1506] ape: Use contract reader in ListMorphRuleChains() Signed-off-by: Evgenii Stratonikov --- pkg/ape/contract_storage/proxy.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/pkg/ape/contract_storage/proxy.go b/pkg/ape/contract_storage/proxy.go index 8ae3f5203..8cbb1cce9 100644 --- a/pkg/ape/contract_storage/proxy.go +++ b/pkg/ape/contract_storage/proxy.go @@ -111,11 +111,16 @@ func (contractStorage *ProxyVerificationContractStorage) RemoveMorphRuleChain(na // ListMorphRuleChains lists morph rule chains from Policy contract using both Proxy contract and storage account as consigners. func (contractStorage *ProxyVerificationContractStorage) ListMorphRuleChains(name chain.Name, target engine.Target) ([]*chain.Chain, error) { - // contractStorageActor is reconstructed per each method invocation because RPCActor's (that is, basically, WSClient) connection may get invalidated, but - // ProxyVerificationContractStorage does not manage reconnections. 
- contractStorageActor, err := contractStorage.newContractStorageActor() - if err != nil { - return nil, err - } - return policy_morph.NewContractStorage(contractStorageActor, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) + rpcActor := contractStorage.rpcActorProvider.GetRPCActor() + inv := &invokerAdapter{Invoker: invoker.New(rpcActor, nil), rpcInvoker: rpcActor} + return policy_morph.NewContractStorageReader(inv, contractStorage.policyScriptHash).ListMorphRuleChains(name, target) +} + +type invokerAdapter struct { + *invoker.Invoker + rpcInvoker invoker.RPCInvoke +} + +func (n *invokerAdapter) GetRPCInvoker() invoker.RPCInvoke { + return n.rpcInvoker } From 198aaebc94495b992a2eb143f501b14fde4ca641 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 28 Dec 2024 10:20:53 +0300 Subject: [PATCH 311/591] [#1506] morph: Simplify WaitTxHalt() signature Avoid dependency on `morph/client` package because of `InvokeRes`. Make signature resemble `WaitAny()` method of `waiter.Waiter` from neo-go. Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/morph.go | 2 +- cmd/frostfs-node/netmap.go | 2 +- pkg/morph/client/waiter.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 5415da12a..657e22389 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -151,7 +151,7 @@ func makeNotaryDeposit(ctx context.Context, c *cfg) (util.Uint256, uint32, error } func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256, vub uint32) error { - if err := c.cfgMorph.client.WaitTxHalt(ctx, client.InvokeRes{Hash: tx, VUB: vub}); err != nil { + if err := c.cfgMorph.client.WaitTxHalt(ctx, vub, tx); err != nil { return err } diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 2eb4cd132..a26fdd798 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -423,7 +423,7 @@ func (c *cfg) updateNetMapState(ctx context.Context, stateSetter func(*nmClient. if err != nil { return err } - return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res) + return c.cfgNetmap.wrapper.Morph().WaitTxHalt(ctx, res.VUB, res.Hash) } type netInfo struct { diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 962ec1bc2..5b9d2cbe0 100644 --- a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -33,13 +33,13 @@ func (w *waiterClient) GetVersion() (*result.Version, error) { // WaitTxHalt waits until transaction with the specified hash persists on the blockchain. // It also checks execution result to finish in HALT state. 
-func (c *Client) WaitTxHalt(ctx context.Context, p InvokeRes) error { +func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) error { w, err := waiter.NewPollingBased(&waiterClient{c: c}) if err != nil { return fmt.Errorf("create tx waiter: %w", err) } - res, err := w.WaitAny(ctx, p.VUB, p.Hash) + res, err := w.WaitAny(ctx, vub, h) if err != nil { return fmt.Errorf("wait until tx persists: %w", err) } From f7e75b13b0b3796852d061c06a4d40cc6f5501d7 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 20 Dec 2024 15:01:44 +0300 Subject: [PATCH 312/591] [#1506] ape_manager: Await tx persist before returning response Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/apemanager.go | 1 + pkg/services/apemanager/executor.go | 27 ++++++++++++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index de3aed660..e761a1b14 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -19,6 +19,7 @@ func initAPEManagerService(c *cfg) { c.cfgObject.cfgAccessPolicyEngine.policyContractHash) execsvc := apemanager.New(c.cfgObject.cnrSource, contractStorage, + c.cfgMorph.client, apemanager.WithLogger(c.log)) sigsvc := apemanager.NewSignService(&c.key.PrivateKey, execsvc) auditSvc := apemanager.NewAuditService(sigsvc, c.log, c.audit) diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index cc792e23d..9d8f665af 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -22,6 +22,7 @@ import ( policy_engine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "github.com/mr-tron/base58/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" "go.uber.org/zap" ) @@ -34,6 +35,8 @@ type cfg struct { type Service struct { cfg + waiter Waiter + cnrSrc containercore.Source contractStorage ape_contract.ProxyAdaptedContractStorage @@ -41,11 +44,17 @@ type Service struct { type Option func(*cfg) -func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, opts ...Option) *Service { +type Waiter interface { + WaitTxHalt(context.Context, uint32, util.Uint256) error +} + +func New(cnrSrc containercore.Source, contractStorage ape_contract.ProxyAdaptedContractStorage, waiter Waiter, opts ...Option) *Service { s := &Service{ cnrSrc: cnrSrc, contractStorage: contractStorage, + + waiter: waiter, } for i := range opts { @@ -84,7 +93,7 @@ func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.Public return nil } -func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { +func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainRequest) (*apemanagerV2.AddChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -116,7 +125,11 @@ func (s *Service) AddChain(_ context.Context, req *apemanagerV2.AddChainRequest) return nil, fmt.Errorf("unsupported target type: %s", targetType) } - if _, _, err = s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain); err != nil { + txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } @@ -129,7 +142,7 @@ func (s *Service) AddChain(_ context.Context, req 
*apemanagerV2.AddChainRequest) return resp, nil } -func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { +func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChainRequest) (*apemanagerV2.RemoveChainResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -148,7 +161,11 @@ func (s *Service) RemoveChain(_ context.Context, req *apemanagerV2.RemoveChainRe return nil, fmt.Errorf("unsupported target type: %s", targetType) } - if _, _, err = s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()); err != nil { + txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) + if err != nil { + return nil, err + } + if err := s.waiter.WaitTxHalt(ctx, vub, txHash); err != nil { return nil, err } From ceac1c870915898770603208de2c1cb2d9a23db0 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 9 Jan 2025 20:52:24 +0300 Subject: [PATCH 313/591] [#1594] dev: Remove unused parameter 'FROSTFS_MORPH_INACTIVITY_TIMEOUT' Signed-off-by: Alexander Chuprov --- dev/.vscode-example/launch.json | 4 ---- 1 file changed, 4 deletions(-) diff --git a/dev/.vscode-example/launch.json b/dev/.vscode-example/launch.json index 6abf5ecdc..b68ce4fa3 100644 --- a/dev/.vscode-example/launch.json +++ b/dev/.vscode-example/launch.json @@ -42,7 +42,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet01.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8080", @@ -98,7 +97,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet02.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8082", @@ -154,7 +152,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet03.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8084", @@ -210,7 +207,6 @@ "FROSTFS_MORPH_DIAL_TIMEOUT":"30s", "FROSTFS_MORPH_RPC_ENDPOINT_0_ADDRESS":"ws://127.0.0.1:30333/ws", "FROSTFS_MORPH_RPC_ENDPOINT_0_PRIORITY":"0", - "FROSTFS_MORPH_INACTIVITY_TIMEOUT":"60s", "FROSTFS_NODE_WALLET_PATH":"${workspaceFolder}/dev/storage/wallet04.json", "FROSTFS_NODE_WALLET_PASSWORD":"", "FROSTFS_NODE_ADDRESSES":"127.0.0.1:8086", From 09faca034c089b99da140519ef723e03f2c2238b Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Fri, 10 Jan 2025 14:53:10 +0300 Subject: [PATCH 314/591] [#1593] node: Fix initialization of frostfsid cache Signed-off-by: Alexander Chuprov --- cmd/frostfs-node/container.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index be0acf738..98fea9f41 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -43,7 +43,7 @@ func initContainerService(_ context.Context, c *cfg) { fatalOnErr(err) cacheSize := 
morphconfig.FrostfsIDCacheSize(c.appCfg) - if cacheSize > 0 { + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } From a2485637bb91fcf976ce21cfa09af95f12c27c94 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Fri, 10 Jan 2025 15:13:10 +0300 Subject: [PATCH 315/591] [#1593] node/config_example: Add description of morph/cache_ttl=0 behavior Signed-off-by: Alexander Chuprov --- config/example/node.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/example/node.yaml b/config/example/node.yaml index a179b4704..c5acf5386 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -79,7 +79,8 @@ contracts: # side chain NEOFS contract script hashes; optional, override values morph: dial_timeout: 30s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching. + cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). + # Negative value disables caching. A zero value sets the default value. # Default value: block time. It is recommended to have this value less or equal to block time. # Cached entities: containers, container lists, eACL tables. container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache. From 6c51f48aab6932b035a42f1982a531eed8251174 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 13 Jan 2025 12:52:02 +0300 Subject: [PATCH 316/591] [#1596] metrics: Create public aliases for internal `engine` metrics Signed-off-by: Anton Nikiforov --- pkg/local_object_storage/engine/metrics.go | 44 ++++++---------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go index 75936206d..963292d83 100644 --- a/pkg/local_object_storage/engine/metrics.go +++ b/pkg/local_object_storage/engine/metrics.go @@ -7,34 +7,12 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) -type MetricRegister interface { - AddMethodDuration(method string, d time.Duration) - - SetObjectCounter(shardID, objectType string, v uint64) - AddToObjectCounter(shardID, objectType string, delta int) - - SetMode(shardID string, mode mode.Mode) - - AddToContainerSize(cnrID string, size int64) - DeleteContainerSize(cnrID string) - DeleteContainerCount(cnrID string) - AddToPayloadCounter(shardID string, size int64) - IncErrorCounter(shardID string) - ClearErrorCounter(shardID string) - DeleteShardMetrics(shardID string) - - SetContainerObjectCounter(shardID, contID, objectType string, v uint64) - IncContainerObjectCounter(shardID, contID, objectType string) - SubContainerObjectCounter(shardID, contID, objectType string, v uint64) - - IncRefillObjectsCount(shardID, path string, size int, success bool) - SetRefillPercent(shardID, path string, percent uint32) - SetRefillStatus(shardID, path, status string) - SetEvacuationInProgress(shardID string, value bool) - - WriteCache() metrics.WriteCacheMetrics - GC() metrics.GCMetrics -} +type ( + MetricRegister = metrics.EngineMetrics + GCMetrics = metrics.GCMetrics + WriteCacheMetrics = metrics.WriteCacheMetrics + NullBool = metrics.NullBool +) func elapsed(method string, addFunc func(method string, d time.Duration)) func() { t := time.Now() @@ -76,9 +54,9 @@ type ( ) var ( - _ 
MetricRegister = noopMetrics{} - _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{} - _ metrics.GCMetrics = noopGCMetrics{} + _ MetricRegister = noopMetrics{} + _ WriteCacheMetrics = noopWriteCacheMetrics{} + _ GCMetrics = noopGCMetrics{} ) func (noopMetrics) AddMethodDuration(string, time.Duration) {} @@ -99,8 +77,8 @@ func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {} func (noopMetrics) SetRefillPercent(string, string, uint32) {} func (noopMetrics) SetRefillStatus(string, string, string) {} func (noopMetrics) SetEvacuationInProgress(string, bool) {} -func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} } -func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} } +func (noopMetrics) WriteCache() WriteCacheMetrics { return noopWriteCacheMetrics{} } +func (noopMetrics) GC() GCMetrics { return noopGCMetrics{} } func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {} func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {} From a9f27e074bbf085016f82e1b4802c87ede452283 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Fri, 10 Jan 2025 16:27:08 +0300 Subject: [PATCH 317/591] [#1243] object: Look for X-Headers within origin before APE check * X-Headers can be found in `origin` field of `MetaHeader` if the request has been forwarded from non-container node. Signed-off-by: Airat Arifullin --- pkg/services/object/ape/service.go | 56 +++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 8 deletions(-) diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index c6d152e0f..d9594a3fc 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -150,6 +150,11 @@ type putStreamBasicChecker struct { } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { reqCtx, err := requestContext(ctx) if err != nil { @@ -171,7 +176,7 @@ func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutR ContainerOwner: reqCtx.ContainerOwner, Role: nativeSchemaRole(reqCtx.Role), BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -204,6 +209,11 @@ type patchStreamBasicChecker struct { } func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + if !p.nonFirstSend { p.nonFirstSend = true @@ -226,7 +236,7 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa ContainerOwner: reqCtx.ContainerOwner, Role: nativeSchemaRole(reqCtx.Role), BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -251,6 +261,11 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = 
meta.GetOrigin() { + meta = origin + } + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err @@ -295,7 +310,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -304,6 +319,11 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + var cnrID cid.ID if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { if err := cnrID.ReadFromV2(*cnrV2); err != nil { @@ -324,7 +344,7 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -334,6 +354,11 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err @@ -353,7 +378,7 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -368,6 +393,11 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return toStatusErr(err) @@ -387,7 +417,7 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -397,6 +427,11 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + cnrID, objID, err := 
getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err @@ -416,7 +451,7 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -431,6 +466,11 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin + } + cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err @@ -451,7 +491,7 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ SenderKey: hex.EncodeToString(reqCtx.SenderKey), ContainerOwner: reqCtx.ContainerOwner, BearerToken: reqCtx.BearerToken, - XHeaders: request.GetMetaHeader().GetXHeaders(), + XHeaders: meta.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { From 4d5ae59a52e7ab0ce89e2c5cc1f9c6c88a193de2 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 13 Jan 2025 16:55:43 +0300 Subject: [PATCH 318/591] [#1598] golangci: Enable unconvert linters To drop unnecessary conversions. Signed-off-by: Dmitrii Stepanov --- .golangci.yml | 1 + cmd/frostfs-cli/modules/bearer/generate_override.go | 2 +- cmd/frostfs-node/config.go | 4 ++-- cmd/frostfs-node/config/node/config.go | 4 ++-- cmd/frostfs-node/netmap.go | 2 +- .../blobstor/fstree/fstree_write_generic.go | 2 +- .../blobstor/fstree/fstree_write_linux.go | 2 +- pkg/local_object_storage/blobstor/memstore/memstore.go | 4 ++-- pkg/local_object_storage/engine/evacuate.go | 2 +- pkg/local_object_storage/engine/shards.go | 4 ++-- pkg/local_object_storage/shard/control.go | 2 +- pkg/morph/event/netmap/epoch.go | 2 +- pkg/services/object/put/v2/streamer.go | 2 +- 13 files changed, 17 insertions(+), 16 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 57e3b4494..d0e45aa75 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -89,5 +89,6 @@ linters: - protogetter - intrange - tenv + - unconvert disable-all: true fast: false diff --git a/cmd/frostfs-cli/modules/bearer/generate_override.go b/cmd/frostfs-cli/modules/bearer/generate_override.go index 13fe07995..9632061f1 100644 --- a/cmd/frostfs-cli/modules/bearer/generate_override.go +++ b/cmd/frostfs-cli/modules/bearer/generate_override.go @@ -52,7 +52,7 @@ func genereateAPEOverride(cmd *cobra.Command, _ []string) { outputPath, _ := cmd.Flags().GetString(outputFlag) if outputPath != "" { - err := os.WriteFile(outputPath, []byte(overrideMarshalled), 0o644) + err := os.WriteFile(outputPath, overrideMarshalled, 0o644) commonCmd.ExitOnErr(cmd, "dump error: %w", err) } else { fmt.Print("\n") diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 18d3e2454..3392589bf 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -853,8 +853,8 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } func initCfgGRPC() cfgGRPC { - maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload - maxAddrAmount := 
uint64(maxChunkSize) / addressSize // each address is about 72 bytes + maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload + maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes return cfgGRPC{ maxChunkSize: maxChunkSize, diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 4d063245b..969d77396 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -198,7 +198,7 @@ func (l PersistentPolicyRulesConfig) Path() string { // // Returns PermDefault if the value is not a positive number. func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { - p := config.UintSafe((*config.Config)(l.cfg), "perm") + p := config.UintSafe(l.cfg, "perm") if p == 0 { p = PermDefault } @@ -210,7 +210,7 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode { // // Returns false if the value is not a boolean. func (l PersistentPolicyRulesConfig) NoSync() bool { - return config.BoolSafe((*config.Config)(l.cfg), "no_sync") + return config.BoolSafe(l.cfg, "no_sync") } // CompatibilityMode returns true if need to run node in compatibility with previous versions mode. diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index a26fdd798..34cd00ac8 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -86,7 +86,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) { } } - s.setControlNetmapStatus(control.NetmapStatus(ctrlNetSt)) + s.setControlNetmapStatus(ctrlNetSt) } // sets the current node state to the given value. Subsequent cfg.bootstrap diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go index 4110ba7d7..07a618b0a 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go @@ -136,6 +136,6 @@ func (w *genericWriter) removeWithCounter(p string, size uint64) error { if err := os.Remove(p); err != nil { return err } - w.fileCounter.Dec(uint64(size)) + w.fileCounter.Dec(size) return nil } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go index 3561c616b..c62654028 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go @@ -114,7 +114,7 @@ func (w *linuxWriter) removeFile(p string, size uint64) error { return logicerr.Wrap(new(apistatus.ObjectNotFound)) } if err == nil { - w.fileCounter.Dec(uint64(size)) + w.fileCounter.Dec(size) } return err } diff --git a/pkg/local_object_storage/blobstor/memstore/memstore.go b/pkg/local_object_storage/blobstor/memstore/memstore.go index 3afef7d18..7ef7e37a4 100644 --- a/pkg/local_object_storage/blobstor/memstore/memstore.go +++ b/pkg/local_object_storage/blobstor/memstore/memstore.go @@ -133,11 +133,11 @@ func (s *memstoreImpl) Iterate(_ context.Context, req common.IteratePrm) (common elem := common.IterationElement{ ObjectData: v, } - if err := elem.Address.DecodeString(string(k)); err != nil { + if err := elem.Address.DecodeString(k); err != nil { if req.IgnoreErrors { continue } - return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, string(k), err)) + return common.IterateRes{}, logicerr.Wrap(fmt.Errorf("(%T) decoding address string %q: %v", s, k, err)) } var err error if elem.ObjectData, err = 
s.compression.Decompress(elem.ObjectData); err != nil { diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 623f5c941..682f23dff 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -724,7 +724,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) shards := make([]pooledShard, 0, len(e.shards)) for id := range e.shards { shards = append(shards, pooledShard{ - hashedShard: hashedShard(e.shards[id]), + hashedShard: e.shards[id], pool: e.shardPools[id], }) } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 6d4844b75..8e191f72c 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -272,7 +272,7 @@ func (e *StorageEngine) sortShards(objAddr interface{ EncodeToString() string }) h := hrw.StringHash(objAddr.EncodeToString()) shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } hrw.SortHasherSliceByValue(shards, h) return shards @@ -285,7 +285,7 @@ func (e *StorageEngine) unsortedShards() []hashedShard { shards := make([]hashedShard, 0, len(e.shards)) for _, sh := range e.shards { - shards = append(shards, hashedShard(sh)) + shards = append(shards, sh) } return shards diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 1c1933af5..3136ddfcc 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -38,7 +38,7 @@ func (s *Shard) handleMetabaseFailure(ctx context.Context, stage string, err err err = s.SetMode(ctx, mode.DegradedReadOnly) if err != nil { - return fmt.Errorf("switch to mode %s", mode.Mode(mode.DegradedReadOnly)) + return fmt.Errorf("switch to mode %s", mode.DegradedReadOnly) } return nil } diff --git a/pkg/morph/event/netmap/epoch.go b/pkg/morph/event/netmap/epoch.go index 4dcc0d035..39c8f6237 100644 --- a/pkg/morph/event/netmap/epoch.go +++ b/pkg/morph/event/netmap/epoch.go @@ -41,7 +41,7 @@ func ParseNewEpoch(e *state.ContainedNotificationEvent) (event.Event, error) { } return NewEpoch{ - Num: uint64(nee.Epoch.Uint64()), + Num: nee.Epoch.Uint64(), Hash: e.Container, }, nil } diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index 36b514fbc..1cd10cd7f 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -59,7 +59,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) maxSz := s.stream.MaxSizeSrc.MaxObjectSize() s.sizes = &sizes{ - payloadSz: uint64(v.GetHeader().GetPayloadLength()), + payloadSz: v.GetHeader().GetPayloadLength(), } // check payload size limit overflow From fb928616ccade3c704dd98dccd7f2e521d27e5c5 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 13 Jan 2025 17:43:39 +0300 Subject: [PATCH 319/591] [#1598] golangci: Enable unparam linter To drop unnecessary parameters and return values. 
Signed-off-by: Dmitrii Stepanov --- .golangci.yml | 1 + .../modules/morph/frostfsid/frostfsid.go | 16 +++++----- .../modules/container/policy_playground.go | 7 ++--- cmd/frostfs-lens/internal/tui/buckets.go | 18 ++++------- cmd/frostfs-lens/internal/tui/db.go | 31 +++++++------------ cmd/frostfs-lens/internal/tui/records.go | 8 ++--- cmd/frostfs-node/config.go | 13 +++----- internal/logs/logs.go | 2 -- pkg/innerring/initialization.go | 6 ++-- pkg/innerring/locode.go | 4 +-- .../blobstor/blobovniczatree/delete.go | 17 +++++----- .../blobstor/blobovniczatree/iterate.go | 6 ++++ pkg/local_object_storage/engine/container.go | 14 ++++----- pkg/local_object_storage/engine/delete.go | 6 ++-- pkg/local_object_storage/engine/inhume.go | 10 +++--- pkg/local_object_storage/engine/select.go | 18 +++++------ .../metabase/containers.go | 13 ++++---- pkg/local_object_storage/metabase/counter.go | 4 +-- pkg/local_object_storage/metabase/delete.go | 6 ++-- pkg/local_object_storage/metabase/inhume.go | 4 +-- .../metabase/storage_id.go | 18 +++++------ pkg/local_object_storage/pilorama/boltdb.go | 22 +++++-------- pkg/local_object_storage/writecache/cache.go | 3 +- pkg/local_object_storage/writecache/state.go | 3 +- pkg/morph/event/listener.go | 19 +++--------- .../object_manager/placement/traverser.go | 9 ++---- 26 files changed, 123 insertions(+), 155 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d0e45aa75..d83f36de8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -90,5 +90,6 @@ linters: - intrange - tenv - unconvert + - unparam disable-all: true fast: false diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index db98bb8ad..b229d0436 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -253,7 +253,7 @@ func frostfsidListNamespaces(cmd *cobra.Command, _ []string) { reader := frostfsidrpclient.NewReader(inv, hash) sessionID, it, err := reader.ListNamespaces() commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) namespaces, err := frostfsidclient.ParseNamespaces(items) @@ -305,7 +305,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListNamespaceSubjects(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, iteratorBatchSize, sessionID)) + subAddresses, err := frostfsidclient.UnwrapArrayOfUint160(readIterator(inv, &it, sessionID)) commonCmd.ExitOnErr(cmd, "can't unwrap: %w", err) sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) @@ -319,7 +319,7 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListSubjects() commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) subj, err := frostfsidclient.ParseSubject(items) @@ -365,7 +365,7 @@ func frostfsidListGroups(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroups(ns) commonCmd.ExitOnErr(cmd, "can't get namespace: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, 
sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) groups, err := frostfsidclient.ParseGroups(items) commonCmd.ExitOnErr(cmd, "can't parse groups: %w", err) @@ -415,7 +415,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sessionID, it, err := reader.ListGroupSubjects(ns, big.NewInt(groupID)) commonCmd.ExitOnErr(cmd, "can't list groups: %w", err) - items, err := readIterator(inv, &it, iteratorBatchSize, sessionID) + items, err := readIterator(inv, &it, sessionID) commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) subjects, err := frostfsidclient.UnwrapArrayOfUint160(items, err) @@ -492,17 +492,17 @@ func (f *frostfsidClient) sendWaitRes() (*state.AppExecResult, error) { return f.roCli.Wait(f.wCtx.SentTxs[0].Hash, f.wCtx.SentTxs[0].Vub, nil) } -func readIterator(inv *invoker.Invoker, iter *result.Iterator, batchSize int, sessionID uuid.UUID) ([]stackitem.Item, error) { +func readIterator(inv *invoker.Invoker, iter *result.Iterator, sessionID uuid.UUID) ([]stackitem.Item, error) { var shouldStop bool res := make([]stackitem.Item, 0) for !shouldStop { - items, err := inv.TraverseIterator(sessionID, iter, batchSize) + items, err := inv.TraverseIterator(sessionID, iter, iteratorBatchSize) if err != nil { return nil, err } res = append(res, items...) - shouldStop = len(items) < batchSize + shouldStop = len(items) < iteratorBatchSize } return res, nil diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 40bd4110b..dcd755510 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -23,11 +23,11 @@ type policyPlaygroundREPL struct { nodes map[string]netmap.NodeInfo } -func newPolicyPlaygroundREPL(cmd *cobra.Command) (*policyPlaygroundREPL, error) { +func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { return &policyPlaygroundREPL{ cmd: cmd, nodes: map[string]netmap.NodeInfo{}, - }, nil + } } func (repl *policyPlaygroundREPL) handleLs(args []string) error { @@ -246,8 +246,7 @@ var policyPlaygroundCmd = &cobra.Command{ Long: `A REPL for testing placement policies. If a wallet and endpoint is provided, the initial netmap data will be loaded from the snapshot of the node. 
Otherwise, an empty playground is created.`, Run: func(cmd *cobra.Command, _ []string) { - repl, err := newPolicyPlaygroundREPL(cmd) - commonCmd.ExitOnErr(cmd, "could not create policy playground: %w", err) + repl := newPolicyPlaygroundREPL(cmd) commonCmd.ExitOnErr(cmd, "policy playground failed: %w", repl.run()) }, } diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go index 3f5088e7a..2d3b20792 100644 --- a/cmd/frostfs-lens/internal/tui/buckets.go +++ b/cmd/frostfs-lens/internal/tui/buckets.go @@ -124,10 +124,7 @@ func (v *BucketsView) loadNodeChildren( path := parentBucket.Path parser := parentBucket.NextParser - buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) - if err != nil { - return err - } + buffer := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize) for item := range buffer { if item.err != nil { @@ -135,6 +132,7 @@ func (v *BucketsView) loadNodeChildren( } bucket := item.val + var err error bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil) if err != nil { return err @@ -180,10 +178,7 @@ func (v *BucketsView) bucketSatisfiesFilter( defer cancel() // Check the current bucket's nested buckets if exist - bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - if err != nil { - return false, err - } + bucketsBuffer := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) for item := range bucketsBuffer { if item.err != nil { @@ -191,6 +186,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } b := item.val + var err error b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil) if err != nil { return false, err @@ -206,10 +202,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } // Check the current bucket's nested records if exist - recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) - if err != nil { - return false, err - } + recordsBuffer := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize) for item := range recordsBuffer { if item.err != nil { @@ -217,6 +210,7 @@ func (v *BucketsView) bucketSatisfiesFilter( } r := item.val + var err error r.Entry, _, err = bucket.NextParser(r.Key, r.Value) if err != nil { return false, err diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go index d0cf611d4..94fa87f98 100644 --- a/cmd/frostfs-lens/internal/tui/db.go +++ b/cmd/frostfs-lens/internal/tui/db.go @@ -35,7 +35,7 @@ func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) { func load[T any]( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, filter func(key, value []byte) bool, transform func(key, value []byte) T, -) (<-chan Item[T], error) { +) <-chan Item[T] { buffer := make(chan Item[T], bufferSize) go func() { @@ -77,13 +77,13 @@ func load[T any]( } }() - return buffer, nil + return buffer } func LoadBuckets( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) (<-chan Item[*Bucket], error) { - buffer, err := load( +) <-chan Item[*Bucket] { + buffer := load( ctx, db, path, bufferSize, func(_, value []byte) bool { return value == nil @@ -98,17 +98,14 @@ func LoadBuckets( } }, ) - if err != nil { - return nil, fmt.Errorf("can't start iterating bucket: %w", err) - } - return buffer, nil + return buffer } func LoadRecords( ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int, -) (<-chan Item[*Record], error) { - buffer, err := load( +) <-chan Item[*Record] { + buffer := load( ctx, db, path, bufferSize, func(_, value []byte) bool { return value != 
nil @@ -124,11 +121,8 @@ func LoadRecords( } }, ) - if err != nil { - return nil, fmt.Errorf("can't start iterating bucket: %w", err) - } - return buffer, nil + return buffer } // HasBuckets checks if a bucket has nested buckets. It relies on assumption @@ -137,24 +131,21 @@ func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) ctx, cancel := context.WithCancel(ctx) defer cancel() - buffer, err := load( + buffer := load( ctx, db, path, 1, nil, func(_, value []byte) []byte { return value }, ) - if err != nil { - return false, err - } x, ok := <-buffer if !ok { return false, nil } if x.err != nil { - return false, err + return false, x.err } if x.val != nil { - return false, err + return false, nil } return true, nil } diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go index 5f53ed287..5f61df884 100644 --- a/cmd/frostfs-lens/internal/tui/records.go +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -62,10 +62,7 @@ func (v *RecordsView) Mount(ctx context.Context) error { ctx, v.onUnmount = context.WithCancel(ctx) - tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) - if err != nil { - return err - } + tempBuffer := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize) v.buffer = make(chan *Record, v.ui.loadBufferSize) go func() { @@ -73,11 +70,12 @@ func (v *RecordsView) Mount(ctx context.Context) error { for item := range tempBuffer { if item.err != nil { - v.ui.stopOnError(err) + v.ui.stopOnError(item.err) break } record := item.val + var err error record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value) if err != nil { v.ui.stopOnError(err) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 3392589bf..9931d0dc8 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -698,8 +698,7 @@ func initCfg(appCfg *config.Config) *cfg { netState.metrics = c.metricsCollector - logPrm, err := c.loggerPrm() - fatalOnErr(err) + logPrm := c.loggerPrm() logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) @@ -1059,7 +1058,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return sh } -func (c *cfg) loggerPrm() (*logger.Prm, error) { +func (c *cfg) loggerPrm() *logger.Prm { // check if it has been inited before if c.dynamicConfiguration.logger == nil { c.dynamicConfiguration.logger = new(logger.Prm) @@ -1078,7 +1077,7 @@ func (c *cfg) loggerPrm() (*logger.Prm, error) { } c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp - return c.dynamicConfiguration.logger, nil + return c.dynamicConfiguration.logger } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1334,11 +1333,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { // Logger - logPrm, err := c.loggerPrm() - if err != nil { - c.log.Error(ctx, logs.FrostFSNodeLoggerConfigurationPreparation, zap.Error(err)) - return - } + logPrm := c.loggerPrm() components := c.getComponents(ctx, logPrm) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index b24f3593d..0610dc175 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -146,7 +146,6 @@ const ( ClientCantGetBlockchainHeight = "can't get blockchain height" ClientCantGetBlockchainHeight243 = "can't get blockchain height" EventCouldNotSubmitHandlerToWorkerPool = "could not Submit handler to worker pool" - EventCouldNotStartListenToEvents = "could not start listen to events" 
EventStopEventListenerByError = "stop event listener by error" EventStopEventListenerByContext = "stop event listener by context" EventStopEventListenerByNotificationChannel = "stop event listener by notification channel" @@ -384,7 +383,6 @@ const ( FrostFSNodeShutdownSkip = "node is already shutting down, skipped shutdown" FrostFSNodeShutdownWhenNotReady = "node is going to shut down when subsystems are still initializing" FrostFSNodeConfigurationReading = "configuration reading" - FrostFSNodeLoggerConfigurationPreparation = "logger configuration preparation" FrostFSNodeTracingConfigationUpdated = "tracing configation updated" FrostFSNodeStorageEngineConfigurationUpdate = "storage engine configuration update" FrostFSNodePoolConfigurationUpdate = "adjust pool configuration" diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index ecaf8ae86..f7b71dbe6 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -38,10 +38,7 @@ import ( func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, alphaSync event.Handler, ) error { - locodeValidator, err := s.newLocodeValidator(cfg) - if err != nil { - return err - } + locodeValidator := s.newLocodeValidator(cfg) netSettings := (*networkSettings)(s.netmapClient) @@ -51,6 +48,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, poolSize := cfg.GetInt("workers.netmap") s.log.Debug(ctx, logs.NetmapNetmapWorkerPool, zap.Int("size", poolSize)) + var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ Log: s.log, Metrics: s.irMetrics, diff --git a/pkg/innerring/locode.go b/pkg/innerring/locode.go index a0c3ea751..ae4c85168 100644 --- a/pkg/innerring/locode.go +++ b/pkg/innerring/locode.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/viper" ) -func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, error) { +func (s *Server) newLocodeValidator(cfg *viper.Viper) netmap.NodeValidator { locodeDB := locodebolt.New(locodebolt.Prm{ Path: cfg.GetString("locode.db.path"), }, @@ -21,7 +21,7 @@ func (s *Server) newLocodeValidator(cfg *viper.Viper) (netmap.NodeValidator, err return irlocode.New(irlocode.Prm{ DB: (*locodeBoltDBWrapper)(locodeDB), - }), nil + }) } type locodeBoltEntryWrapper struct { diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 47e12bafb..8d17fc4b0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -19,7 +19,10 @@ import ( "go.uber.org/zap" ) -var errObjectIsDeleteProtected = errors.New("object is delete protected") +var ( + errObjectIsDeleteProtected = errors.New("object is delete protected") + deleteRes = common.DeleteRes{} +) // Delete deletes object from blobovnicza tree. 
// @@ -43,17 +46,17 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co defer span.End() if b.readOnly { - return common.DeleteRes{}, common.ErrReadOnly + return deleteRes, common.ErrReadOnly } if b.rebuildGuard.TryRLock() { defer b.rebuildGuard.RUnlock() } else { - return common.DeleteRes{}, errRebuildInProgress + return deleteRes, errRebuildInProgress } if b.deleteProtectedObjects.Contains(prm.Address) { - return common.DeleteRes{}, errObjectIsDeleteProtected + return deleteRes, errObjectIsDeleteProtected } var bPrm blobovnicza.DeletePrm @@ -98,7 +101,7 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co if err == nil && !objectFound { // not found in any blobovnicza - return common.DeleteRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) + return deleteRes, logicerr.Wrap(new(apistatus.ObjectNotFound)) } success = err == nil @@ -112,7 +115,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz shBlz := b.getBlobovnicza(ctx, blzPath) blz, err := shBlz.Open(ctx) if err != nil { - return common.DeleteRes{}, err + return deleteRes, err } defer shBlz.Close(ctx) @@ -122,5 +125,5 @@ func (b *Blobovniczas) deleteObjectFromLevel(ctx context.Context, prm blobovnicz // removes object from blobovnicza and returns common.DeleteRes. func (b *Blobovniczas) deleteObject(ctx context.Context, blz *blobovnicza.Blobovnicza, prm blobovnicza.DeletePrm) (common.DeleteRes, error) { _, err := blz.Delete(ctx, prm) - return common.DeleteRes{}, err + return deleteRes, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go index 5c2d58ca1..ceb8fb7e3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go @@ -249,6 +249,12 @@ func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Addres } func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path string, addr oid.Address, f func(string) (bool, error)) (bool, error) { + select { + case <-ctx.Done(): + return false, ctx.Err() + default: + } + sysPath := filepath.Join(b.rootPath, path) entries, err := os.ReadDir(sysPath) if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 24059a3f9..b2d7a1037 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -48,8 +48,8 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.containerSize(ctx, prm) - return err + res = e.containerSize(ctx, prm) + return nil }) return @@ -69,7 +69,7 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er return res.Size(), nil } -func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes, err error) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes) { e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) @@ -96,8 +96,8 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) defer elapsed("ListContainers", 
e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.listContainers(ctx) - return err + res = e.listContainers(ctx) + return nil }) return @@ -115,7 +115,7 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { return res.Containers(), nil } -func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { +func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { uniqueIDs := make(map[string]cid.ID) e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { @@ -142,5 +142,5 @@ func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, return ListContainersRes{ containers: result, - }, nil + } } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 65ccbdb9e..c735cc41d 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -27,6 +27,8 @@ type DeletePrm struct { // DeleteRes groups the resulting values of Delete operation. type DeleteRes struct{} +var deleteRes = DeleteRes{} + // WithAddress is a Delete option to set the addresses of the objects to delete. // // Option is required. @@ -126,14 +128,14 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e }) if locked.is { - return DeleteRes{}, new(apistatus.ObjectLocked) + return deleteRes, new(apistatus.ObjectLocked) } if splitInfo != nil { e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } - return DeleteRes{}, nil + return deleteRes, nil } func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index bae784064..a0c38cd5d 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -30,6 +30,8 @@ type InhumePrm struct { // InhumeRes encapsulates results of inhume operation. type InhumeRes struct{} +var inhumeRes = InhumeRes{} + // WithTarget sets a list of objects that should be inhumed and tombstone address // as the reason for inhume operation. // @@ -83,7 +85,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) if err != nil { - return InhumeRes{}, err + return inhumeRes, err } var shPrm shard.InhumePrm @@ -107,7 +109,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e zap.String("shard_id", shardID), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) - return InhumeRes{}, errInhumeFailure + return inhumeRes, errInhumeFailure } if _, err := sh.Inhume(ctx, shPrm); err != nil { @@ -119,11 +121,11 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e default: e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) } - return InhumeRes{}, err + return inhumeRes, err } } - return InhumeRes{}, nil + return inhumeRes, nil } // groupObjectsByShard groups objects based on the shard(s) they are stored on. 
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 02149b4c8..fc8b4a9a7 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,19 +54,17 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e._select(ctx, prm) - return err + res = e._select(ctx, prm) + return nil }) return } -func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { +func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) - var outError error - var shPrm shard.SelectPrm shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) @@ -90,7 +88,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, return SelectRes{ addrList: addrList, - }, outError + } } // List returns `limit` available physically storage object addresses in engine. @@ -100,14 +98,14 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res, err = e.list(ctx, limit) - return err + res = e.list(ctx, limit) + return nil }) return } -func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { +func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) @@ -136,7 +134,7 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro return SelectRes{ addrList: addrList, - }, nil + } } // Select selects objects from local storage using provided filters. 
diff --git a/pkg/local_object_storage/metabase/containers.go b/pkg/local_object_storage/metabase/containers.go index 472b2affc..da27e6085 100644 --- a/pkg/local_object_storage/metabase/containers.go +++ b/pkg/local_object_storage/metabase/containers.go @@ -56,7 +56,7 @@ func (db *DB) containers(tx *bbolt.Tx) ([]cid.ID, error) { return result, err } -func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { +func (db *DB) ContainerSize(id cid.ID) (uint64, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() @@ -64,21 +64,22 @@ func (db *DB) ContainerSize(id cid.ID) (size uint64, err error) { return 0, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - size, err = db.containerSize(tx, id) + var size uint64 + err := db.boltDB.View(func(tx *bbolt.Tx) error { + size = db.containerSize(tx, id) - return err + return nil }) return size, metaerr.Wrap(err) } -func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) (uint64, error) { +func (db *DB) containerSize(tx *bbolt.Tx, id cid.ID) uint64 { containerVolume := tx.Bucket(containerVolumeBucketName) key := make([]byte, cidSize) id.Encode(key) - return parseContainerSize(containerVolume.Get(key)), nil + return parseContainerSize(containerVolume.Get(key)) } func parseContainerID(dst *cid.ID, name []byte, ignore map[string]struct{}) bool { diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go index f29dafe77..732f99519 100644 --- a/pkg/local_object_storage/metabase/counter.go +++ b/pkg/local_object_storage/metabase/counter.go @@ -251,13 +251,13 @@ func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error { return db.incContainerObjectCounter(tx, cnrID, isUserObject) } -func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64, inc bool) error { +func (db *DB) decShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint64) error { b := tx.Bucket(shardInfoBucket) if b == nil { return nil } - return db.updateShardObjectCounterBucket(b, typ, delta, inc) + return db.updateShardObjectCounterBucket(b, typ, delta, false) } func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error { diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index 00ee2baa3..d338e228f 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -161,21 +161,21 @@ func (db *DB) deleteGroup(tx *bbolt.Tx, addrs []oid.Address) (DeleteRes, error) func (db *DB) updateCountersDelete(tx *bbolt.Tx, res DeleteRes) error { if res.phyCount > 0 { - err := db.updateShardObjectCounter(tx, phy, res.phyCount, false) + err := db.decShardObjectCounter(tx, phy, res.phyCount) if err != nil { return fmt.Errorf("decrease phy object counter: %w", err) } } if res.logicCount > 0 { - err := db.updateShardObjectCounter(tx, logical, res.logicCount, false) + err := db.decShardObjectCounter(tx, logical, res.logicCount) if err != nil { return fmt.Errorf("decrease logical object counter: %w", err) } } if res.userCount > 0 { - err := db.updateShardObjectCounter(tx, user, res.userCount, false) + err := db.decShardObjectCounter(tx, user, res.userCount) if err != nil { return fmt.Errorf("decrease user object counter: %w", err) } diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 99fdec310..76018fb61 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ 
b/pkg/local_object_storage/metabase/inhume.go @@ -342,10 +342,10 @@ func (db *DB) inhumeECInfo(tx *bbolt.Tx, epoch uint64, tomb *oid.Address, res *I } func (db *DB) applyInhumeResToCounters(tx *bbolt.Tx, res *InhumeRes) error { - if err := db.updateShardObjectCounter(tx, logical, res.LogicInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, logical, res.LogicInhumed()); err != nil { return err } - if err := db.updateShardObjectCounter(tx, user, res.UserInhumed(), false); err != nil { + if err := db.decShardObjectCounter(tx, user, res.UserInhumed()); err != nil { return err } diff --git a/pkg/local_object_storage/metabase/storage_id.go b/pkg/local_object_storage/metabase/storage_id.go index 6d620b41a..8f2376503 100644 --- a/pkg/local_object_storage/metabase/storage_id.go +++ b/pkg/local_object_storage/metabase/storage_id.go @@ -35,7 +35,7 @@ func (r StorageIDRes) StorageID() []byte { // StorageID returns storage descriptor for objects from the blobstor. // It is put together with the object can makes get/delete operation faster. -func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) { +func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (StorageIDRes, error) { var ( startedAt = time.Now() success = false @@ -53,32 +53,32 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes db.modeMtx.RLock() defer db.modeMtx.RUnlock() + var res StorageIDRes if db.mode.NoMetabase() { return res, ErrDegradedMode } - err = db.boltDB.View(func(tx *bbolt.Tx) error { - res.id, err = db.storageID(tx, prm.addr) - - return err + err := db.boltDB.View(func(tx *bbolt.Tx) error { + res.id = db.storageID(tx, prm.addr) + return nil }) success = err == nil return res, metaerr.Wrap(err) } -func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) ([]byte, error) { +func (db *DB) storageID(tx *bbolt.Tx, addr oid.Address) []byte { key := make([]byte, bucketKeySize) smallBucket := tx.Bucket(smallBucketName(addr.Container(), key)) if smallBucket == nil { - return nil, nil + return nil } storageID := smallBucket.Get(objectKey(addr.Object(), key)) if storageID == nil { - return nil, nil + return nil } - return bytes.Clone(storageID), nil + return bytes.Clone(storageID) } // UpdateStorageIDPrm groups the parameters of UpdateStorageID operation. 
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 86b19e3af..1d55d1c13 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -419,10 +419,7 @@ func (t *boltForest) addByPathInternal(d CIDDescriptor, attr string, treeID stri return err } - i, node, err := t.getPathPrefix(bTree, attr, path) - if err != nil { - return err - } + i, node := t.getPathPrefix(bTree, attr, path) ts := t.getLatestTimestamp(bLog, d.Position, d.Size) lm = make([]Move, len(path)-i+1) @@ -980,10 +977,7 @@ func (t *boltForest) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID st b := treeRoot.Bucket(dataBucket) - i, curNodes, err := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) - if err != nil { - return err - } + i, curNodes := t.getPathPrefixMultiTraversal(b, attr, path[:len(path)-1]) if i < len(path)-1 { return nil } @@ -1526,7 +1520,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* return &res, nil } -func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node, error) { +func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr string, path []string) (int, []Node) { c := bTree.Cursor() var curNodes []Node @@ -1549,14 +1543,14 @@ func (t *boltForest) getPathPrefixMultiTraversal(bTree *bbolt.Bucket, attr strin } if len(nextNodes) == 0 { - return i, curNodes, nil + return i, curNodes } } - return len(path), nextNodes, nil + return len(path), nextNodes } -func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node, error) { +func (t *boltForest) getPathPrefix(bTree *bbolt.Bucket, attr string, path []string) (int, Node) { c := bTree.Cursor() var curNode Node @@ -1576,10 +1570,10 @@ loop: childKey, value = c.Next() } - return i, curNode, nil + return i, curNode } - return len(path), curNode, nil + return len(path), curNode } func (t *boltForest) moveFromBytes(m *Move, data []byte) error { diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index e829d013c..b99d73d3a 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -94,7 +94,8 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error { if err != nil { return metaerr.Wrap(err) } - return metaerr.Wrap(c.initCounters()) + c.initCounters() + return nil } // Init runs necessary services. 
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index 835686fbb..44caa2603 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -19,7 +19,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool { return c.maxCacheSize >= size+objectSize } -func (c *cache) initCounters() error { +func (c *cache) initCounters() { c.estimateCacheSize() - return nil } diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 83f8bee07..e5cdfeef7 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -134,11 +134,8 @@ func (l *listener) Listen(ctx context.Context) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, nil); err != nil { - l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.Error(err), - ) - } + + l.listen(ctx, nil) }) } @@ -152,23 +149,17 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { l.wg.Add(1) defer l.wg.Done() - if err := l.listen(ctx, intError); err != nil { - l.log.Error(ctx, logs.EventCouldNotStartListenToEvents, - zap.Error(err), - ) - l.sendError(ctx, intError, err) - } + + l.listen(ctx, intError) }) } -func (l *listener) listen(ctx context.Context, intError chan<- error) error { +func (l *listener) listen(ctx context.Context, intError chan<- error) { subErrCh := make(chan error) go l.subscribe(subErrCh) l.listenLoop(ctx, intError, subErrCh) - - return nil } func (l *listener) subscribe(errCh chan error) { diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 6a949e938..8daf38217 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -120,10 +120,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) { } rem = []int{-1, -1} - sortedVector, err := sortVector(cfg, unsortedVector) - if err != nil { - return nil, err - } + sortedVector := sortVector(cfg, unsortedVector) ns = [][]netmap.NodeInfo{sortedVector, regularVector} } else if cfg.flatSuccess != nil { ns = flatNodes(ns) @@ -188,7 +185,7 @@ type nodeMetrics struct { metrics []int } -func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, error) { +func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) []netmap.NodeInfo { nm := make([]nodeMetrics, len(unsortedVector)) node := cfg.nodeState.LocalNodeInfo() @@ -209,7 +206,7 @@ func sortVector(cfg *cfg, unsortedVector []netmap.NodeInfo) ([]netmap.NodeInfo, for i := range unsortedVector { sortedVector[i] = unsortedVector[nm[i].index] } - return sortedVector, nil + return sortedVector } // Node is a descriptor of storage node with information required for intra-container communication. 
From eff95bd632dc11435b8356555bfbdebe35b6d1d6 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 14 Jan 2025 11:15:21 +0300 Subject: [PATCH 320/591] [#1598] engine: Drop unnecessary result structs Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/object.go | 9 +++---- pkg/local_object_storage/engine/delete.go | 20 +++++----------- .../engine/delete_test.go | 8 +++---- pkg/local_object_storage/engine/inhume.go | 24 +++++++------------ .../engine/inhume_test.go | 12 +++++----- pkg/local_object_storage/engine/lock_test.go | 19 +++++++-------- pkg/services/control/server/gc.go | 3 +-- 7 files changed, 36 insertions(+), 59 deletions(-) diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index f82a8e533..939241168 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -215,8 +215,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl prm.MarkAsGarbage(addr) prm.WithForceRemoval() - _, err := ls.Inhume(ctx, prm) - return err + return ls.Inhume(ctx, prm) } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) @@ -266,8 +265,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl var inhumePrm engine.InhumePrm inhumePrm.MarkAsGarbage(addr) - _, err := ls.Inhume(ctx, inhumePrm) - if err != nil { + if err := ls.Inhume(ctx, inhumePrm); err != nil { c.log.Warn(ctx, logs.FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage, zap.Error(err), ) @@ -476,8 +474,7 @@ func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Ad prm.WithTarget(tombstone, addrs...) - _, err := e.engine.Inhume(ctx, prm) - return err + return e.engine.Inhume(ctx, prm) } func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address, toLock []oid.ID) error { diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index c735cc41d..20c915da6 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -24,11 +24,6 @@ type DeletePrm struct { forceRemoval bool } -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} - -var deleteRes = DeleteRes{} - // WithAddress is a Delete option to set the addresses of the objects to delete. // // Option is required. @@ -53,7 +48,7 @@ func (p *DeletePrm) WithForceRemoval() { // NOTE: Marks any object to be deleted (despite any prohibitions // on operations with that object) if WithForceRemoval option has // been provided. 
-func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) { +func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Delete", trace.WithAttributes( attribute.String("address", prm.addr.EncodeToString()), @@ -62,15 +57,12 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe defer span.End() defer elapsed("Delete", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.delete(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.delete(ctx, prm) }) - - return } -func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) { +func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { var locked struct { is bool } @@ -128,14 +120,14 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, e }) if locked.is { - return deleteRes, new(apistatus.ObjectLocked) + return new(apistatus.ObjectLocked) } if splitInfo != nil { e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } - return deleteRes, nil + return nil } func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go index 0dd2e94bb..a56598c09 100644 --- a/pkg/local_object_storage/engine/delete_test.go +++ b/pkg/local_object_storage/engine/delete_test.go @@ -70,8 +70,7 @@ func TestDeleteBigObject(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -141,8 +140,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { deletePrm.WithForceRemoval() deletePrm.WithAddress(addrParent) - _, err := e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true) checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true) @@ -153,7 +151,7 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) { // delete physical var delPrm shard.DeletePrm delPrm.SetAddresses(addrParent) - _, err = s1.Delete(context.Background(), delPrm) + _, err := s1.Delete(context.Background(), delPrm) require.NoError(t, err) delPrm.SetAddresses(addrLink) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index a0c38cd5d..75bd15c8b 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -27,11 +27,6 @@ type InhumePrm struct { forceRemoval bool } -// InhumeRes encapsulates results of inhume operation. -type InhumeRes struct{} - -var inhumeRes = InhumeRes{} - // WithTarget sets a list of objects that should be inhumed and tombstone address // as the reason for inhume operation. // @@ -69,23 +64,20 @@ var errInhumeFailure = errors.New("inhume operation failed") // with that object) if WithForceRemoval option has been provided. // // Returns an error if executions are blocked (see BlockExecution). 
-func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) { +func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume") defer span.End() defer elapsed("Inhume", e.metrics.AddMethodDuration)() - err = e.execIfNotBlocked(func() error { - res, err = e.inhume(ctx, prm) - return err + return e.execIfNotBlocked(func() error { + return e.inhume(ctx, prm) }) - - return } -func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { +func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) if err != nil { - return inhumeRes, err + return err } var shPrm shard.InhumePrm @@ -109,7 +101,7 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e zap.String("shard_id", shardID), zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) - return inhumeRes, errInhumeFailure + return errInhumeFailure } if _, err := sh.Inhume(ctx, shPrm); err != nil { @@ -121,11 +113,11 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, e default: e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) } - return inhumeRes, err + return err } } - return inhumeRes, nil + return nil } // groupObjectsByShard groups objects based on the shard(s) they are stored on. diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 2d083a58c..8c5d28b15 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -55,7 +55,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -85,7 +85,7 @@ func TestStorageEngine_Inhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) addrs, err := Select(context.Background(), e, cnr, false, fs) @@ -128,7 +128,7 @@ func TestStorageEngine_ECInhume(t *testing.T) { var inhumePrm InhumePrm inhumePrm.WithTarget(tombstoneObjectAddress, parentObjectAddress) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) var alreadyRemoved *apistatus.ObjectAlreadyRemoved @@ -173,7 +173,7 @@ func TestInhumeExpiredRegularObject(t *testing.T) { var prm InhumePrm prm.WithTarget(ts, object.AddressOf(obj)) - _, err := engine.Inhume(context.Background(), prm) + err := engine.Inhume(context.Background(), prm) require.NoError(t, err) }) @@ -182,7 +182,7 @@ func TestInhumeExpiredRegularObject(t *testing.T) { var prm InhumePrm prm.MarkAsGarbage(object.AddressOf(obj)) - _, err := engine.Inhume(context.Background(), prm) + err := engine.Inhume(context.Background(), prm) require.NoError(t, err) }) } @@ -237,7 +237,7 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { prm.WithTarget(ts, addrs...) 
b.StartTimer() - _, err := engine.Inhume(context.Background(), prm) + err := engine.Inhume(context.Background(), prm) require.NoError(b, err) b.StopTimer() } diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go index 7bb9e3934..b8c9d6b1d 100644 --- a/pkg/local_object_storage/engine/lock_test.go +++ b/pkg/local_object_storage/engine/lock_test.go @@ -114,7 +114,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -127,7 +127,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombForLockAddr, lockerAddr) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorIs(t, err, meta.ErrLockObjectRemoval) // 5. @@ -136,7 +136,7 @@ func TestLockUserScenario(t *testing.T) { inhumePrm.WithTarget(tombAddr, objAddr) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -200,7 +200,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 3. @@ -212,7 +212,7 @@ func TestLockExpiration(t *testing.T) { inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj)) require.Eventually(t, func() bool { - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) return err == nil }, 30*time.Second, time.Second) } @@ -270,12 +270,12 @@ func TestLockForceRemoval(t *testing.T) { inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) var objLockedErr *apistatus.ObjectLocked - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.ErrorAs(t, err, &objLockedErr) // 4. @@ -283,13 +283,12 @@ func TestLockForceRemoval(t *testing.T) { deletePrm.WithAddress(objectcore.AddressOf(lock)) deletePrm.WithForceRemoval() - _, err = e.Delete(context.Background(), deletePrm) - require.NoError(t, err) + require.NoError(t, e.Delete(context.Background(), deletePrm)) // 5. 
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - _, err = e.Inhume(context.Background(), inhumePrm) + err = e.Inhume(context.Background(), inhumePrm) require.NoError(t, err) } diff --git a/pkg/services/control/server/gc.go b/pkg/services/control/server/gc.go index d9fefc38e..a8ef7809e 100644 --- a/pkg/services/control/server/gc.go +++ b/pkg/services/control/server/gc.go @@ -42,8 +42,7 @@ func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsReques prm.WithForceRemoval() prm.WithAddress(addrList[i]) - _, err := s.s.Delete(ctx, prm) - if err != nil && firstErr == nil { + if err := s.s.Delete(ctx, prm); err != nil && firstErr == nil { firstErr = err } } From 05fd999162f30042a0021f14a250aa3305b79f29 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 14 Jan 2025 10:54:53 +0300 Subject: [PATCH 321/591] [#1600] fstree: Handle incomplete writes Signed-off-by: Evgenii Stratonikov --- .../blobstor/fstree/fstree_write_linux.go | 25 +++++++++-- .../fstree/fstree_write_linux_test.go | 42 +++++++++++++++++++ 2 files changed, 63 insertions(+), 4 deletions(-) create mode 100644 pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go index c62654028..49cbda344 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go @@ -69,10 +69,13 @@ func (w *linuxWriter) writeFile(p string, data []byte) error { if err != nil { return err } + written := 0 tmpPath := "/proc/self/fd/" + strconv.FormatUint(uint64(fd), 10) n, err := unix.Write(fd, data) - if err == nil { - if n == len(data) { + for err == nil { + written += n + + if written == len(data) { err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW) if err == nil { w.fileCounter.Inc(uint64(len(data))) @@ -80,9 +83,23 @@ func (w *linuxWriter) writeFile(p string, data []byte) error { if errors.Is(err, unix.EEXIST) { err = nil } - } else { - err = errors.New("incomplete write") + break } + + // From man 2 write: + // https://www.man7.org/linux/man-pages/man2/write.2.html + // + // Note that a successful write() may transfer fewer than count + // bytes. Such partial writes can occur for various reasons; for + // example, because there was insufficient space on the disk device + // to write all of the requested bytes, or because a blocked write() + // to a socket, pipe, or similar was interrupted by a signal handler + // after it had transferred some, but before it had transferred all + // of the requested bytes. In the event of a partial write, the + // caller can make another write() call to transfer the remaining + // bytes. The subsequent call will either transfer further bytes or + // may result in an error (e.g., if the disk is now full). 
+ n, err = unix.Write(fd, data[written:]) } errClose := unix.Close(fd) if err != nil { diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go new file mode 100644 index 000000000..7fae2e695 --- /dev/null +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux_test.go @@ -0,0 +1,42 @@ +//go:build linux && integration + +package fstree + +import ( + "context" + "errors" + "os" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" +) + +func TestENOSPC(t *testing.T) { + dir, err := os.MkdirTemp(t.TempDir(), "ramdisk") + require.NoError(t, err) + + f, err := os.CreateTemp(t.TempDir(), "ramdisk_*") + require.NoError(t, err) + + err = unix.Mount(f.Name(), dir, "tmpfs", 0, "size=1M") + if errors.Is(err, unix.EPERM) { + t.Skipf("skip size tests: no permission to mount: %v", err) + return + } + require.NoError(t, err) + defer func() { + require.NoError(t, unix.Unmount(dir, 0)) + }() + + fst := New(WithPath(dir), WithDepth(1)) + require.NoError(t, fst.Open(mode.ComponentReadWrite)) + require.NoError(t, fst.Init()) + + _, err = fst.Put(context.Background(), common.PutPrm{ + RawData: make([]byte, 10<<20), + }) + require.ErrorIs(t, err, common.ErrNoSpace) +} From c3c034eccac1297a15645d1ff7995855cc45fc74 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 15 Jan 2025 14:19:56 +0300 Subject: [PATCH 322/591] [#1601] util: Correctly parse 'root' name for container resources * Convert `root/*` to `//`; * Add unit-test case for parses to check parsing correctness. 
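For illustration (not part of the original change set), the new spelling is an
explicit alias of the already supported bare form for container resources; both
rules below now parse to the root-containers resource, and the second one is the
case covered by the new unit test:

```
allow Container.Put /*
allow Container.Put root/*
```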
Signed-off-by: Airat Arifullin --- pkg/util/ape/parser.go | 2 +- pkg/util/ape/parser_test.go | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index b4a31fd8d..a34a17f6f 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -261,7 +261,7 @@ func parseResource(lexeme string, isObj bool) (string, error) { } else { if lexeme == "*" { return nativeschema.ResourceFormatAllContainers, nil - } else if lexeme == "/*" { + } else if lexeme == "/*" || lexeme == "root/*" { return nativeschema.ResourceFormatRootContainers, nil } else if strings.HasPrefix(lexeme, "/") && len(lexeme) > 1 { lexeme = lexeme[1:] diff --git a/pkg/util/ape/parser_test.go b/pkg/util/ape/parser_test.go index 21649fd24..c236c4603 100644 --- a/pkg/util/ape/parser_test.go +++ b/pkg/util/ape/parser_test.go @@ -43,6 +43,15 @@ func TestParseAPERule(t *testing.T) { Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootObjects}}, }, }, + { + name: "Valid rule for all containers in explicit root namespace", + rule: "allow Container.Put root/*", + expectRule: policyengine.Rule{ + Status: policyengine.Allow, + Actions: policyengine.Actions{Names: []string{nativeschema.MethodPutContainer}}, + Resources: policyengine.Resources{Names: []string{nativeschema.ResourceFormatRootContainers}}, + }, + }, { name: "Valid rule for all objects in root namespace and container", rule: "allow Object.Put /cid/*", From 436d65d784876248585edb94be9cf397a874caba Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Thu, 9 Jan 2025 12:07:17 +0300 Subject: [PATCH 323/591] [#1591] Build and host OCI images on our own infra Similar to https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues/587 this PR introduces a CI pipeline that builds Docker images and pushes them to our selfhosted registry. 
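As a usage sketch, the same publish flow can be reproduced by hand with only the
targets and registry login introduced below (the password variable is an
assumption standing in for the Forgejo registry token):

```
$ make images
$ echo "$REGISTRY_PASSWORD" \
    | docker login --username truecloudlab --password-stdin git.frostfs.info
$ make push-images
```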
Signed-off-by: Vitaliy Potyarkin --- .forgejo/workflows/oci-image.yml | 28 ++++++++++++++++++++++++++++ Makefile | 9 +++++++++ docs/release-instruction.md | 12 ++++-------- 3 files changed, 41 insertions(+), 8 deletions(-) create mode 100644 .forgejo/workflows/oci-image.yml diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml new file mode 100644 index 000000000..fe91d65f9 --- /dev/null +++ b/.forgejo/workflows/oci-image.yml @@ -0,0 +1,28 @@ +name: OCI image + +on: + push: + workflow_dispatch: + +jobs: + image: + name: Build container images + runs-on: docker + container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm + steps: + - name: Clone git repo + uses: actions/checkout@v3 + + - name: Build OCI image + run: make images + + - name: Push image to OCI registry + run: | + echo "$REGISTRY_PASSWORD" \ + | docker login --username truecloudlab --password-stdin git.frostfs.info + make push-images + if: >- + startsWith(github.ref, 'refs/tags/v') && + (github.event_name == 'workflow_dispatch' || github.event_name == 'push') + env: + REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/Makefile b/Makefile index f0cdc273c..e9601a87e 100755 --- a/Makefile +++ b/Makefile @@ -139,6 +139,15 @@ images: image-storage image-ir image-cli image-adm # Build dirty local Docker images dirty-images: image-dirty-storage image-dirty-ir image-dirty-cli image-dirty-adm +# Push FrostFS components' docker image to the registry +push-image-%: + @echo "⇒ Publish FrostFS $* docker image " + @docker push $(HUB_IMAGE)-$*:$(HUB_TAG) + +# Push all Docker images to the registry +.PHONY: push-images +push-images: push-image-storage push-image-ir push-image-cli push-image-adm + # Run `make %` in Golang container docker/%: docker run --rm -t \ diff --git a/docs/release-instruction.md b/docs/release-instruction.md index 18659c699..aa867e83c 100644 --- a/docs/release-instruction.md +++ b/docs/release-instruction.md @@ -95,19 +95,15 @@ $ git push origin ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} ## Post-release -### Prepare and push images to a Docker Hub (if not automated) +### Prepare and push images to a Docker registry (automated) -Create Docker images for all applications and push them into Docker Hub -(requires [organization](https://hub.docker.com/u/truecloudlab) privileges) +Create Docker images for all applications and push them into container registry +(executed automatically in Forgejo Actions upon pushing a release tag): ```shell $ git checkout ${FROSTFS_TAG_PREFIX}${FROSTFS_REVISION} $ make images -$ docker push truecloudlab/frostfs-storage:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-storage-testnet:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-ir:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-cli:${FROSTFS_REVISION} -$ docker push truecloudlab/frostfs-adm:${FROSTFS_REVISION} +$ make push-images ``` ### Make a proper release (if not automated) From 5a270e2e61fc245130c5fc663e035a574ec6bc5e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 16 Jan 2025 15:08:35 +0300 Subject: [PATCH 324/591] [#1604] policer: Use status instead of bool value in node cache Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/nodecache.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index cd47cb0fc..410ef7d86 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -15,37 +15,30 @@ 
func (st nodeProcessStatus) Processed() bool { } // nodeCache tracks Policer's check progress. -type nodeCache map[uint64]bool +type nodeCache map[uint64]nodeProcessStatus func newNodeCache() nodeCache { - return make(map[uint64]bool) + return make(map[uint64]nodeProcessStatus) } -func (n nodeCache) set(node netmap.NodeInfo, val bool) { +func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) { n[node.Hash()] = val } // submits storage node as a candidate to store the object replica in case of // shortage. func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) { - n.set(node, false) + n.set(node, nodeDoesNotHoldObject) } // submits storage node as a current object replica holder. func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) { - n.set(node, true) + n.set(node, nodeHoldsObject) } // processStatus returns current processing status of the storage node. func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { - switch val, ok := n[node.Hash()]; { - case !ok: - return nodeNotProcessed - case val: - return nodeHoldsObject - default: - return nodeDoesNotHoldObject - } + return n[node.Hash()] } // SubmitSuccessfulReplication marks given storage node as a current object From 84e1599997c3d4674c418556dccbeaeb1d2414ed Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 16 Jan 2025 15:52:21 +0300 Subject: [PATCH 325/591] [#1604] policer: Remove one-line helpers Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/check.go | 6 +++--- pkg/services/policer/check_test.go | 4 ++-- pkg/services/policer/nodecache.go | 13 +------------ 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index bdfc4344b..9335a75d8 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -143,10 +143,10 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe if err == nil { shortage-- - checkedNodes.submitReplicaHolder(nodes[i]) + checkedNodes.set(nodes[i], nodeHoldsObject) } else { if client.IsErrObjectNotFound(err) { - checkedNodes.submitReplicaCandidate(nodes[i]) + checkedNodes.set(nodes[i], nodeDoesNotHoldObject) continue } else if client.IsErrNodeUnderMaintenance(err) { shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) @@ -174,7 +174,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // However, additional copies should not be removed in this case, // because we can remove the only copy this way. 
func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { - checkedNodes.submitReplicaHolder(node) + checkedNodes.set(node, nodeHoldsObject) shortage-- uncheckedCopies++ diff --git a/pkg/services/policer/check_test.go b/pkg/services/policer/check_test.go index d4c7ccbf9..69879c439 100644 --- a/pkg/services/policer/check_test.go +++ b/pkg/services/policer/check_test.go @@ -16,9 +16,9 @@ func TestNodeCache(t *testing.T) { cache.SubmitSuccessfulReplication(node) require.Equal(t, cache.processStatus(node), nodeHoldsObject) - cache.submitReplicaCandidate(node) + cache.set(node, nodeDoesNotHoldObject) require.Equal(t, cache.processStatus(node), nodeDoesNotHoldObject) - cache.submitReplicaHolder(node) + cache.set(node, nodeHoldsObject) require.Equal(t, cache.processStatus(node), nodeHoldsObject) } diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index 410ef7d86..7a8217858 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -25,17 +25,6 @@ func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) { n[node.Hash()] = val } -// submits storage node as a candidate to store the object replica in case of -// shortage. -func (n nodeCache) submitReplicaCandidate(node netmap.NodeInfo) { - n.set(node, nodeDoesNotHoldObject) -} - -// submits storage node as a current object replica holder. -func (n nodeCache) submitReplicaHolder(node netmap.NodeInfo) { - n.set(node, nodeHoldsObject) -} - // processStatus returns current processing status of the storage node. func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { return n[node.Hash()] @@ -46,5 +35,5 @@ func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus { // // SubmitSuccessfulReplication implements replicator.TaskResult. func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) { - n.submitReplicaHolder(node) + n.set(node, nodeHoldsObject) } From 4538ccb12a4769eeb397417f0b755dc1bc7bd712 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 16 Jan 2025 15:59:17 +0300 Subject: [PATCH 326/591] [#1604] policer: Do not process the same node twice Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/check.go | 6 +++--- pkg/services/policer/nodecache.go | 1 + pkg/services/policer/policer_test.go | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 9335a75d8..f79ffbece 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -126,12 +126,11 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe } else { if status := checkedNodes.processStatus(nodes[i]); status.Processed() { if status == nodeHoldsObject { - // node already contains replica, no need to replicate - nodes = append(nodes[:i], nodes[i+1:]...) - i-- shortage-- } + nodes = append(nodes[:i], nodes[i+1:]...) 
+ i-- continue } @@ -155,6 +154,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe zap.Stringer("object", addr), zap.Error(err), ) + checkedNodes.set(nodes[i], nodeStatusUnknown) } } } diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index 7a8217858..84a333278 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -8,6 +8,7 @@ const ( nodeNotProcessed nodeProcessStatus = iota nodeDoesNotHoldObject nodeHoldsObject + nodeStatusUnknown ) func (st nodeProcessStatus) Processed() bool { diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index 4e17e98a8..2ed71c404 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -127,7 +127,7 @@ func TestProcessObject(t *testing.T) { nodeCount: 2, policy: `REP 2 REP 2`, placement: [][]int{{0, 1}, {0, 1}}, - wantReplicateTo: []int{1, 1}, // is this actually good? + wantReplicateTo: []int{1}, }, { desc: "lock object must be replicated to all nodes", From 26e0c82fb8ad0d74782e2c96cbf4d7bcdd76e428 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 16 Jan 2025 16:22:37 +0300 Subject: [PATCH 327/591] [#1604] policer/test: Add test for MAINTENANCE runtime status The node can have MAINTENANCE status in the network map, but can also be ONLINE while responding with MAINTENANCE. These are 2 different code paths, let's test them separately. Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/policer_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index 2ed71c404..ca6bff944 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -78,6 +78,7 @@ func TestProcessObject(t *testing.T) { maintenanceNodes []int wantRemoveRedundant bool wantReplicateTo []int + headResult map[int]error ecInfo *objectcore.ECInfo }{ { @@ -145,6 +146,14 @@ func TestProcessObject(t *testing.T) { objHolders: []int{1}, maintenanceNodes: []int{2}, }, + { + desc: "preserve local copy when node response with MAINTENANCE", + nodeCount: 3, + policy: `REP 2`, + placement: [][]int{{1, 2}}, + objHolders: []int{1}, + headResult: map[int]error{2: new(apistatus.NodeUnderMaintenance)}, + }, { desc: "lock object must be replicated to all EC nodes", objType: objectSDK.TypeLock, @@ -204,6 +213,11 @@ func TestProcessObject(t *testing.T) { t.Errorf("unexpected remote object head: node=%+v addr=%v", ni, a) return nil, errors.New("unexpected object head") } + if ti.headResult != nil { + if err, ok := ti.headResult[index]; ok { + return nil, err + } + } for _, i := range ti.objHolders { if index == i { return nil, nil From 57efa0bc8eebbc7b26ef9d0c14b4dbbdb7b59de7 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 16 Jan 2025 16:04:40 +0300 Subject: [PATCH 328/591] [#1604] policer: Properly handle maintenance nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consider `REP 1 REP 1` placement (selects/filters are omitted). The placement is `[1, 2], [1, 0]`. We are the 0-th node. Node 1 is under maintenance, so we do not replicate object on the node 2. In the second replication group node 1 is under maintenance, but current caching logic considers it as "replica holder" and removes local copy. Voilà, we have DL if the object is missing from the node 1. 
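For illustration only, a minimal sketch (simplified, not the actual policer code)
of the counting rule this patch relies on: a cached MAINTENANCE reply satisfies
the shortage without becoming a confirmed holder, and the resulting unchecked
copy blocks removal of additional copies.

```go
package main

import "fmt"

type nodeProcessStatus int

const (
	nodeNotProcessed nodeProcessStatus = iota
	nodeDoesNotHoldObject
	nodeHoldsObject
	nodeIsUnderMaintenance
)

func main() {
	// REP 1: one confirmed copy is required in this placement vector.
	shortage, uncheckedCopies := 1, 0

	// Cached result for the only remote node of the vector: it answered
	// MAINTENANCE earlier, so the copy is assumed but was never verified.
	status := nodeIsUnderMaintenance

	switch status {
	case nodeHoldsObject:
		shortage--
	case nodeIsUnderMaintenance:
		shortage--        // do not create a new replica for a maintenance node
		uncheckedCopies++ // but remember this copy is unchecked
	}

	// With the old bool cache a MAINTENANCE reply was stored as "holds
	// object": uncheckedCopies stayed 0 and the policer could drop the only
	// verified (local) copy. Keeping the status separate blocks that.
	fmt.Println("shortage:", shortage,
		"unchecked copies:", uncheckedCopies,
		"may remove extra copies:", uncheckedCopies == 0)
}
```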
Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/check.go | 6 +++++- pkg/services/policer/nodecache.go | 1 + pkg/services/policer/policer_test.go | 8 ++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index f79ffbece..7ac5fc9e0 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -128,6 +128,10 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe if status == nodeHoldsObject { shortage-- } + if status == nodeIsUnderMaintenance { + shortage-- + uncheckedCopies++ + } nodes = append(nodes[:i], nodes[i+1:]...) i-- @@ -174,7 +178,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // However, additional copies should not be removed in this case, // because we can remove the only copy this way. func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { - checkedNodes.set(node, nodeHoldsObject) + checkedNodes.set(node, nodeIsUnderMaintenance) shortage-- uncheckedCopies++ diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index 84a333278..53b64d3fa 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -9,6 +9,7 @@ const ( nodeDoesNotHoldObject nodeHoldsObject nodeStatusUnknown + nodeIsUnderMaintenance ) func (st nodeProcessStatus) Processed() bool { diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index ca6bff944..9b9ab99ac 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -170,6 +170,14 @@ func TestProcessObject(t *testing.T) { placement: [][]int{{0, 1, 2}}, wantReplicateTo: []int{1, 2}, }, + { + desc: "do not remove local copy when MAINTENANCE status is cached", + objType: objectSDK.TypeRegular, + nodeCount: 3, + policy: `REP 1 REP 1`, + placement: [][]int{{1, 2}, {1, 0}}, + headResult: map[int]error{1: new(apistatus.NodeUnderMaintenance)}, + }, } for i := range tests { From 80de5d70bfd5bca16b9c891b8fdef25165e0bba1 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 16 Jan 2025 15:25:05 +0300 Subject: [PATCH 329/591] [#1593] node: Fix initialization of ape_chain cache Signed-off-by: Alexander Chuprov --- cmd/frostfs-node/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 9931d0dc8..3e9cd4e11 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1146,7 +1146,7 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) { c.cfgObject.cfgAccessPolicyEngine.policyContractHash) cacheSize := morphconfig.APEChainCacheSize(c.appCfg) - if cacheSize > 0 { + if cacheSize > 0 && c.cfgMorph.cacheTTL > 0 { morphRuleStorage = newMorphCache(morphRuleStorage, int(cacheSize), c.cfgMorph.cacheTTL) } From c98357606b4e387f7a8063331438dccfb24d255e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 17 Jan 2025 13:51:18 +0300 Subject: [PATCH 330/591] [#1606] Use slices.Clone()/bytes.Clone() where possible gopatch: ``` @@ var from, to expression @@ +import "bytes" -to := make([]byte, len(from)) -copy(to, from) +to := bytes.Clone(from) @@ var from, to expression @@ +import "bytes" -to = make([]byte, len(from)) -copy(to, from) +to = bytes.Clone(from) @@ var from, to, typ expression @@ +import "slices" -to := make([]typ, len(from)) -copy(to, from) +to := 
slices.Clone(from) @@ var from, to, typ expression @@ +import "slices" -to = make([]typ, len(from)) -copy(to, from) +to = slices.Clone(from) ``` Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/storagecfg/root.go | 4 ++-- cmd/frostfs-node/config/calls.go | 4 ++-- pkg/local_object_storage/engine/evacuate.go | 4 ++-- pkg/local_object_storage/engine/evacuate_limiter.go | 4 ++-- pkg/local_object_storage/metabase/list.go | 6 ++---- pkg/local_object_storage/pilorama/boltdb.go | 3 +-- pkg/local_object_storage/pilorama/forest.go | 4 ++-- pkg/morph/client/multi.go | 4 ++-- pkg/services/object/common/writer/ec_test.go | 4 ++-- pkg/services/object/search/search_test.go | 4 ++-- pkg/services/object_manager/placement/traverser_test.go | 4 ++-- 11 files changed, 21 insertions(+), 24 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go index 8e6a8354e..8acbc4579 100644 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go @@ -11,6 +11,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "text/template" @@ -410,8 +411,7 @@ func initClient(rpc []string) *rpcclient.Client { var c *rpcclient.Client var err error - shuffled := make([]string, len(rpc)) - copy(shuffled, rpc) + shuffled := slices.Clone(rpc) rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) for _, endpoint := range shuffled { diff --git a/cmd/frostfs-node/config/calls.go b/cmd/frostfs-node/config/calls.go index 36e53ea7c..c40bf3620 100644 --- a/cmd/frostfs-node/config/calls.go +++ b/cmd/frostfs-node/config/calls.go @@ -1,6 +1,7 @@ package config import ( + "slices" "strings" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" @@ -52,6 +53,5 @@ func (x *Config) Value(name string) any { // It supports only one level of nesting and is intended to be used // to provide default values. 
func (x *Config) SetDefault(from *Config) { - x.defaultPath = make([]string, len(from.path)) - copy(x.defaultPath, from.path) + x.defaultPath = slices.Clone(from.path) } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 682f23dff..b37f6b68e 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strings" "sync" "sync/atomic" @@ -255,8 +256,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro copyShards := func() []pooledShard { mtx.RLock() defer mtx.RUnlock() - t := make([]pooledShard, len(shards)) - copy(t, shards) + t := slices.Clone(shards) return t } eg.Go(func() error { diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index 1e6b9ccb1..c74134500 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -3,6 +3,7 @@ package engine import ( "context" "fmt" + "slices" "sync" "time" @@ -123,8 +124,7 @@ func (s *EvacuationState) DeepCopy() *EvacuationState { if s == nil { return nil } - shardIDs := make([]string, len(s.shardIDs)) - copy(shardIDs, s.shardIDs) + shardIDs := slices.Clone(s.shardIDs) return &EvacuationState{ shardIDs: shardIDs, diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index f488c3ced..eaef3b9ba 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -188,8 +188,7 @@ loop: if offset != nil { // new slice is much faster but less memory efficient // we need to copy, because offset exists during bbolt tx - cursor.inBucketOffset = make([]byte, len(offset)) - copy(cursor.inBucketOffset, offset) + cursor.inBucketOffset = bytes.Clone(offset) } if len(result) == 0 { @@ -198,8 +197,7 @@ loop: // new slice is much faster but less memory efficient // we need to copy, because bucketName exists during bbolt tx - cursor.bucketName = make([]byte, len(bucketName)) - copy(cursor.bucketName, bucketName) + cursor.bucketName = bytes.Clone(bucketName) return result, cursor, nil } diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 1d55d1c13..9d71d9fda 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1506,8 +1506,7 @@ func (t *boltForest) TreeListTrees(ctx context.Context, prm TreeListTreesPrm) (* }) if len(res.Items) == batchSize { - res.NextPageToken = make([]byte, len(k)) - copy(res.NextPageToken, k) + res.NextPageToken = bytes.Clone(k) break } } diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index f31504e2b..92183716c 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "sort" "strings" @@ -84,8 +85,7 @@ func (f *memoryForest) TreeAddByPath(_ context.Context, d CIDDescriptor, treeID s.operations = append(s.operations, op) } - mCopy := make([]KeyValue, len(m)) - copy(mCopy, m) + mCopy := slices.Clone(m) op := s.do(&Move{ Parent: node, Meta: Meta{ diff --git a/pkg/morph/client/multi.go b/pkg/morph/client/multi.go index 708d3b39f..b9e39c25e 100644 --- a/pkg/morph/client/multi.go +++ b/pkg/morph/client/multi.go @@ -2,6 +2,7 @@ package client import ( "context" + "slices" 
"sort" "time" @@ -99,8 +100,7 @@ mainLoop: case <-t.C: c.switchLock.RLock() - endpointsCopy := make([]Endpoint, len(c.endpoints.list)) - copy(endpointsCopy, c.endpoints.list) + endpointsCopy := slices.Clone(c.endpoints.list) currPriority := c.endpoints.list[c.endpoints.curr].Priority highestPriority := c.endpoints.list[0].Priority diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index 8b2599e5f..8ad7e641a 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -7,6 +7,7 @@ import ( "crypto/sha256" "errors" "fmt" + "slices" "strconv" "testing" @@ -41,8 +42,7 @@ type testPlacementBuilder struct { func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( [][]netmap.NodeInfo, error, ) { - arr := make([]netmap.NodeInfo, len(p.vectors[0])) - copy(arr, p.vectors[0]) + arr := slices.Clone(p.vectors[0]) return [][]netmap.NodeInfo{arr}, nil } diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 0a40025e1..05643eb2b 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -6,6 +6,7 @@ import ( "crypto/sha256" "errors" "fmt" + "slices" "strconv" "testing" @@ -103,8 +104,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap. return nil, errors.New("vectors for address not found") } - res := make([][]netmap.NodeInfo, len(vs)) - copy(res, vs) + res := slices.Clone(vs) return res, nil } diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index f96e5c8a7..624efb007 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -1,6 +1,7 @@ package placement import ( + "slices" "strconv" "testing" @@ -33,8 +34,7 @@ func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo { vc := make([][]netmap.NodeInfo, 0, len(v)) for i := range v { - ns := make([]netmap.NodeInfo, len(v[i])) - copy(ns, v[i]) + ns := slices.Clone(v[i]) vc = append(vc, ns) } From 0bcbeb26b2b79d210d4b99360e1ad58ae809faab Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 17 Jan 2025 12:38:44 +0300 Subject: [PATCH 331/591] [#1605] policer: Simplify processRepNodes() checks Current flow is hard to reason about, #1601 is a notorious example of accidental complexity. 1. Remove multiple nested ifs, use depth=1. 2. Process each status exactly once, hopefully preventing bugs like #1601. Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/check.go | 106 ++++++++++++++---------------- pkg/services/policer/nodecache.go | 1 + 2 files changed, 52 insertions(+), 55 deletions(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 7ac5fc9e0..3e536f105 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -117,50 +117,40 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe default: } - if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) { + var err error + st := checkedNodes.processStatus(nodes[i]) + if !st.Processed() { + st, err = p.checkStatus(ctx, addr, nodes[i]) + checkedNodes.set(nodes[i], st) + if st == nodeDoesNotHoldObject { + // 1. This is the first time the node is encountered (`!st.Processed()`). + // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). 
+ // So we leave the node in the list and skip its removal + // at the end of the loop body. + continue + } + } + + switch st { + case nodeIsLocal: requirements.needLocalCopy = true shortage-- - } else if nodes[i].Status().IsMaintenance() { - shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - if status := checkedNodes.processStatus(nodes[i]); status.Processed() { - if status == nodeHoldsObject { - shortage-- - } - if status == nodeIsUnderMaintenance { - shortage-- - uncheckedCopies++ - } + case nodeIsUnderMaintenance: + shortage-- + uncheckedCopies++ - nodes = append(nodes[:i], nodes[i+1:]...) - i-- - continue - } - - callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) - - _, err := p.remoteHeader(callCtx, nodes[i], addr, false) - - cancel() - - if err == nil { - shortage-- - checkedNodes.set(nodes[i], nodeHoldsObject) - } else { - if client.IsErrObjectNotFound(err) { - checkedNodes.set(nodes[i], nodeDoesNotHoldObject) - continue - } else if client.IsErrNodeUnderMaintenance(err) { - shortage, uncheckedCopies = p.handleMaintenance(ctx, nodes[i], checkedNodes, shortage, uncheckedCopies) - } else { - p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, - zap.Stringer("object", addr), - zap.Error(err), - ) - checkedNodes.set(nodes[i], nodeStatusUnknown) - } - } + p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, + zap.String("node", netmap.StringifyPublicKey(nodes[i]))) + case nodeHoldsObject: + shortage-- + case nodeDoesNotHoldObject: + case nodeStatusUnknown: + p.log.Error(ctx, logs.PolicerReceiveObjectHeaderToCheckPolicyCompliance, + zap.Stringer("object", addr), + zap.Error(err)) + default: + panic("unreachable") } nodes = append(nodes[:i], nodes[i+1:]...) @@ -170,22 +160,28 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies) } -// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values -// -// consider remote nodes under maintenance as problem OK. Such -// nodes MAY not respond with object, however, this is how we -// prevent spam with new replicas. -// However, additional copies should not be removed in this case, -// because we can remove the only copy this way. 
-func (p *Policer) handleMaintenance(ctx context.Context, node netmap.NodeInfo, checkedNodes nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) { - checkedNodes.set(node, nodeIsUnderMaintenance) - shortage-- - uncheckedCopies++ +func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { + return nodeIsLocal, nil + } + if node.Status().IsMaintenance() { + return nodeIsUnderMaintenance, nil + } - p.log.Debug(ctx, logs.PolicerConsiderNodeUnderMaintenanceAsOK, - zap.String("node", netmap.StringifyPublicKey(node)), - ) - return shortage, uncheckedCopies + callCtx, cancel := context.WithTimeout(ctx, p.headTimeout) + _, err := p.remoteHeader(callCtx, node, addr, false) + cancel() + + if err == nil { + return nodeHoldsObject, nil + } + if client.IsErrObjectNotFound(err) { + return nodeDoesNotHoldObject, nil + } + if client.IsErrNodeUnderMaintenance(err) { + return nodeIsUnderMaintenance, nil + } + return nodeStatusUnknown, err } func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements, diff --git a/pkg/services/policer/nodecache.go b/pkg/services/policer/nodecache.go index 53b64d3fa..c2157de5d 100644 --- a/pkg/services/policer/nodecache.go +++ b/pkg/services/policer/nodecache.go @@ -10,6 +10,7 @@ const ( nodeHoldsObject nodeStatusUnknown nodeIsUnderMaintenance + nodeIsLocal ) func (st nodeProcessStatus) Processed() bool { From 951a7ee1c7fa9ffa96041e77731e3bacea16242b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 20 Jan 2025 16:09:35 +0300 Subject: [PATCH 332/591] [#1605] policer: Do not mutate slice under iteration Nothing wrong with it, besides being difficult to read. Signed-off-by: Evgenii Stratonikov --- pkg/services/policer/check.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 3e536f105..7df372476 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -110,6 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe // Number of copies that are stored on maintenance nodes. var uncheckedCopies int + var candidates []netmap.NodeInfo for i := 0; shortage > 0 && i < len(nodes); i++ { select { case <-ctx.Done(): @@ -125,8 +126,8 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe if st == nodeDoesNotHoldObject { // 1. This is the first time the node is encountered (`!st.Processed()`). // 2. The node does not hold object (`st == nodeDoesNotHoldObject`). - // So we leave the node in the list and skip its removal - // at the end of the loop body. + // So we need to try to put an object to it. + candidates = append(candidates, nodes[i]) continue } } @@ -152,12 +153,9 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe default: panic("unreachable") } - - nodes = append(nodes[:i], nodes[i+1:]...) 
- i-- } - p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies) + p.handleProcessNodesResult(ctx, addr, requirements, candidates, checkedNodes, shortage, uncheckedCopies) } func (p *Policer) checkStatus(ctx context.Context, addr oid.Address, node netmap.NodeInfo) (nodeProcessStatus, error) { From 30e14d50ef97e04758b5fb529fb24a5f912d247b Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 23 Jan 2025 17:56:52 +0300 Subject: [PATCH 333/591] [#1612] Makefile: Update golangci-lint Signed-off-by: Alexander Chuprov --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e9601a87e..497dce115 100755 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.62.0 +LINT_VERSION ?= 1.62.2 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) From 603015d029ba15dcc9f84e19b14eea1e38e55309 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 24 Jan 2025 16:33:32 +0300 Subject: [PATCH 334/591] [#1570] cli: Use array type for --range parameter to object hash Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/modules/object/hash.go | 2 +- cmd/frostfs-cli/modules/object/range.go | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index 461c35f30..d8ea449eb 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -41,7 +41,7 @@ func initObjectHashCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectHashCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take hash from in the form offset1:length1,...") + flags.StringSlice("range", nil, "Range to take hash from in the form offset1:length1,...") _ = objectHashCmd.MarkFlagRequired("range") flags.String("type", hashSha256, "Hash type. Either 'sha256' or 'tz'") diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index ad4bc3d59..8f59906ca 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -38,7 +38,7 @@ func initObjectRangeCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String("range", "", "Range to take data from in the form offset:length") + flags.StringSlice("range", nil, "Range to take data from in the form offset:length") flags.String(fileFlag, "", "File to write object payload to. 
Default: stdout.") flags.Bool(rawFlag, false, rawFlagDesc) } @@ -195,11 +195,10 @@ func marshalECInfo(cmd *cobra.Command, info *objectSDK.ECInfo) ([]byte, error) { } func getRangeList(cmd *cobra.Command) ([]objectSDK.Range, error) { - v := cmd.Flag("range").Value.String() - if len(v) == 0 { - return nil, nil + vs, err := cmd.Flags().GetStringSlice("range") + if len(vs) == 0 || err != nil { + return nil, err } - vs := strings.Split(v, ",") rs := make([]objectSDK.Range, len(vs)) for i := range vs { before, after, found := strings.Cut(vs[i], rangeSep) From a788d4477342b92f7fa9c3bc9c34be8a9c9b45df Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 24 Jan 2025 16:54:27 +0300 Subject: [PATCH 335/591] [#1570] cli: Use array type for attributes parameters Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/modules/object/patch.go | 10 ++++------ cmd/frostfs-cli/modules/object/put.go | 10 ++++------ 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index 8f03885ab..ebc415b2f 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -46,7 +46,7 @@ func initObjectPatchCmd() { flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) _ = objectRangeCmd.MarkFlagRequired(commonflags.OIDFlag) - flags.String(newAttrsFlagName, "", "New object attributes in form of Key1=Value1,Key2=Value2") + flags.StringSlice(newAttrsFlagName, nil, "New object attributes in form of Key1=Value1,Key2=Value2") flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. Format: offset:length") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") @@ -99,11 +99,9 @@ func patch(cmd *cobra.Command, _ []string) { } func parseNewObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - var rawAttrs []string - - raw := cmd.Flag(newAttrsFlagName).Value.String() - if len(raw) != 0 { - rawAttrs = strings.Split(raw, ",") + rawAttrs, err := cmd.Flags().GetStringSlice(newAttrsFlagName) + if err != nil { + return nil, err } attrs := make([]objectSDK.Attribute, len(rawAttrs), len(rawAttrs)+2) // name + timestamp attributes diff --git a/cmd/frostfs-cli/modules/object/put.go b/cmd/frostfs-cli/modules/object/put.go index affe9bbba..9e8a7cc6f 100644 --- a/cmd/frostfs-cli/modules/object/put.go +++ b/cmd/frostfs-cli/modules/object/put.go @@ -50,7 +50,7 @@ func initObjectPutCmd() { flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) - flags.String("attributes", "", "User attributes in form of Key1=Value1,Key2=Value2") + flags.StringSlice("attributes", nil, "User attributes in form of Key1=Value1,Key2=Value2") flags.Bool("disable-filename", false, "Do not set well-known filename attribute") flags.Bool("disable-timestamp", false, "Do not set well-known timestamp attribute") flags.Uint64VarP(&putExpiredOn, commonflags.ExpireAt, "e", 0, "The last active epoch in the life of the object") @@ -214,11 +214,9 @@ func getAllObjectAttributes(cmd *cobra.Command) []objectSDK.Attribute { } func parseObjectAttrs(cmd *cobra.Command) ([]objectSDK.Attribute, error) { - var rawAttrs []string - - raw := cmd.Flag("attributes").Value.String() - if len(raw) != 0 { - rawAttrs = strings.Split(raw, ",") + rawAttrs, err := cmd.Flags().GetStringSlice("attributes") + if err != nil { + return nil, err } attrs := make([]objectSDK.Attribute, len(rawAttrs), 
len(rawAttrs)+2) // name + timestamp attributes From abfd9657f97f2d7b8e9227befeadf08b3fc75f03 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 28 Jan 2025 17:43:14 +0300 Subject: [PATCH 336/591] [#1617] govulncheck: Use patch release with security fixes https://go.dev/doc/devel/release#go1.23.minor Signed-off-by: Vitaliy Potyarkin --- .forgejo/workflows/vulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index cf15005b1..8a5a818aa 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.23.5' - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest From e1a984e9d8bb0cedd5597f6176b865dea5e2f3b5 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 16:09:53 +0300 Subject: [PATCH 337/591] [#1620] logs: Drop redundant trace_id fields `trace_id` is taken from context. Signed-off-by: Dmitrii Stepanov --- .../blobovnicza/delete.go | 2 - .../blobstor/blobovniczatree/delete.go | 2 - .../blobstor/blobovniczatree/exists.go | 4 +- .../blobstor/blobovniczatree/get.go | 2 - .../blobstor/blobovniczatree/get_range.go | 4 +- .../blobstor/blobovniczatree/put.go | 10 ++--- pkg/local_object_storage/blobstor/exists.go | 4 +- pkg/local_object_storage/engine/delete.go | 10 ++--- pkg/local_object_storage/engine/evacuate.go | 32 ++++++--------- pkg/local_object_storage/engine/get.go | 4 +- pkg/local_object_storage/engine/inhume.go | 9 +---- pkg/local_object_storage/engine/put.go | 10 ++--- pkg/local_object_storage/engine/range.go | 4 +- pkg/local_object_storage/engine/tree.go | 40 ++++++------------- pkg/local_object_storage/shard/delete.go | 7 +--- pkg/local_object_storage/shard/get.go | 7 +--- pkg/local_object_storage/shard/inhume.go | 2 - pkg/local_object_storage/shard/list.go | 4 +- pkg/services/object/put/single.go | 2 - pkg/services/replicator/process.go | 5 +-- pkg/services/replicator/pull.go | 10 ++--- pkg/services/replicator/put.go | 7 +--- pkg/services/tree/redirect.go | 4 +- pkg/services/tree/replicator.go | 7 +--- 24 files changed, 56 insertions(+), 136 deletions(-) diff --git a/pkg/local_object_storage/blobovnicza/delete.go b/pkg/local_object_storage/blobovnicza/delete.go index d821b2991..8f24b5675 100644 --- a/pkg/local_object_storage/blobovnicza/delete.go +++ b/pkg/local_object_storage/blobovnicza/delete.go @@ -6,7 +6,6 @@ import ( "syscall" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -94,7 +93,6 @@ func (b *Blobovnicza) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, err b.log.Debug(ctx, logs.BlobovniczaObjectWasRemovedFromBucket, zap.String("binary size", stringifyByteSize(dataSize)), zap.String("range", stringifyBounds(sizeLowerBound, sizeUpperBound)), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) b.itemDeleted(recordSize) } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go index 8d17fc4b0..d096791c3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/delete.go +++ 
b/pkg/local_object_storage/blobstor/blobovniczatree/delete.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -86,7 +85,6 @@ func (b *Blobovniczas) Delete(ctx context.Context, prm common.DeletePrm) (res co b.log.Debug(ctx, logs.BlobovniczatreeCouldNotRemoveObjectFromLevel, zap.String("level", p), zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go index d2c99945f..0c5e48821 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "go.opentelemetry.io/otel/attribute" @@ -57,8 +56,7 @@ func (b *Blobovniczas) Exists(ctx context.Context, prm common.ExistsPrm) (common if !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get.go b/pkg/local_object_storage/blobstor/blobovniczatree/get.go index 5d158644e..e5c83e5f2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -70,7 +69,6 @@ func (b *Blobovniczas) Get(ctx context.Context, prm common.GetPrm) (res common.G b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go index 84b9bc55f..27d13f4f3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/get_range.go @@ -11,7 +11,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -71,8 +70,7 @@ func (b *Blobovniczas) GetRange(ctx context.Context, prm common.GetRangePrm) (re if !outOfBounds && !client.IsErrObjectNotFound(err) { b.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetObjectFromLevel, zap.String("level", p), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } if outOfBounds { return true, err diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/put.go b/pkg/local_object_storage/blobstor/blobovniczatree/put.go index 8276a25ef..37c49d741 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/put.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/put.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobovnicza" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -83,16 +82,14 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) i.B.reportError(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, err) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotGetActiveBlobovnicza, - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } return false, nil } if active == nil { - i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + i.B.log.Debug(ctx, logs.BlobovniczatreeBlobovniczaOverflowed, zap.String("level", lvlPath)) return false, nil } defer active.Close(ctx) @@ -106,8 +103,7 @@ func (i *putIterator) iterate(ctx context.Context, lvlPath string) (bool, error) } else { i.B.log.Debug(ctx, logs.BlobovniczatreeCouldNotPutObjectToActiveBlobovnicza, zap.String("path", active.SystemPath()), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } if errors.Is(err, blobovnicza.ErrNoSpace) { i.AllFull = true diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index f1e45fe10..c155e15b8 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -75,8 +74,7 @@ func (b *BlobStor) Exists(ctx context.Context, prm common.ExistsPrm) (common.Exi for _, err := range errors[:len(errors)-1] { b.log.Warn(ctx, logs.BlobstorErrorOccurredDuringObjectExistenceChecking, zap.Stringer("address", prm.Address), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } return 
common.ExistsRes{}, errors[len(errors)-1] diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 20c915da6..5e5f65fa2 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -148,8 +147,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, zap.Stringer("addr", addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return false } @@ -160,8 +158,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } @@ -190,8 +187,7 @@ func (e *StorageEngine) deleteChunks( if err != nil { e.log.Debug(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Stringer("addr", addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index b37f6b68e..fd1530f53 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -15,7 +15,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -284,12 +283,12 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p }() e.log.Info(ctx, logs.EngineStartedShardsEvacuation, zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) err = e.getTotals(ctx, prm, shardsToEvacuate, res) if err != nil { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToCount, zap.Strings("shard_ids", shardIDs), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) return err } @@ -323,7 +322,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p } if err != nil { e.log.Error(ctx, logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope)) + zap.Stringer("scope", prm.Scope)) return err } @@ -480,8 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context err := sh.IterateOverContainers(ctx, cntPrm) if 
err != nil { cancel(err) - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField) } return err } @@ -540,7 +538,7 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedLocal, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), zap.String("to_shard_id", shardID), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) res.trEvacuated.Add(1) continue } @@ -550,26 +548,26 @@ func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, tree e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } if moved { e.log.Debug(ctx, logs.EngineShardsEvacuationTreeEvacuatedRemote, zap.String("cid", contTree.CID.EncodeToString()), zap.String("treeID", contTree.TreeID), zap.String("from_shardID", sh.ID().String()), zap.String("to_node", nodePK), - evacuationOperationLogField, zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) res.trEvacuated.Add(1) } else if prm.IgnoreErrors { res.trFailed.Add(1) e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } else { e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveTree, zap.String("cid", contTree.CID.EncodeToString()), zap.String("tree_id", contTree.TreeID), zap.String("from_shard_id", sh.ID().String()), evacuationOperationLogField, - zap.Error(err), zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return fmt.Errorf("no remote nodes available to replicate tree '%s' of container %s", contTree.TreeID, contTree.CID) } } @@ -756,8 +754,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI res.objFailed.Add(1) return nil } - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) return err } @@ -778,16 +775,14 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object()) if err != nil { - e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Error(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) 
return err } if moved { res.objEvacuated.Add(1) } else if prm.IgnoreErrors { res.objFailed.Add(1) - e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.log.Warn(ctx, logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField) } else { return fmt.Errorf("object %s was not replicated", addr) } @@ -825,8 +820,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add zap.Stringer("from", sh.ID()), zap.Stringer("to", shards[j].ID()), zap.Stringer("addr", addr), - evacuationOperationLogField, - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + evacuationOperationLogField) return true, nil case putToShardExists, putToShardRemoved: res.objSkipped.Add(1) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 81b027c26..74c64bbb6 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -107,8 +106,7 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", it.ShardWithMeta.ID()), zap.Error(it.MetaError), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Stringer("address", prm.addr)) } } diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 75bd15c8b..fb802ef2a 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -99,7 +98,6 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard, zap.Error(errors.New("this shard was expected to exist")), zap.String("shard_id", shardID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) return errInhumeFailure } @@ -197,7 +195,6 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, zap.Error(err), zap.Stringer("address", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } else if isLocked { retErr = new(apistatus.ObjectLocked) @@ -232,8 +229,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e e.iterateOverUnsortedShards(func(h hashedShard) 
(stop bool) { locked, err = h.Shard.IsLocked(ctx, addr) if err != nil { - e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err return false } @@ -262,8 +258,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { locks, err := h.Shard.GetLocks(ctx, addr) if err != nil { - e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err } allLocks = append(allLocks, locks...) diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index c79b6e251..64288a511 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -143,8 +142,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti } else { e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, zap.Stringer("shard_id", sh.ID()), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } return // this is not ErrAlreadyRemoved error so we can go to the next shard @@ -165,15 +163,13 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return } if client.IsErrObjectAlreadyRemoved(err) { e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Stringer("shard_id", sh.ID()), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) res.status = putToShardRemoved res.err = err return diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 600e7266c..a468cf594 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -119,8 +118,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error e.log.Warn(ctx, logs.ShardMetaInfoPresentButObjectNotFound, zap.Stringer("shard_id", 
it.ShardWithMeta.ID()), zap.Error(it.MetaError), - zap.Stringer("address", prm.addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Stringer("address", prm.addr)) } } diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 268b4adfa..7f70d36f7 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.opentelemetry.io/otel/attribute" @@ -39,8 +38,7 @@ func (e *StorageEngine) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeMove`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err @@ -73,8 +71,7 @@ func (e *StorageEngine) TreeAddByPath(ctx context.Context, d pilorama.CIDDescrip if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeAddByPath`", err, zap.Stringer("cid", d.CID), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return nil, err } @@ -102,8 +99,7 @@ func (e *StorageEngine) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID str if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApply`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -130,8 +126,7 @@ func (e *StorageEngine) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeI if !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't perform `TreeApplyBatch`", err, zap.Stringer("cid", cnr), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -162,8 +157,7 @@ func (e *StorageEngine) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetByPath`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -195,8 +189,7 @@ func (e *StorageEngine) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID s if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetMeta`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -227,8 +220,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetChildren`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue 
} @@ -259,8 +251,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeSortedByFilename`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -291,8 +282,7 @@ func (e *StorageEngine) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't perform `TreeGetOpLog`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -321,8 +311,7 @@ func (e *StorageEngine) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID stri if !errors.Is(err, pilorama.ErrTreeNotFound) && !errors.Is(err, shard.ErrReadOnlyMode) { e.reportShardError(ctx, sh, "can't perform `TreeDrop`", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } @@ -350,8 +339,7 @@ func (e *StorageEngine) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, } e.reportShardError(ctx, sh, "can't perform `TreeList`", err, - zap.Stringer("cid", cid), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Stringer("cid", cid)) // returns as much info about // trees as possible @@ -417,8 +405,7 @@ func (e *StorageEngine) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK if err != nil && !errors.Is(err, shard.ErrReadOnlyMode) && err != shard.ErrPiloramaDisabled { e.reportShardError(ctx, lst[index], "can't update tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } return err } @@ -444,8 +431,7 @@ func (e *StorageEngine) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, t if !errors.Is(err, pilorama.ErrTreeNotFound) { e.reportShardError(ctx, sh, "can't read tree synchronization height", err, zap.Stringer("cid", cid), - zap.String("tree", treeID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("tree", treeID)) } continue } diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index fb6769b51..55231b032 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -112,8 +111,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil { s.log.Debug(ctx, logs.StorageIDRetrievalFailure, zap.Stringer("object", addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } storageID := res.StorageID() @@ -132,8 +130,7 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error if err != nil && !client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ObjectRemovalFailureBlobStor, 
zap.Stringer("object_address", addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return err } return nil diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 15d1eb6ba..05823c62b 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -10,7 +10,6 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -155,14 +154,12 @@ func (s *Shard) fetchObjectData(ctx context.Context, addr oid.Address, skipMeta if client.IsErrObjectNotFound(err) { s.log.Debug(ctx, logs.ShardObjectIsMissingInWritecache, zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } else { s.log.Error(ctx, logs.ShardFailedToFetchObjectFromWritecache, zap.Error(err), zap.Stringer("addr", addr), - zap.Bool("skip_meta", skipMeta), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Bool("skip_meta", skipMeta)) } } if skipMeta || mErr != nil { diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index d46400869..9d5f66063 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" @@ -111,7 +110,6 @@ func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) { s.log.Debug(ctx, logs.ShardCouldNotMarkObjectToDeleteInMetabase, zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) s.m.RUnlock() diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index c5275dafd..7bc5ead1d 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -124,8 +123,7 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { if err != nil { s.log.Debug(ctx, logs.ShardCantSelectAllObjects, zap.Stringer("cid", lst[i]), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) continue } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index 5219e64d5..fec50b1d9 100644 --- a/pkg/services/object/put/single.go +++ 
b/pkg/services/object/put/single.go @@ -21,7 +21,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal" svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectAPI "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc" @@ -323,7 +322,6 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, zap.Stringer("address", addr), zap.Stringer("object_id", objID), zap.Stringer("container_id", cnrID), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) } diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go index 69395bb02..8c6f0df06 100644 --- a/pkg/services/replicator/process.go +++ b/pkg/services/replicator/process.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -45,8 +44,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T if err != nil { p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) return } @@ -65,7 +63,6 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T log := p.log.With( zap.String("node", netmap.StringifyPublicKey(task.Nodes[i])), zap.Stringer("object", task.Addr), - zap.String("trace_id", tracingPkg.GetTraceID(ctx)), ) callCtx, cancel := context.WithTimeout(ctx, p.putTimeout) diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index 5ce929342..bb38c72ad 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -8,7 +8,6 @@ import ( containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -51,15 +50,13 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), - zap.Strings("endpoints", endpoints), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Strings("endpoints", endpoints)) } if obj == nil { p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), - zap.Error(errFailedToGetObjectFromAnyNode), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(errFailedToGetObjectFromAnyNode)) return } @@ -67,7 +64,6 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err != nil { p.log.Error(ctx, 
logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go index 489f66ae5..bcad8471d 100644 --- a/pkg/services/replicator/put.go +++ b/pkg/services/replicator/put.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -33,8 +32,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { if task.Obj == nil { p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(errObjectNotDefined), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(errObjectNotDefined)) return } @@ -42,7 +40,6 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) { if err != nil { p.log.Error(ctx, logs.ReplicatorCouldNotPutObjectToLocalStorage, zap.Stringer("object", task.Addr), - zap.Error(err), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.Error(err)) } } diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index 416a0fafe..d92c749a8 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -6,7 +6,6 @@ import ( "errors" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "go.opentelemetry.io/otel/attribute" @@ -54,8 +53,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo return false } - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) called = true stop = f(c) diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index e0085d73a..bcbb73589 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -10,7 +10,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" - tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -117,14 +116,12 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { s.log.Debug(ctx, logs.TreeDoNotSendUpdateToTheNode, - zap.String("last_error", lastErr.Error()), - zap.String("trace_id", tracingPkg.GetTraceID(ctx))) + zap.String("last_error", lastErr.Error())) } else { s.log.Warn(ctx, logs.TreeFailedToSentUpdateToTheNode, zap.String("last_error", lastErr.Error()), zap.String("address", lastAddr), - zap.String("key", hex.EncodeToString(n.PublicKey())), - zap.String("trace_id", 
tracingPkg.GetTraceID(ctx))) + zap.String("key", hex.EncodeToString(n.PublicKey()))) } s.metrics.AddReplicateTaskDuration(time.Since(start), false) return lastErr From c0a341a7f6f02cc5e5a419287fab841251f4f4cc Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 27 Jan 2025 14:17:58 +0300 Subject: [PATCH 338/591] [#1616] getsvc: Move head assembling to a separate file Signed-off-by: Evgenii Stratonikov --- pkg/services/object/get/assembler.go | 37 ------------------- pkg/services/object/get/assembler_head.go | 45 +++++++++++++++++++++++ 2 files changed, 45 insertions(+), 37 deletions(-) create mode 100644 pkg/services/object/get/assembler_head.go diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index ff3f90bf2..23fc187f5 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -71,43 +71,6 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS return a.parentObject, nil } -func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { - var sourceObjectIDs []oid.ID - sourceObjectID, ok := a.splitInfo.Link() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - sourceObjectID, ok = a.splitInfo.LastPart() - if ok { - sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) - } - if len(sourceObjectIDs) == 0 { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - for _, sourceObjectID = range sourceObjectIDs { - obj, err := a.getParent(ctx, sourceObjectID, writer) - if err == nil { - return obj, nil - } - } - return nil, objectSDK.NewSplitInfoError(a.splitInfo) -} - -func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { - obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) - if err != nil { - return nil, err - } - parent := obj.Parent() - if parent == nil { - return nil, objectSDK.NewSplitInfoError(a.splitInfo) - } - if err := writer.WriteHeader(ctx, parent); err != nil { - return nil, err - } - return obj, nil -} - func (a *assembler) getLastPartOrLinkObjectID() (oid.ID, bool) { sourceObjectID, ok := a.splitInfo.Link() if ok { diff --git a/pkg/services/object/get/assembler_head.go b/pkg/services/object/get/assembler_head.go new file mode 100644 index 000000000..ff213cb82 --- /dev/null +++ b/pkg/services/object/get/assembler_head.go @@ -0,0 +1,45 @@ +package getsvc + +import ( + "context" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleHeader(ctx context.Context, writer ObjectWriter) (*objectSDK.Object, error) { + var sourceObjectIDs []oid.ID + sourceObjectID, ok := a.splitInfo.Link() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + sourceObjectID, ok = a.splitInfo.LastPart() + if ok { + sourceObjectIDs = append(sourceObjectIDs, sourceObjectID) + } + if len(sourceObjectIDs) == 0 { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + for _, sourceObjectID = range sourceObjectIDs { + obj, err := a.getParent(ctx, sourceObjectID, writer) + if err == nil { + return obj, nil + } + } + return nil, objectSDK.NewSplitInfoError(a.splitInfo) +} + +func (a *assembler) getParent(ctx context.Context, sourceObjectID oid.ID, writer ObjectWriter) (*objectSDK.Object, error) { + obj, err := a.objGetter.HeadObject(ctx, sourceObjectID) + if err != nil { + return nil, err + } + parent := obj.Parent() + if 
parent == nil { + return nil, objectSDK.NewSplitInfoError(a.splitInfo) + } + if err := writer.WriteHeader(ctx, parent); err != nil { + return nil, err + } + return obj, nil +} From 6410542d1933fa35b765f4cf324b60e1f6584714 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 27 Jan 2025 14:22:46 +0300 Subject: [PATCH 339/591] [#1616] getsvc: Move range assembling to a separate file Signed-off-by: Evgenii Stratonikov --- pkg/services/object/get/assembler.go | 102 ++++++-------------- pkg/services/object/get/assembler_range.go | 103 +++++++++++++++++++++ 2 files changed, 129 insertions(+), 76 deletions(-) create mode 100644 pkg/services/object/get/assembler_range.go diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index 23fc187f5..886f0aabb 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -59,15 +59,23 @@ func (a *assembler) Assemble(ctx context.Context, writer ObjectWriter) (*objectS if previousID == nil && len(childrenIDs) == 0 { return nil, objectSDK.NewSplitInfoError(a.splitInfo) } + if len(childrenIDs) > 0 { - if err := a.assembleObjectByChildrenList(ctx, childrenIDs, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assembleObjectByChildrenListRange(ctx, childrenIDs, writer) + } else { + err = a.assembleObjectByChildrenList(ctx, childrenIDs, writer) } } else { - if err := a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer); err != nil { - return nil, err + if a.rng != nil { + err = a.assemleObjectByPreviousIDInReverseRange(ctx, *previousID, writer) + } else { + err = a.assemleObjectByPreviousIDInReverse(ctx, *previousID, writer) } } + if err != nil { + return nil, err + } return a.parentObject, nil } @@ -153,26 +161,16 @@ func (a *assembler) getChildObject(ctx context.Context, id oid.ID, rng *objectSD } func (a *assembler) assembleObjectByChildrenList(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } - return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, nil, true) - } - - if err := a.assemblePayloadInReverse(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { return err } - return writer.WriteChunk(ctx, a.parentObject.Payload()) + return a.assemblePayloadByObjectIDs(ctx, writer, childrenIDs, true) } func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { - if a.rng == nil { - if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { - return err - } + if err := writer.WriteHeader(ctx, a.parentObject.CutPayload()); err != nil { + return err } - if err := a.assemblePayloadInReverse(ctx, writer, prevID); err != nil { return err } @@ -182,16 +180,9 @@ func (a *assembler) assemleObjectByPreviousIDInReverse(ctx context.Context, prev return nil } -func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range, verifyIsChild bool) error { - withRng := len(partRanges) > 0 && a.rng != nil - +func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer ObjectWriter, partIDs []oid.ID, verifyIsChild bool) error { for i := range partIDs { - var r *objectSDK.Range - if withRng { - r = &partRanges[i] - } - - _, err := a.getChildObject(ctx, partIDs[i], r, verifyIsChild, writer) + 
_, err := a.getChildObject(ctx, partIDs[i], nil, verifyIsChild, writer) if err != nil { return err } @@ -200,22 +191,16 @@ func (a *assembler) assemblePayloadByObjectIDs(ctx context.Context, writer Objec } func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { - chain, rngs, err := a.buildChain(ctx, prevID) + chain, err := a.buildChain(ctx, prevID) if err != nil { return err } - reverseRngs := len(rngs) > 0 - for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { chain[left], chain[right] = chain[right], chain[left] - - if reverseRngs { - rngs[left], rngs[right] = rngs[right], rngs[left] - } } - return a.assemblePayloadByObjectIDs(ctx, writer, chain, rngs, false) + return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) } func (a *assembler) isChild(obj *objectSDK.Object) bool { @@ -223,63 +208,28 @@ func (a *assembler) isChild(obj *objectSDK.Object) bool { return parent == nil || equalAddresses(a.addr, object.AddressOf(parent)) } -func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { +func (a *assembler) buildChain(ctx context.Context, prevID oid.ID) ([]oid.ID, error) { var ( chain []oid.ID - rngs []objectSDK.Range - from = a.rng.GetOffset() - to = from + a.rng.GetLength() hasPrev = true ) // fill the chain end-to-start for hasPrev { - // check that only for "range" requests, - // for `GET` it stops via the false `withPrev` - if a.rng != nil && a.currentOffset <= from { - break - } - head, err := a.objGetter.HeadObject(ctx, prevID) if err != nil { - return nil, nil, err + return nil, err } if !a.isChild(head) { - return nil, nil, errParentAddressDiffers + return nil, errParentAddressDiffers } - if a.rng != nil { - sz := head.PayloadSize() - - a.currentOffset -= sz - - if a.currentOffset < to { - off := uint64(0) - if from > a.currentOffset { - off = from - a.currentOffset - sz -= from - a.currentOffset - } - - if to < a.currentOffset+off+sz { - sz = to - off - a.currentOffset - } - - index := len(rngs) - rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(off) - rngs[index].SetLength(sz) - - id, _ := head.ID() - chain = append(chain, id) - } - } else { - id, _ := head.ID() - chain = append(chain, id) - } + id, _ := head.ID() + chain = append(chain, id) prevID, hasPrev = head.PreviousID() } - return chain, rngs, nil + return chain, nil } diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go new file mode 100644 index 000000000..638db0c76 --- /dev/null +++ b/pkg/services/object/get/assembler_range.go @@ -0,0 +1,103 @@ +package getsvc + +import ( + "context" + + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" +) + +func (a *assembler) assembleObjectByChildrenListRange(ctx context.Context, childrenIDs []oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, childrenIDs[len(childrenIDs)-1]); err != nil { + return err + } + return writer.WriteChunk(ctx, a.parentObject.Payload()) +} + +func (a *assembler) assemleObjectByPreviousIDInReverseRange(ctx context.Context, prevID oid.ID, writer ObjectWriter) error { + if err := a.assemblePayloadInReverseRange(ctx, writer, prevID); err != nil { + return err + } + if err := writer.WriteChunk(ctx, a.parentObject.Payload()); err != nil { // last part + return err + } + return nil +} + +func (a *assembler) assemblePayloadByObjectIDsRange(ctx 
context.Context, writer ObjectWriter, partIDs []oid.ID, partRanges []objectSDK.Range) error { + for i := range partIDs { + _, err := a.getChildObject(ctx, partIDs[i], &partRanges[i], false, writer) + if err != nil { + return err + } + } + return nil +} + +func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer ObjectWriter, prevID oid.ID) error { + chain, rngs, err := a.buildChainRange(ctx, prevID) + if err != nil { + return err + } + + for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { + chain[left], chain[right] = chain[right], chain[left] + rngs[left], rngs[right] = rngs[right], rngs[left] + } + + return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) +} + +func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.ID, []objectSDK.Range, error) { + var ( + chain []oid.ID + rngs []objectSDK.Range + from = a.rng.GetOffset() + to = from + a.rng.GetLength() + + hasPrev = true + ) + + // fill the chain end-to-start + for hasPrev { + if a.currentOffset <= from { + break + } + + head, err := a.objGetter.HeadObject(ctx, prevID) + if err != nil { + return nil, nil, err + } + if !a.isChild(head) { + return nil, nil, errParentAddressDiffers + } + + sz := head.PayloadSize() + + a.currentOffset -= sz + + if a.currentOffset < to { + off := uint64(0) + if from > a.currentOffset { + off = from - a.currentOffset + sz -= from - a.currentOffset + } + + if to < a.currentOffset+off+sz { + sz = to - off - a.currentOffset + } + + index := len(rngs) + rngs = append(rngs, objectSDK.Range{}) + rngs[index].SetOffset(off) + rngs[index].SetLength(sz) + + id, _ := head.ID() + chain = append(chain, id) + } + + prevID, hasPrev = head.PreviousID() + } + + return chain, rngs, nil +} From c8acdf40bbd18970fb39130f78ea64959f072f4b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Mon, 27 Jan 2025 14:24:06 +0300 Subject: [PATCH 340/591] [#1616] getsvc: Use slices.Reverse() where possible Signed-off-by: Evgenii Stratonikov --- pkg/services/object/get/assembler.go | 6 ++---- pkg/services/object/get/assembler_range.go | 8 +++----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/services/object/get/assembler.go b/pkg/services/object/get/assembler.go index 886f0aabb..b24c9417b 100644 --- a/pkg/services/object/get/assembler.go +++ b/pkg/services/object/get/assembler.go @@ -2,6 +2,7 @@ package getsvc import ( "context" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -196,10 +197,7 @@ func (a *assembler) assemblePayloadInReverse(ctx context.Context, writer ObjectW return err } - for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { - chain[left], chain[right] = chain[right], chain[left] - } - + slices.Reverse(chain) return a.assemblePayloadByObjectIDs(ctx, writer, chain, false) } diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go index 638db0c76..748a499ef 100644 --- a/pkg/services/object/get/assembler_range.go +++ b/pkg/services/object/get/assembler_range.go @@ -2,6 +2,7 @@ package getsvc import ( "context" + "slices" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -40,11 +41,8 @@ func (a *assembler) assemblePayloadInReverseRange(ctx context.Context, writer Ob return err } - for left, right := 0, len(chain)-1; left < right; left, right = left+1, right-1 { - chain[left], 
chain[right] = chain[right], chain[left] - rngs[left], rngs[right] = rngs[right], rngs[left] - } - + slices.Reverse(chain) + slices.Reverse(rngs) return a.assemblePayloadByObjectIDsRange(ctx, writer, chain, rngs) } From b309b34bfc77db01c77a6b98b6143aefc1673fcc Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 28 Jan 2025 15:01:02 +0300 Subject: [PATCH 341/591] [#1616] getsvc: Simplify buildChainRange() Signed-off-by: Evgenii Stratonikov --- pkg/services/object/get/assembler_range.go | 24 +++++++--------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go index 748a499ef..844a471a6 100644 --- a/pkg/services/object/get/assembler_range.go +++ b/pkg/services/object/get/assembler_range.go @@ -70,30 +70,20 @@ func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.I return nil, nil, errParentAddressDiffers } - sz := head.PayloadSize() - - a.currentOffset -= sz - - if a.currentOffset < to { - off := uint64(0) - if from > a.currentOffset { - off = from - a.currentOffset - sz -= from - a.currentOffset - } - - if to < a.currentOffset+off+sz { - sz = to - off - a.currentOffset - } - + nextOffset := a.currentOffset - head.PayloadSize() + clampedFrom := max(from, nextOffset) + clampedTo := min(to, a.currentOffset) + if clampedFrom < clampedTo { index := len(rngs) rngs = append(rngs, objectSDK.Range{}) - rngs[index].SetOffset(off) - rngs[index].SetLength(sz) + rngs[index].SetOffset(clampedFrom - nextOffset) + rngs[index].SetLength(clampedTo - clampedFrom) id, _ := head.ID() chain = append(chain, id) } + a.currentOffset = nextOffset prevID, hasPrev = head.PreviousID() } From 57dc0a8e9e69567213f87616d0080a6c12182019 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 29 Jan 2025 12:23:05 +0300 Subject: [PATCH 342/591] [#1616] getsvc: Move break condition from body to the loop condition Signed-off-by: Evgenii Stratonikov --- pkg/services/object/get/assembler_range.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/services/object/get/assembler_range.go b/pkg/services/object/get/assembler_range.go index 844a471a6..780693c40 100644 --- a/pkg/services/object/get/assembler_range.go +++ b/pkg/services/object/get/assembler_range.go @@ -57,11 +57,7 @@ func (a *assembler) buildChainRange(ctx context.Context, prevID oid.ID) ([]oid.I ) // fill the chain end-to-start - for hasPrev { - if a.currentOffset <= from { - break - } - + for hasPrev && from < a.currentOffset { head, err := a.objGetter.HeadObject(ctx, prevID) if err != nil { return nil, nil, err From 78bfd1222908d4ff4e8163f3e045fa2d7e478fcb Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 30 Jan 2025 15:29:38 +0300 Subject: [PATCH 343/591] [#1622] adm: Return non zero return code if metabase upgrade failed Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-adm/internal/modules/metabase/upgrade.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go index beced0d7a..c0c290c5e 100644 --- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go +++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go @@ -28,6 +28,7 @@ const ( var ( errNoPathsFound = errors.New("no metabase paths found") errNoMorphEndpointsFound = errors.New("no morph endpoints found") + errUpgradeFailed = errors.New("upgrade failed") ) var UpgradeCmd = &cobra.Command{ @@ -91,14 +92,19 @@ 
func upgrade(cmd *cobra.Command, _ []string) error { if err := eg.Wait(); err != nil { return err } + allSuccess := true for mb, ok := range result { if ok { cmd.Println(mb, ": success") } else { cmd.Println(mb, ": failed") + allSuccess = false } } - return nil + if allSuccess { + return nil + } + return errUpgradeFailed } func getMetabasePaths(appCfg *config.Config) ([]string, error) { From 9ee3dd4e91bea626eedee35f751fddaedf9c9621 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 28 Jan 2025 18:32:14 +0300 Subject: [PATCH 344/591] [#1618] ape: Fix object service request tests Constant string `testOwnerID` for these tests has an invalid format. It has 11 bytes instead of the required 25 for `user.ID`. It worked because: 1. `user.ID` was a byte slice and didn't check the length and format of byte slices decoded from strings. 2. In these tests `testOwnerID` was used only to decode the container owner ID and to compare it with the owner ID encoded back to a string. Since the `user.ID` implementation has changed, the problem arose. Now `testOwnerID` is valid. Signed-off-by: Ekaterina Lebedeva --- pkg/services/object/ape/request_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index 787785b60..f270bf97d 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -19,7 +19,7 @@ import ( ) const ( - testOwnerID = "FPPtmAi9TCX329" + testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y" incomingIP = "192.92.33.1" ) From 24054cf6f4c9b2cae443ea5b9741602eaca57a22 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 28 Jan 2025 18:49:45 +0300 Subject: [PATCH 345/591] [#1618] pkg: Refactor `user.ID.ScriptHash()` usage `user.ID.ScriptHash()` does not return an error anymore.
Signed-off-by: Ekaterina Lebedeva --- go.mod | 2 +- go.sum | 4 ++-- pkg/morph/client/balance/balanceOf.go | 5 +---- pkg/morph/client/balance/transfer.go | 13 +++---------- pkg/services/container/ape.go | 10 ++-------- pkg/services/container/ape_test.go | 22 ++++++++-------------- 6 files changed, 17 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 267e411fb..95efa584e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index 935d3c56a..3be3f2246 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76 h1:wzvSJIiS+p9qKfl3eg1oH6qlrjaEWiqTc/iMDKG3Ml4= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250109084609-328d214d2d76/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401 h1:5EN6YsbF5u2BffTKo5kVwe+Aqcy6CRTTVKcwZOI+Ym0= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index a5fb8e82a..449d49e5a 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -11,10 +11,7 @@ import ( // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { - h, err := id.ScriptHash() - if err != nil { - return nil, err - } + h := id.ScriptHash() invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(balanceOfMethod) diff --git a/pkg/morph/client/balance/transfer.go b/pkg/morph/client/balance/transfer.go index 52d69dccb..870bed166 100644 --- a/pkg/morph/client/balance/transfer.go +++ b/pkg/morph/client/balance/transfer.go @@ -22,22 +22,15 @@ type TransferPrm struct { // TransferX transfers p.Amount of GASe-12 from p.From to p.To // with details p.Details through direct smart contract call. 
func (c *Client) TransferX(ctx context.Context, p TransferPrm) error { - from, err := p.From.ScriptHash() - if err != nil { - return err - } - - to, err := p.To.ScriptHash() - if err != nil { - return err - } + from := p.From.ScriptHash() + to := p.To.ScriptHash() prm := client.InvokePrm{} prm.SetMethod(transferXMethod) prm.SetArgs(from, to, p.Amount, p.Details) prm.InvokePrmOptional = p.InvokePrmOptional - _, err = c.client.Invoke(ctx, prm) + _, err := c.client.Invoke(ctx, prm) if err != nil { return fmt.Errorf("invoke method (%s): %w", transferXMethod, err) } diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 493452fa6..2c240412b 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -649,10 +649,7 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr, err := ownerSDK.ScriptHash() - if err != nil { - return "", err - } + addr := ownerSDK.ScriptHash() namespace := "" subject, err := ac.frostFSIDClient.GetSubject(addr) @@ -674,10 +671,7 @@ func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) if err := ownerSDK.ReadFromV2(*owner); err != nil { return "", err } - addr, err := ownerSDK.ScriptHash() - if err != nil { - return "", err - } + addr := ownerSDK.ScriptHash() subject, err := ac.frostFSIDClient.GetSubject(addr) if err != nil { return "", fmt.Errorf("get subject error: %w", err) diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index 513ffff02..38b240d94 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -678,8 +678,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { testContainer := containertest.Container() owner := testContainer.Owner() - ownerAddr, err := owner.ScriptHash() - require.NoError(t, err) + ownerAddr := owner.ScriptHash() frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ ownerAddr: {}, @@ -690,7 +689,7 @@ func testDenyPutContainerForOthersSessionToken(t *testing.T) { nm.currentEpoch = 100 nm.netmaps = map[uint64]*netmap.NetMap{} - _, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ + _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{ Rules: []chain.Rule{ { Status: chain.AccessDenied, @@ -773,7 +772,7 @@ func testDenyPutContainerReadNamespaceFromFrostfsID(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -857,7 +856,7 @@ func testDenyPutContainerInvalidNamespace(t *testing.T) { require.NoError(t, err) req := initPutRequest(t, testContainer) - ownerScriptHash := initOwnerIDScriptHash(t, testContainer) + ownerScriptHash := initOwnerIDScriptHash(testContainer) frostfsIDSubjectReader := &frostfsidStub{ subjects: map[util.Uint160]*client.Subject{ @@ -1532,26 +1531,21 @@ func initPutRequest(t *testing.T, testContainer cnrSDK.Container) *container.Put return req } -func initOwnerIDScriptHash(t *testing.T, testContainer cnrSDK.Container) util.Uint160 { +func initOwnerIDScriptHash(testContainer cnrSDK.Container) util.Uint160 { var ownerSDK *user.ID owner := testContainer.Owner() ownerSDK = &owner - sc, err 
:= ownerSDK.ScriptHash() - require.NoError(t, err) - return sc + return ownerSDK.ScriptHash() } func initActorOwnerScriptHashes(t *testing.T, actorPK *keys.PrivateKey, ownerPK *keys.PrivateKey) (actorScriptHash util.Uint160, ownerScriptHash util.Uint160) { var actorUserID user.ID user.IDFromKey(&actorUserID, ecdsa.PublicKey(*actorPK.PublicKey())) - var err error - actorScriptHash, err = actorUserID.ScriptHash() - require.NoError(t, err) + actorScriptHash = actorUserID.ScriptHash() var ownerUserID user.ID user.IDFromKey(&ownerUserID, ecdsa.PublicKey(*ownerPK.PublicKey())) - ownerScriptHash, err = ownerUserID.ScriptHash() - require.NoError(t, err) + ownerScriptHash = ownerUserID.ScriptHash() require.NotEqual(t, ownerScriptHash.String(), actorScriptHash.String()) return } From 6a580db55effc5d824d3f6f53b2c3bab07781f93 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 30 Jan 2025 16:06:14 +0300 Subject: [PATCH 346/591] [#1626] go.mod: Bump frostfs-sdk-go Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 95efa584e..e3f44569e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index 3be3f2246..e0aacb454 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401 h1:5EN6YsbF5u2BffTKo5kVwe+Aqcy6CRTTVKcwZOI+Ym0= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250129133430-d195cb510401/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 1df64c5caba97eb0b03a4a2220517cd8c649dac1 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 30 Jan 2025 10:23:30 +0300 Subject: [PATCH 347/591] [#1621] treesvc: Move variable initialization to top in mergeOperationStreams() Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/sync.go | 10 +++++----- 1 file 
changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index c48a312fb..4f3262113 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -134,11 +134,6 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { defer close(merged) - ms := make([]*pilorama.Move, len(streams)) - for i := range streams { - ms[i] = <-streams[i] - } - // Merging different node streams shuffles incoming operations like that: // // x - operation from the stream A @@ -150,6 +145,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram // operation height from the stream B. This height is stored in minStreamedLastHeight. var minStreamedLastHeight uint64 = math.MaxUint64 + ms := make([]*pilorama.Move, len(streams)) + for i := range streams { + ms[i] = <-streams[i] + } + for { var minTimeMoveTime uint64 = math.MaxUint64 minTimeMoveIndex := -1 From 6fcae9f75ab822a67a7a0462cfe3cedaac155b92 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 30 Jan 2025 10:36:02 +0300 Subject: [PATCH 348/591] [#1621] treesvc: Cancel background sync on failure If applyOperationStream() exits prematurely, other goroutines will block on send and errgroup will never finish waiting. In this commit we also check whether context is cancelled. Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/sync.go | 35 +++++++++++++++++++++++----------- pkg/services/tree/sync_test.go | 3 ++- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 4f3262113..1a455def9 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -131,7 +131,7 @@ func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string } // mergeOperationStreams performs merge sort for node operation streams to one stream. 
-func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { +func mergeOperationStreams(ctx context.Context, streams []chan *pilorama.Move, merged chan<- *pilorama.Move) uint64 { defer close(merged) // Merging different node streams shuffles incoming operations like that: @@ -147,7 +147,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram ms := make([]*pilorama.Move, len(streams)) for i := range streams { - ms[i] = <-streams[i] + select { + case ms[i] = <-streams[i]: + case <-ctx.Done(): + return minStreamedLastHeight + } } for { @@ -164,7 +168,11 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram break } - merged <- ms[minTimeMoveIndex] + select { + case merged <- ms[minTimeMoveIndex]: + case <-ctx.Done(): + return minStreamedLastHeight + } height := ms[minTimeMoveIndex].Time if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil { minStreamedLastHeight = min(minStreamedLastHeight, height) @@ -176,7 +184,7 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID string, operationStream <-chan *pilorama.Move, -) uint64 { +) (uint64, error) { var prev *pilorama.Move var batch []*pilorama.Move for m := range operationStream { @@ -189,17 +197,17 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s if len(batch) == s.syncBatchSize { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time + return batch[0].Time, err } batch = batch[:0] } } if len(batch) > 0 { if err := s.forest.TreeApplyBatch(ctx, cid, treeID, batch); err != nil { - return batch[0].Time + return batch[0].Time, err } } - return math.MaxUint64 + return math.MaxUint64, nil } func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, @@ -235,7 +243,11 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { return err } - opsCh <- m + select { + case opsCh <- m: + case <-ctx.Done(): + return ctx.Err() + } } if !errors.Is(err, io.EOF) { return err @@ -264,13 +276,14 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, merged := make(chan *pilorama.Move) var minStreamedLastHeight uint64 errGroup.Go(func() error { - minStreamedLastHeight = mergeOperationStreams(nodeOperationStreams, merged) + minStreamedLastHeight = mergeOperationStreams(egCtx, nodeOperationStreams, merged) return nil }) var minUnappliedHeight uint64 errGroup.Go(func() error { - minUnappliedHeight = s.applyOperationStream(ctx, cid, treeID, merged) - return nil + var err error + minUnappliedHeight, err = s.applyOperationStream(egCtx, cid, treeID, merged) + return err }) var allNodesSynced atomic.Bool diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go index 497d90554..87d419408 100644 --- a/pkg/services/tree/sync_test.go +++ b/pkg/services/tree/sync_test.go @@ -1,6 +1,7 @@ package tree import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -64,7 +65,7 @@ func Test_mergeOperationStreams(t *testing.T) { merged := make(chan *pilorama.Move, 1) min := make(chan uint64) go func() { - min <- mergeOperationStreams(nodeOpChans, merged) + min <- mergeOperationStreams(context.Background(), nodeOpChans, merged) }() var res []uint64 From 
9c4c5a52625180ae8491cee184d2f8583f541e75 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 14 Jan 2025 16:37:48 +0300
Subject: [PATCH 349/591] [#1602] go.mod: Update sdk-go

Signed-off-by: Aleksey Savchuk
---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index e3f44569e..cc6b0a202 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421
 git.frostfs.info/TrueCloudLab/hrw v1.2.1
 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
diff --git a/go.sum b/go.sum
index e0aacb454..eae467b31 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=

From 4de5fca547359398cd4b6bbf801f15d0bd603b83 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 3 Feb 2025 17:36:21 +0300
Subject: [PATCH 350/591] [#1624] blobovniczatree: Return source error from Init()

Previously, when opening a database failed, the original error came back together with a `context canceled` error, because the `iterateIncompletedRebuildDBPaths` method checks `ctx.Done()` and egCtx was passed to it, so cancellation of the errgroup context masked the source error.
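
For illustration only, a minimal standalone sketch of this failure mode, assuming nothing beyond the standard golang.org/x/sync/errgroup semantics (the error text and names below are hypothetical, not part of this change): once any goroutine in the group fails, the derived context is cancelled, so a loop that watches egCtx.Done() reports `context canceled`, while eg.Wait() still yields the original error. Running the iteration inside the same group therefore lets Init() report the source error via eg.Wait().

    package main

    import (
        "context"
        "errors"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        srcErr := errors.New("open blobovnicza: invalid database")

        eg, egCtx := errgroup.WithContext(context.Background())

        // A worker fails with the source error; the errgroup cancels egCtx.
        eg.Go(func() error { return srcErr })

        // Code that only watches egCtx.Done() surfaces the cancellation,
        // not the source error.
        <-egCtx.Done()
        fmt.Println(egCtx.Err()) // context canceled

        // Wait() still returns the first error from the group,
        // i.e. the source error that Init() should report.
        fmt.Println(eg.Wait()) // open blobovnicza: invalid database
    }
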
Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobovniczatree/control.go | 53 +++++++++---------- .../blobstor/blobovniczatree/control_test.go | 34 ++++++++++++ 2 files changed, 60 insertions(+), 27 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go index c77df63bf..a6c1ce368 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go @@ -41,35 +41,34 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - eg.SetLimit(b.blzInitWorkerCount) - err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { - eg.Go(func() error { - p = strings.TrimSuffix(p, rebuildSuffix) - shBlz := b.getBlobovniczaWithoutCaching(p) - blz, err := shBlz.Open(egCtx) - if err != nil { - return err - } - defer shBlz.Close(egCtx) - - moveInfo, err := blz.ListMoveInfo(egCtx) - if err != nil { - return err - } - for _, move := range moveInfo { - b.deleteProtectedObjects.Add(move.Address) - } - - b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) - return nil - }) - return false, nil - }) - if err != nil { - _ = eg.Wait() - return err + if b.blzInitWorkerCount > 0 { + eg.SetLimit(b.blzInitWorkerCount + 1) } + eg.Go(func() error { + return b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) { + eg.Go(func() error { + p = strings.TrimSuffix(p, rebuildSuffix) + shBlz := b.getBlobovniczaWithoutCaching(p) + blz, err := shBlz.Open(egCtx) + if err != nil { + return err + } + defer shBlz.Close(egCtx) + moveInfo, err := blz.ListMoveInfo(egCtx) + if err != nil { + return err + } + for _, move := range moveInfo { + b.deleteProtectedObjects.Add(move.Address) + } + + b.log.Debug(egCtx, logs.BlobovniczatreeBlobovniczaSuccessfullyInitializedClosing, zap.String("id", p)) + return nil + }) + return false, nil + }) + }) return eg.Wait() } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go index b26323bd0..7db1891f9 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/control_test.go @@ -2,6 +2,9 @@ package blobovniczatree import ( "context" + "os" + "path" + "strconv" "testing" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -129,3 +132,34 @@ func TestObjectsAvailableAfterDepthAndWidthEdit(t *testing.T) { require.NoError(t, blz.Close(context.Background())) } + +func TestInitBlobovniczasInitErrorType(t *testing.T) { + t.Parallel() + + rootDir := t.TempDir() + + for idx := 0; idx < 10; idx++ { + f, err := os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db")) + require.NoError(t, err) + _, err = f.Write([]byte("invalid db")) + require.NoError(t, err) + require.NoError(t, f.Close()) + + f, err = os.Create(path.Join(rootDir, strconv.FormatInt(int64(idx), 10)+".db"+rebuildSuffix)) + require.NoError(t, err) + require.NoError(t, f.Close()) + } + + blz := NewBlobovniczaTree( + context.Background(), + WithBlobovniczaShallowDepth(1), + WithBlobovniczaShallowWidth(1), + WithRootPath(rootDir), + ) + + require.NoError(t, blz.Open(mode.ComponentReadWrite)) + err := blz.Init() + require.Contains(t, err.Error(), "open blobovnicza") + require.Contains(t, err.Error(), "invalid database") + require.NoError(t, 
blz.Close(context.Background())) +} From 9b113c315668fd26a9d43f8b29c9139fcf3fbb5e Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 5 Feb 2025 16:37:11 +0300 Subject: [PATCH 351/591] [#1613] morph: Add tracing for morph queries to neo-go Signed-off-by: Alexander Chuprov --- .../modules/control/set_netmap_status.go | 2 +- cmd/frostfs-cli/modules/object/nodes.go | 4 +- cmd/frostfs-node/cache.go | 62 ++++++++------- cmd/frostfs-node/cache_test.go | 11 +-- cmd/frostfs-node/config.go | 2 +- cmd/frostfs-node/container.go | 22 +++--- cmd/frostfs-node/frostfsid.go | 9 ++- cmd/frostfs-node/netmap.go | 20 ++--- cmd/frostfs-node/object.go | 10 +-- cmd/frostfs-node/tree.go | 12 +-- pkg/ape/request/frostfsid.go | 9 ++- pkg/core/container/info.go | 13 ++-- pkg/core/container/storage.go | 6 +- pkg/core/container/util.go | 5 +- pkg/core/frostfsid/subject_provider.go | 6 +- pkg/core/netmap/storage.go | 16 ++-- pkg/core/object/fmt.go | 2 +- pkg/core/object/fmt_test.go | 14 ++-- pkg/core/object/sender_classifier.go | 15 ++-- pkg/innerring/fetcher.go | 10 ++- pkg/innerring/indexer.go | 19 ++--- pkg/innerring/indexer_test.go | 47 ++++++------ pkg/innerring/innerring.go | 12 +-- pkg/innerring/netmap.go | 5 +- .../processors/alphabet/handlers_test.go | 2 +- .../processors/alphabet/process_emit.go | 2 +- .../processors/alphabet/processor.go | 2 +- pkg/innerring/processors/container/common.go | 13 ++-- .../processors/container/handlers_test.go | 8 +- .../processors/container/process_container.go | 48 ++++++------ .../processors/container/processor.go | 8 +- .../processors/governance/handlers_test.go | 4 +- .../processors/governance/process_update.go | 4 +- .../processors/governance/processor.go | 4 +- .../processors/netmap/handlers_test.go | 8 +- .../netmap/nodevalidation/locode/calls.go | 3 +- .../nodevalidation/locode/calls_test.go | 9 ++- .../netmap/nodevalidation/maddress/calls.go | 3 +- .../netmap/nodevalidation/state/validator.go | 7 +- .../nodevalidation/state/validator_test.go | 5 +- .../netmap/nodevalidation/validator.go | 6 +- .../processors/netmap/process_epoch.go | 4 +- .../processors/netmap/process_peers.go | 4 +- pkg/innerring/processors/netmap/processor.go | 6 +- pkg/innerring/processors/netmap/wrappers.go | 8 +- pkg/innerring/state.go | 6 +- pkg/local_object_storage/engine/engine.go | 2 +- pkg/local_object_storage/engine/evacuate.go | 2 +- .../engine/evacuate_test.go | 4 +- pkg/local_object_storage/metabase/upgrade.go | 10 +-- pkg/local_object_storage/shard/control.go | 2 +- pkg/morph/client/balance/balanceOf.go | 5 +- pkg/morph/client/balance/decimals.go | 5 +- pkg/morph/client/client.go | 2 +- pkg/morph/client/container/containers_of.go | 9 ++- pkg/morph/client/container/deletion_info.go | 15 ++-- pkg/morph/client/container/get.go | 15 ++-- pkg/morph/client/container/list.go | 5 +- pkg/morph/client/frostfsid/subject.go | 9 ++- pkg/morph/client/netmap/config.go | 48 ++++++------ pkg/morph/client/netmap/epoch.go | 9 ++- pkg/morph/client/netmap/innerring.go | 4 +- pkg/morph/client/netmap/netmap.go | 13 ++-- pkg/morph/client/netmap/snapshot.go | 5 +- pkg/morph/client/static.go | 5 +- pkg/services/accounting/morph/executor.go | 6 +- pkg/services/apemanager/executor.go | 16 ++-- pkg/services/common/ape/checker.go | 7 +- pkg/services/container/ape.go | 76 +++++++++---------- pkg/services/container/ape_test.go | 16 ++-- pkg/services/container/morph/executor.go | 14 ++-- pkg/services/control/ir/server/calls.go | 6 +- pkg/services/control/rpc.go | 3 + pkg/services/control/server/evacuate_async.go 
| 10 +-- .../control/server/get_netmap_status.go | 4 +- pkg/services/control/server/server.go | 2 +- pkg/services/netmap/executor.go | 6 +- pkg/services/object/acl/v2/service.go | 8 +- pkg/services/object/acl/v2/types.go | 4 +- pkg/services/object/ape/checker.go | 2 +- pkg/services/object/ape/checker_test.go | 16 ++-- pkg/services/object/ape/request.go | 16 ++-- pkg/services/object/common/target/target.go | 23 +++--- pkg/services/object/common/writer/common.go | 2 +- pkg/services/object/common/writer/ec.go | 12 +-- pkg/services/object/common/writer/ec_test.go | 2 +- pkg/services/object/common/writer/writer.go | 4 +- pkg/services/object/get/assemblerec.go | 2 +- pkg/services/object/get/get_test.go | 8 +- pkg/services/object/get/getrangeec_test.go | 4 +- pkg/services/object/get/remote_getter.go | 2 +- pkg/services/object/get/request.go | 4 +- pkg/services/object/get/types.go | 4 +- pkg/services/object/get/v2/get_range_hash.go | 12 +-- pkg/services/object/patch/streamer.go | 2 +- pkg/services/object/put/single.go | 14 ++-- pkg/services/object/put/streamer.go | 2 +- pkg/services/object/put/v2/streamer.go | 2 +- pkg/services/object/search/container.go | 8 +- pkg/services/object/search/exec.go | 6 +- pkg/services/object/search/search_test.go | 8 +- pkg/services/object/search/service.go | 4 +- pkg/services/object/search/util.go | 2 +- pkg/services/object/util/placement.go | 17 +++-- .../object_manager/placement/netmap.go | 7 +- .../object_manager/placement/traverser.go | 7 +- .../placement/traverser_test.go | 25 +++--- pkg/services/policer/check.go | 6 +- pkg/services/policer/ec.go | 4 +- pkg/services/policer/ec_test.go | 8 +- pkg/services/policer/policer_test.go | 30 ++++---- pkg/services/tree/ape.go | 8 +- pkg/services/tree/ape_test.go | 4 +- pkg/services/tree/container.go | 7 +- pkg/services/tree/options.go | 5 +- pkg/services/tree/replicator.go | 6 +- pkg/services/tree/service.go | 24 +++--- pkg/services/tree/signature.go | 2 +- pkg/services/tree/signature_test.go | 10 +-- pkg/services/tree/sync.go | 10 +-- 120 files changed, 623 insertions(+), 562 deletions(-) diff --git a/cmd/frostfs-cli/modules/control/set_netmap_status.go b/cmd/frostfs-cli/modules/control/set_netmap_status.go index 87c4f3b3d..26a1ba883 100644 --- a/cmd/frostfs-cli/modules/control/set_netmap_status.go +++ b/cmd/frostfs-cli/modules/control/set_netmap_status.go @@ -127,7 +127,7 @@ func awaitSetNetmapStatus(cmd *cobra.Command, pk *ecdsa.PrivateKey, cli *client. 
var resp *control.GetNetmapStatusResponse var err error err = cli.ExecRaw(func(client *rawclient.Client) error { - resp, err = control.GetNetmapStatus(client, req) + resp, err = control.GetNetmapStatus(cmd.Context(), client, req) return err }) commonCmd.ExitOnErr(cmd, "failed to get current netmap status: %w", err) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 31682c0e1..1500830a2 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -320,7 +320,7 @@ func getReplicaRequiredPlacement(cmd *cobra.Command, objects []phyObject, placem } placementBuilder := placement.NewNetworkMapBuilder(netmap) for _, object := range objects { - placement, err := placementBuilder.BuildPlacement(object.containerID, &object.objectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &object.objectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement for object: %w", err) for repIdx, rep := range placement { numOfReplicas := placementPolicy.ReplicaDescriptor(repIdx).NumberOfObjects() @@ -358,7 +358,7 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem placementObjectID = object.ecHeader.parent } placementBuilder := placement.NewNetworkMapBuilder(netmap) - placement, err := placementBuilder.BuildPlacement(object.containerID, &placementObjectID, placementPolicy) + placement, err := placementBuilder.BuildPlacement(cmd.Context(), object.containerID, &placementObjectID, placementPolicy) commonCmd.ExitOnErr(cmd, "failed to get required placement: %w", err) for _, vector := range placement { diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index b90641799..0fe56d2b0 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,6 +1,7 @@ package main import ( + "context" "sync" "time" @@ -16,7 +17,7 @@ import ( "github.com/hashicorp/golang-lru/v2/expirable" ) -type netValueReader[K any, V any] func(K) (V, error) +type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) type valueWithError[V any] struct { v V @@ -49,7 +50,7 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n // updates the value from the network on cache miss or by TTL. // // returned value should not be modified. -func (c *ttlNetCache[K, V]) get(key K) (V, error) { +func (c *ttlNetCache[K, V]) get(ctx context.Context, key K) (V, error) { hit := false startedAt := time.Now() defer func() { @@ -71,7 +72,7 @@ func (c *ttlNetCache[K, V]) get(key K) (V, error) { return val.v, val.e } - v, err := c.netRdr(key) + v, err := c.netRdr(ctx, key) c.cache.Add(key, &valueWithError[V]{ v: v, @@ -135,7 +136,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap] // updates the value from the network on cache miss. // // returned value should not be modified. 
-func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) { +func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { hit := false startedAt := time.Now() defer func() { @@ -148,7 +149,7 @@ func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) { return val, nil } - val, err := c.netRdr(key) + val, err := c.netRdr(ctx, key) if err != nil { return nil, err } @@ -166,11 +167,11 @@ type ttlContainerStorage struct { } func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage { - lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) { - return v.Get(id) + lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.Container, error) { + return v.Get(ctx, id) }, metrics.NewCacheMetrics("container")) - lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) { - return v.DeletionInfo(id) + lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + return v.DeletionInfo(ctx, id) }, metrics.NewCacheMetrics("container_deletion_info")) return ttlContainerStorage{ @@ -188,12 +189,12 @@ func (s ttlContainerStorage) handleRemoval(cnr cid.ID) { // Get returns container value from the cache. If value is missing in the cache // or expired, then it returns value from side chain and updates the cache. -func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) { - return s.containerCache.get(cnr) +func (s ttlContainerStorage) Get(ctx context.Context, cnr cid.ID) (*container.Container, error) { + return s.containerCache.get(ctx, cnr) } -func (s ttlContainerStorage) DeletionInfo(cnr cid.ID) (*container.DelInfo, error) { - return s.delInfoCache.get(cnr) +func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*container.DelInfo, error) { + return s.delInfoCache.get(ctx, cnr) } type lruNetmapSource struct { @@ -205,8 +206,8 @@ type lruNetmapSource struct { func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { const netmapCacheSize = 10 - lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) { - return v.GetNetMapByEpoch(key) + lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + return v.GetNetMapByEpoch(ctx, key) }, metrics.NewCacheMetrics("netmap")) return &lruNetmapSource{ @@ -215,16 +216,16 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { } } -func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff) +func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, s.netState.CurrentEpoch()-diff) } -func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { - return s.getNetMapByEpoch(epoch) +func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + return s.getNetMapByEpoch(ctx, epoch) } -func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.cache.get(epoch) +func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + val, err := s.cache.get(ctx, 
epoch) if err != nil { return nil, err } @@ -232,7 +233,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err return val, nil } -func (s *lruNetmapSource) Epoch() (uint64, error) { +func (s *lruNetmapSource) Epoch(_ context.Context) (uint64, error) { return s.netState.CurrentEpoch(), nil } @@ -240,7 +241,10 @@ type cachedIRFetcher struct { *ttlNetCache[struct{}, [][]byte] } -func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher { +func newCachedIRFetcher(f interface { + InnerRingKeys(ctx context.Context) ([][]byte, error) +}, +) cachedIRFetcher { const ( irFetcherCacheSize = 1 // we intend to store only one value @@ -254,8 +258,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached ) irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL, - func(_ struct{}) ([][]byte, error) { - return f.InnerRingKeys() + func(ctx context.Context, _ struct{}) ([][]byte, error) { + return f.InnerRingKeys(ctx) }, metrics.NewCacheMetrics("ir_keys"), ) @@ -265,8 +269,8 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cached // InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in // the cache or expired, then it returns keys from side chain and updates // the cache. -func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) { - val, err := f.get(struct{}{}) +func (f cachedIRFetcher) InnerRingKeys(ctx context.Context) ([][]byte, error) { + val, err := f.get(ctx, struct{}{}) if err != nil { return nil, err } @@ -289,7 +293,7 @@ func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.M } } -func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { +func (c *ttlMaxObjectSizeCache) MaxObjectSize(ctx context.Context) uint64 { const ttl = time.Second * 30 hit := false @@ -311,7 +315,7 @@ func (c *ttlMaxObjectSizeCache) MaxObjectSize() uint64 { c.mtx.Lock() size = c.lastSize if !c.lastUpdated.After(prevUpdated) { - size = c.src.MaxObjectSize() + size = c.src.MaxObjectSize(ctx) c.lastSize = size c.lastUpdated = time.Now() } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index f8c324a2f..b1601aa67 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "errors" "testing" "time" @@ -17,7 +18,7 @@ func TestTTLNetCache(t *testing.T) { t.Run("Test Add and Get", func(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.Equal(t, ti, val) }) @@ -26,7 +27,7 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) time.Sleep(2 * ttlDuration) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) @@ -35,20 +36,20 @@ func TestTTLNetCache(t *testing.T) { ti := time.Now() cache.set(key, ti, nil) cache.remove(key) - val, err := cache.get(key) + val, err := cache.get(context.Background(), key) require.NoError(t, err) require.NotEqual(t, val, ti) }) t.Run("Test Cache Error", func(t *testing.T) { cache.set("error", time.Now(), errors.New("mock error")) - _, err := cache.get("error") + _, err := cache.get(context.Background(), "error") require.Error(t, err) require.Equal(t, "mock error", err.Error()) }) } -func testNetValueReader(key string) (time.Time, error) { +func testNetValueReader(_ context.Context, key string) (time.Time, 
error) { if key == "error" { return time.Now(), errors.New("mock error") } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 3e9cd4e11..75d6f6dec 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1205,7 +1205,7 @@ func (c *cfg) setContractNodeInfo(ni *netmap.NodeInfo) { } func (c *cfg) updateContractNodeInfo(ctx context.Context, epoch uint64) { - ni, err := c.netmapLocalNodeState(epoch) + ni, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { c.log.Error(ctx, logs.FrostFSNodeCouldNotUpdateNodeStateOnNewEpoch, zap.Uint64("epoch", epoch), diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 98fea9f41..012012297 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -100,7 +100,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c // TODO: use owner directly from the event after neofs-contract#256 will become resolved // but don't forget about the profit of reading the new container and caching it: // creation success are most commonly tracked by polling GET op. - cnr, err := cnrSrc.Get(ev.ID) + cnr, err := cnrSrc.Get(ctx, ev.ID) if err == nil { containerCache.containerCache.set(ev.ID, cnr, nil) } else { @@ -221,25 +221,25 @@ type morphContainerReader struct { src containerCore.Source lister interface { - ContainersOf(*user.ID) ([]cid.ID, error) - IterateContainersOf(*user.ID, func(cid.ID) error) error + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } } -func (x *morphContainerReader) Get(id cid.ID) (*containerCore.Container, error) { - return x.src.Get(id) +func (x *morphContainerReader) Get(ctx context.Context, id cid.ID) (*containerCore.Container, error) { + return x.src.Get(ctx, id) } -func (x *morphContainerReader) DeletionInfo(id cid.ID) (*containerCore.DelInfo, error) { - return x.src.DeletionInfo(id) +func (x *morphContainerReader) DeletionInfo(ctx context.Context, id cid.ID) (*containerCore.DelInfo, error) { + return x.src.DeletionInfo(ctx, id) } -func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) { - return x.lister.ContainersOf(id) +func (x *morphContainerReader) ContainersOf(ctx context.Context, id *user.ID) ([]cid.ID, error) { + return x.lister.ContainersOf(ctx, id) } -func (x *morphContainerReader) IterateContainersOf(id *user.ID, processCID func(cid.ID) error) error { - return x.lister.IterateContainersOf(id, processCID) +func (x *morphContainerReader) IterateContainersOf(ctx context.Context, id *user.ID, processCID func(cid.ID) error) error { + return x.lister.IterateContainersOf(ctx, id, processCID) } type morphContainerWriter struct { diff --git a/cmd/frostfs-node/frostfsid.go b/cmd/frostfs-node/frostfsid.go index 3cca09105..d2d4e9785 100644 --- a/cmd/frostfs-node/frostfsid.go +++ b/cmd/frostfs-node/frostfsid.go @@ -1,6 +1,7 @@ package main import ( + "context" "strings" "time" @@ -42,7 +43,7 @@ func newMorphFrostfsIDCache(subjProvider frostfsidcore.SubjectProvider, size int } } -func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, error) { +func (m *morphFrostfsIDCache) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { hit := false startedAt := time.Now() defer func() { @@ -55,7 +56,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er return result.subject, result.err } - subj, err := m.subjProvider.GetSubject(addr) + subj, err := 
m.subjProvider.GetSubject(ctx, addr) if err != nil { if m.isCacheableError(err) { m.subjCache.Add(addr, subjectWithError{ @@ -69,7 +70,7 @@ func (m *morphFrostfsIDCache) GetSubject(addr util.Uint160) (*client.Subject, er return subj, nil } -func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { +func (m *morphFrostfsIDCache) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { hit := false startedAt := time.Now() defer func() { @@ -82,7 +83,7 @@ func (m *morphFrostfsIDCache) GetSubjectExtended(addr util.Uint160) (*client.Sub return result.subject, result.err } - subjExt, err := m.subjProvider.GetSubjectExtended(addr) + subjExt, err := m.subjProvider.GetSubjectExtended(ctx, addr) if err != nil { if m.isCacheableError(err) { m.subjExtCache.Add(addr, subjectExtWithError{ diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 34cd00ac8..0e90e7707 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -239,7 +239,7 @@ func setNetmapNotificationParser(c *cfg, sTyp string, p event.NotificationParser // initNetmapState inits current Network map state. // Must be called after Morph components initialization. func initNetmapState(ctx context.Context, c *cfg) { - epoch, err := c.cfgNetmap.wrapper.Epoch() + epoch, err := c.cfgNetmap.wrapper.Epoch(ctx) fatalOnErrDetails("could not initialize current epoch number", err) var ni *netmapSDK.NodeInfo @@ -278,7 +278,7 @@ func nodeState(ni *netmapSDK.NodeInfo) string { } func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { - nmNodes, err := c.cfgNetmap.wrapper.GetCandidates() + nmNodes, err := c.cfgNetmap.wrapper.GetCandidates(ctx) if err != nil { return nil, err } @@ -291,7 +291,7 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm } } - node, err := c.netmapLocalNodeState(epoch) + node, err := c.netmapLocalNodeState(ctx, epoch) if err != nil { return nil, err } @@ -312,9 +312,9 @@ func (c *cfg) netmapInitLocalNodeState(ctx context.Context, epoch uint64) (*netm return candidate, nil } -func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) { +func (c *cfg) netmapLocalNodeState(ctx context.Context, epoch uint64) (*netmapSDK.NodeInfo, error) { // calculate current network state - nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(epoch) + nm, err := c.cfgNetmap.wrapper.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, err } @@ -376,8 +376,8 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro return c.updateNetMapState(ctx, func(*nmClient.UpdatePeerPrm) {}) } -func (c *cfg) GetNetmapStatus() (control.NetmapStatus, uint64, error) { - epoch, err := c.netMapSource.Epoch() +func (c *cfg) GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) { + epoch, err := c.netMapSource.Epoch(ctx) if err != nil { return control.NetmapStatus_STATUS_UNDEFINED, 0, fmt.Errorf("failed to get current epoch: %w", err) } @@ -390,7 +390,7 @@ func (c *cfg) ForceMaintenance(ctx context.Context) error { } func (c *cfg) setMaintenanceStatus(ctx context.Context, force bool) error { - netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration() + netSettings, err := c.cfgNetmap.wrapper.ReadNetworkConfiguration(ctx) if err != nil { err = fmt.Errorf("read network settings to check maintenance allowance: %w", err) } else if !netSettings.MaintenanceModeAllowed { @@ -438,7 +438,7 @@ type netInfo struct { 
msPerBlockRdr func() (int64, error) } -func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { +func (n *netInfo) Dump(ctx context.Context, ver version.Version) (*netmapSDK.NetworkInfo, error) { magic, err := n.magic.MagicNumber() if err != nil { return nil, err @@ -448,7 +448,7 @@ func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) { ni.SetCurrentEpoch(n.netState.CurrentEpoch()) ni.SetMagicNumber(magic) - netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration() + netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration(ctx) if err != nil { return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err) } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 939241168..77446b81c 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -54,10 +54,10 @@ type objectSvc struct { patch *patchsvc.Service } -func (c *cfg) MaxObjectSize() uint64 { - sz, err := c.cfgNetmap.wrapper.MaxObjectSize() +func (c *cfg) MaxObjectSize(ctx context.Context) uint64 { + sz, err := c.cfgNetmap.wrapper.MaxObjectSize(ctx) if err != nil { - c.log.Error(context.Background(), logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, + c.log.Error(ctx, logs.FrostFSNodeCouldNotGetMaxObjectSizeValue, zap.Error(err), ) } @@ -122,8 +122,8 @@ type innerRingFetcherWithNotary struct { sidechain *morphClient.Client } -func (fn *innerRingFetcherWithNotary) InnerRingKeys() ([][]byte, error) { - keys, err := fn.sidechain.NeoFSAlphabetList() +func (fn *innerRingFetcherWithNotary) InnerRingKeys(ctx context.Context) ([][]byte, error) { + keys, err := fn.sidechain.NeoFSAlphabetList(ctx) if err != nil { return nil, fmt.Errorf("can't get inner ring keys from alphabet role: %w", err) } diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index f3ddc8cbe..f8330a25e 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -29,16 +29,16 @@ type cnrSource struct { cli *containerClient.Client } -func (c cnrSource) Get(id cid.ID) (*container.Container, error) { - return c.src.Get(id) +func (c cnrSource) Get(ctx context.Context, id cid.ID) (*container.Container, error) { + return c.src.Get(ctx, id) } -func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) { - return c.src.DeletionInfo(cid) +func (c cnrSource) DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) { + return c.src.DeletionInfo(ctx, cid) } -func (c cnrSource) List() ([]cid.ID, error) { - return c.cli.ContainersOf(nil) +func (c cnrSource) List(ctx context.Context) ([]cid.ID, error) { + return c.cli.ContainersOf(ctx, nil) } func initTreeService(c *cfg) { diff --git a/pkg/ape/request/frostfsid.go b/pkg/ape/request/frostfsid.go index c0413678d..d32bd4a07 100644 --- a/pkg/ape/request/frostfsid.go +++ b/pkg/ape/request/frostfsid.go @@ -1,6 +1,7 @@ package request import ( + "context" "fmt" "strconv" "strings" @@ -12,9 +13,9 @@ import ( ) // FormFrostfsIDRequestProperties forms frostfsid specific request properties like user-claim tags and group ID. 
-func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { +func FormFrostfsIDRequestProperties(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) (map[string]string, error) { reqProps := make(map[string]string) - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) @@ -36,8 +37,8 @@ func FormFrostfsIDRequestProperties(frostFSIDClient frostfsidcore.SubjectProvide } // Groups return the actor's group ids from frostfsid contract. -func Groups(frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { - subj, err := frostFSIDClient.GetSubjectExtended(pk.GetScriptHash()) +func Groups(ctx context.Context, frostFSIDClient frostfsidcore.SubjectProvider, pk *keys.PublicKey) ([]string, error) { + subj, err := frostFSIDClient.GetSubjectExtended(ctx, pk.GetScriptHash()) if err != nil { if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { return nil, fmt.Errorf("get subject error: %w", err) diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go index 62cc21553..1c52d93e7 100644 --- a/pkg/core/container/info.go +++ b/pkg/core/container/info.go @@ -1,6 +1,7 @@ package container import ( + "context" "sync" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" @@ -19,7 +20,7 @@ type infoValue struct { } type InfoProvider interface { - Info(id cid.ID) (Info, error) + Info(ctx context.Context, id cid.ID) (Info, error) } type infoProvider struct { @@ -43,13 +44,13 @@ func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider { } } -func (r *infoProvider) Info(id cid.ID) (Info, error) { +func (r *infoProvider) Info(ctx context.Context, id cid.ID) (Info, error) { v, found := r.tryGetFromCache(id) if found { return v.info, v.err } - return r.getFromSource(id) + return r.getFromSource(ctx, id) } func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { @@ -60,7 +61,7 @@ func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) { return value, found } -func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { +func (r *infoProvider) getFromSource(ctx context.Context, id cid.ID) (Info, error) { r.kl.Lock(id) defer r.kl.Unlock(id) @@ -75,11 +76,11 @@ func (r *infoProvider) getFromSource(id cid.ID) (Info, error) { return Info{}, r.sourceErr } - cnr, err := r.source.Get(id) + cnr, err := r.source.Get(ctx, id) var civ infoValue if err != nil { if client.IsErrContainerNotFound(err) { - removed, err := WasRemoved(r.source, id) + removed, err := WasRemoved(ctx, r.source, id) if err != nil { civ.err = err } else { diff --git a/pkg/core/container/storage.go b/pkg/core/container/storage.go index ba4404546..4eb14e53c 100644 --- a/pkg/core/container/storage.go +++ b/pkg/core/container/storage.go @@ -1,6 +1,8 @@ package container import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto" @@ -41,9 +43,9 @@ type Source interface { // // Implementations must not retain the container pointer and modify // the container through it. 
- Get(cid.ID) (*Container, error) + Get(ctx context.Context, cid cid.ID) (*Container, error) - DeletionInfo(cid.ID) (*DelInfo, error) + DeletionInfo(ctx context.Context, cid cid.ID) (*DelInfo, error) } // EACL groups information about the FrostFS container's extended ACL stored in diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index d27556807..a24b36944 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -10,8 +11,8 @@ import ( // WasRemoved checks whether the container ever existed or // it just has not been created yet at the current epoch. -func WasRemoved(s Source, cid cid.ID) (bool, error) { - _, err := s.DeletionInfo(cid) +func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { + _, err := s.DeletionInfo(ctx, cid) if err == nil { return true, nil } diff --git a/pkg/core/frostfsid/subject_provider.go b/pkg/core/frostfsid/subject_provider.go index ecfd0eb15..e752043d3 100644 --- a/pkg/core/frostfsid/subject_provider.go +++ b/pkg/core/frostfsid/subject_provider.go @@ -1,6 +1,8 @@ package frostfsid import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" "github.com/nspcc-dev/neo-go/pkg/util" ) @@ -11,6 +13,6 @@ const ( // SubjectProvider interface provides methods to get subject from FrostfsID contract. type SubjectProvider interface { - GetSubject(util.Uint160) (*client.Subject, error) - GetSubjectExtended(util.Uint160) (*client.SubjectExtended, error) + GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) + GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) } diff --git a/pkg/core/netmap/storage.go b/pkg/core/netmap/storage.go index 7770c61c7..97313da84 100644 --- a/pkg/core/netmap/storage.go +++ b/pkg/core/netmap/storage.go @@ -1,6 +1,8 @@ package netmap import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -16,7 +18,7 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMap(diff uint64) (*netmap.NetMap, error) + GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) // GetNetMapByEpoch reads network map by the epoch number from the storage. // It returns the pointer to the requested network map and any error encountered. @@ -25,21 +27,21 @@ type Source interface { // // Implementations must not retain the network map pointer and modify // the network map through it. - GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) // Epoch reads the current epoch from the storage. // It returns thw number of the current epoch and any error encountered. // // Must return exactly one non-default value. - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } // GetLatestNetworkMap requests and returns the latest network map from the storage. -func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(0) +func GetLatestNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 0) } // GetPreviousNetworkMap requests and returns previous from the latest network map from the storage. 
-func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) { - return src.GetNetMap(1) +func GetPreviousNetworkMap(ctx context.Context, src Source) (*netmap.NetMap, error) { + return src.GetNetMap(ctx, 1) } diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go index 19b5d34e4..cf090eb37 100644 --- a/pkg/core/object/fmt.go +++ b/pkg/core/object/fmt.go @@ -199,7 +199,7 @@ func (v *FormatValidator) isIROrContainerNode(ctx context.Context, obj *objectSD cnrIDBin := make([]byte, sha256.Size) cnrID.Encode(cnrIDBin) - cnr, err := v.containers.Get(cnrID) + cnr, err := v.containers.Get(ctx, cnrID) if err != nil { return acl.RoleOthers, fmt.Errorf("failed to get container (id=%s): %w", cnrID.EncodeToString(), err) } diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 20560cf3a..239a9f389 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -578,7 +578,7 @@ type testIRSource struct { irNodes [][]byte } -func (s *testIRSource) InnerRingKeys() ([][]byte, error) { +func (s *testIRSource) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.irNodes, nil } @@ -586,14 +586,14 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } @@ -602,20 +602,20 @@ type testNetmapSource struct { currentEpoch uint64 } -func (s *testNetmapSource) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { if diff >= s.currentEpoch { return nil, fmt.Errorf("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *testNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, fmt.Errorf("netmap not found") } -func (s *testNetmapSource) Epoch() (uint64, error) { +func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } diff --git a/pkg/core/object/sender_classifier.go b/pkg/core/object/sender_classifier.go index a1a5fcac1..3733ed507 100644 --- a/pkg/core/object/sender_classifier.go +++ b/pkg/core/object/sender_classifier.go @@ -18,7 +18,7 @@ import ( ) type InnerRing interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type SenderClassifier struct { @@ -63,7 +63,7 @@ func (c SenderClassifier) Classify( } func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) { - isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes) + isInnerRingNode, err := c.isInnerRingKey(ctx, ownerKeyInBytes) if err != nil { // do not throw error, try best case matching c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing, @@ -78,7 +78,7 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK 
binCnr := make([]byte, sha256.Size) idCnr.Encode(binCnr) - isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr) + isContainerNode, err := c.isContainerKey(ctx, ownerKeyInBytes, binCnr, cnr) if err != nil { // error might happen if request has `RoleOther` key and placement // is not possible for previous epoch, so @@ -99,8 +99,8 @@ func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerK }, nil } -func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { - innerRingKeys, err := c.innerRing.InnerRingKeys() +func (c SenderClassifier) isInnerRingKey(ctx context.Context, owner []byte) (bool, error) { + innerRingKeys, err := c.innerRing.InnerRingKeys(ctx) if err != nil { return false, err } @@ -116,10 +116,11 @@ func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) { } func (c SenderClassifier) isContainerKey( + ctx context.Context, owner, idCnr []byte, cnr container.Container, ) (bool, error) { - nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap + nm, err := core.GetLatestNetworkMap(ctx, c.netmap) // first check current netmap if err != nil { return false, err } @@ -133,7 +134,7 @@ func (c SenderClassifier) isContainerKey( // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = core.GetPreviousNetworkMap(c.netmap) + nm, err = core.GetPreviousNetworkMap(ctx, c.netmap) if err != nil { return false, err } diff --git a/pkg/innerring/fetcher.go b/pkg/innerring/fetcher.go index 4a80ebf3b..7deec3f31 100644 --- a/pkg/innerring/fetcher.go +++ b/pkg/innerring/fetcher.go @@ -1,6 +1,8 @@ package innerring import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -47,12 +49,12 @@ type IrFetcherWithoutNotary struct { // InnerRingKeys fetches list of innerring keys from NeoFSAlphabet // role in the sidechain. -func (fN IrFetcherWithNotary) InnerRingKeys() (keys.PublicKeys, error) { - return fN.cli.NeoFSAlphabetList() +func (fN IrFetcherWithNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return fN.cli.NeoFSAlphabetList(ctx) } // InnerRingKeys fetches list of innerring keys from netmap contract // in the sidechain. 
-func (f IrFetcherWithoutNotary) InnerRingKeys() (keys.PublicKeys, error) { - return f.nm.GetInnerRingList() +func (f IrFetcherWithoutNotary) InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) { + return f.nm.GetInnerRingList(ctx) } diff --git a/pkg/innerring/indexer.go b/pkg/innerring/indexer.go index 45135a57b..439400bac 100644 --- a/pkg/innerring/indexer.go +++ b/pkg/innerring/indexer.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sync" "time" @@ -10,7 +11,7 @@ import ( type ( irFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } committeeFetcher interface { @@ -45,7 +46,7 @@ func newInnerRingIndexer(comf committeeFetcher, irf irFetcher, key *keys.PublicK } } -func (s *innerRingIndexer) update() (ind indexes, err error) { +func (s *innerRingIndexer) update(ctx context.Context) (ind indexes, err error) { s.RLock() if time.Since(s.lastAccess) < s.timeout { @@ -62,7 +63,7 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } - innerRing, err := s.irFetcher.InnerRingKeys() + innerRing, err := s.irFetcher.InnerRingKeys(ctx) if err != nil { return indexes{}, err } @@ -81,8 +82,8 @@ func (s *innerRingIndexer) update() (ind indexes, err error) { return s.ind, nil } -func (s *innerRingIndexer) InnerRingIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -90,8 +91,8 @@ func (s *innerRingIndexer) InnerRingIndex() (int32, error) { return ind.innerRingIndex, nil } -func (s *innerRingIndexer) InnerRingSize() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) InnerRingSize(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } @@ -99,8 +100,8 @@ func (s *innerRingIndexer) InnerRingSize() (int32, error) { return ind.innerRingSize, nil } -func (s *innerRingIndexer) AlphabetIndex() (int32, error) { - ind, err := s.update() +func (s *innerRingIndexer) AlphabetIndex(ctx context.Context) (int32, error) { + ind, err := s.update(ctx) if err != nil { return 0, fmt.Errorf("can't update index state: %w", err) } diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index c8a819b5b..f8201b7df 100644 --- a/pkg/innerring/indexer_test.go +++ b/pkg/innerring/indexer_test.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "sync/atomic" "testing" @@ -37,15 +38,15 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(2), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(3), size, "invalid IR size") }) @@ -56,11 +57,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := 
indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(0), idx, "invalid IR index") }) @@ -71,11 +72,11 @@ func TestIndexerReturnsIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") }) @@ -100,30 +101,30 @@ func TestIndexerCachesIndexes(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") require.Equal(t, int32(1), cf.calls.Load(), "invalid commitee calls count") require.Equal(t, int32(1), irf.calls.Load(), "invalid IR calls count") - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -132,15 +133,15 @@ func TestIndexerCachesIndexes(t *testing.T) { time.Sleep(2 * time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.NoError(t, err, "failed to get alphabet index") require.Equal(t, int32(-1), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.NoError(t, err, "failed to get IR index") require.Equal(t, int32(-1), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.NoError(t, err, "failed to get IR size") require.Equal(t, int32(0), size, "invalid IR size") @@ -165,15 +166,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer := newInnerRingIndexer(cf, irf, key, time.Second) - idx, err := indexer.AlphabetIndex() + idx, err := indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) 
require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err := indexer.InnerRingSize() + size, err := indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test commitee error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") @@ -189,15 +190,15 @@ func TestIndexerThrowsErrors(t *testing.T) { indexer = newInnerRingIndexer(cf, irf, key, time.Second) - idx, err = indexer.AlphabetIndex() + idx, err = indexer.AlphabetIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from commitee not throwed") require.Equal(t, int32(0), idx, "invalid alphabet index") - idx, err = indexer.InnerRingIndex() + idx, err = indexer.InnerRingIndex(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), idx, "invalid IR index") - size, err = indexer.InnerRingSize() + size, err = indexer.InnerRingSize(context.Background()) require.ErrorContains(t, err, "test IR error", "error from IR not throwed") require.Equal(t, int32(0), size, "invalid IR size") } @@ -219,7 +220,7 @@ type testIRFetcher struct { calls atomic.Int32 } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { f.calls.Add(1) return f.keys, f.err } diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 0b9e83443..ae5661905 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -575,19 +575,19 @@ func parseMultinetConfig(cfg *viper.Viper, m metrics.MultinetMetrics) internalNe func (s *Server) initConfigFromBlockchain(ctx context.Context) error { // get current epoch - epoch, err := s.netmapClient.Epoch() + epoch, err := s.netmapClient.Epoch(ctx) if err != nil { return fmt.Errorf("can't read epoch number: %w", err) } // get current epoch duration - epochDuration, err := s.netmapClient.EpochDuration() + epochDuration, err := s.netmapClient.EpochDuration(ctx) if err != nil { return fmt.Errorf("can't read epoch duration: %w", err) } // get balance precision - balancePrecision, err := s.balanceClient.Decimals() + balancePrecision, err := s.balanceClient.Decimals(ctx) if err != nil { return fmt.Errorf("can't read balance contract precision: %w", err) } @@ -597,7 +597,7 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { s.precision.SetBalancePrecision(balancePrecision) // get next epoch delta tick - s.initialEpochTickDelta, err = s.nextEpochBlockDelta() + s.initialEpochTickDelta, err = s.nextEpochBlockDelta(ctx) if err != nil { return err } @@ -613,8 +613,8 @@ func (s *Server) initConfigFromBlockchain(ctx context.Context) error { return nil } -func (s *Server) nextEpochBlockDelta() (uint32, error) { - epochBlock, err := s.netmapClient.LastEpochBlock() +func (s *Server) nextEpochBlockDelta(ctx context.Context) (uint32, error) { + epochBlock, err := s.netmapClient.LastEpochBlock(ctx) if err != nil { return 0, fmt.Errorf("can't read last epoch block: %w", err) } diff --git a/pkg/innerring/netmap.go b/pkg/innerring/netmap.go index 9961710ca..fb11e9426 100644 --- a/pkg/innerring/netmap.go +++ b/pkg/innerring/netmap.go @@ -1,6 +1,7 @@ package innerring import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -17,8 +18,8 @@ type networkSettings netmapclient.Client // MaintenanceModeAllowed requests network 
configuration from the Sidechain // and check allowance of storage node's maintenance mode according to it. // Always returns state.ErrMaintenanceModeDisallowed. -func (s *networkSettings) MaintenanceModeAllowed() error { - allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed() +func (s *networkSettings) MaintenanceModeAllowed(ctx context.Context) error { + allowed, err := (*netmapclient.Client)(s).MaintenanceModeAllowed(ctx) if err != nil { return fmt.Errorf("read maintenance mode's allowance from the Sidechain: %w", err) } else if allowed { diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index ac3e2a14d..1da3c401d 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -279,6 +279,6 @@ type testNetmapClient struct { netmap *netmap.NetMap } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index 8e11d2d61..d3d0f83f2 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -44,7 +44,7 @@ func (ap *Processor) processEmit(ctx context.Context) bool { return true } - networkMap, err := ap.netmapClient.NetMap() + networkMap, err := ap.netmapClient.NetMap(ctx) if err != nil { ap.log.Warn(ctx, logs.AlphabetCantGetNetmapSnapshotToEmitGasToStorageNodes, zap.Error(err)) diff --git a/pkg/innerring/processors/alphabet/processor.go b/pkg/innerring/processors/alphabet/processor.go index 2c4654e7c..0aea74003 100644 --- a/pkg/innerring/processors/alphabet/processor.go +++ b/pkg/innerring/processors/alphabet/processor.go @@ -36,7 +36,7 @@ type ( } netmapClient interface { - NetMap() (*netmap.NetMap, error) + NetMap(ctx context.Context) (*netmap.NetMap, error) } morphClient interface { diff --git a/pkg/innerring/processors/container/common.go b/pkg/innerring/processors/container/common.go index ba12ebb37..5334b9a1f 100644 --- a/pkg/innerring/processors/container/common.go +++ b/pkg/innerring/processors/container/common.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -45,7 +46,7 @@ type signatureVerificationData struct { // - v.binPublicKey is a public session key // - session context corresponds to the container and verb in v // - session is "alive" -func (cp *Processor) verifySignature(v signatureVerificationData) error { +func (cp *Processor) verifySignature(ctx context.Context, v signatureVerificationData) error { var err error var key frostfsecdsa.PublicKeyRFC6979 keyProvided := v.binPublicKey != nil @@ -58,7 +59,7 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { } if len(v.binTokenSession) > 0 { - return cp.verifyByTokenSession(v, &key, keyProvided) + return cp.verifyByTokenSession(ctx, v, &key, keyProvided) } if keyProvided { @@ -77,8 +78,8 @@ func (cp *Processor) verifySignature(v signatureVerificationData) error { return errors.New("signature is invalid or calculated with the key not bound to the container owner") } -func (cp *Processor) checkTokenLifetime(token session.Container) error { - curEpoch, err := cp.netState.Epoch() +func (cp *Processor) checkTokenLifetime(ctx context.Context, token session.Container) error { + curEpoch, err := cp.netState.Epoch(ctx) if err != nil { return 
fmt.Errorf("could not read current epoch: %w", err) } @@ -90,7 +91,7 @@ func (cp *Processor) checkTokenLifetime(token session.Container) error { return nil } -func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { +func (cp *Processor) verifyByTokenSession(ctx context.Context, v signatureVerificationData, key *frostfsecdsa.PublicKeyRFC6979, keyProvided bool) error { var tok session.Container err := tok.Unmarshal(v.binTokenSession) @@ -118,7 +119,7 @@ func (cp *Processor) verifyByTokenSession(v signatureVerificationData, key *fros return errors.New("owner differs with token owner") } - err = cp.checkTokenLifetime(tok) + err = cp.checkTokenLifetime(ctx, tok) if err != nil { return fmt.Errorf("check session lifetime: %w", err) } diff --git a/pkg/innerring/processors/container/handlers_test.go b/pkg/innerring/processors/container/handlers_test.go index f28e5372a..1b3842eb0 100644 --- a/pkg/innerring/processors/container/handlers_test.go +++ b/pkg/innerring/processors/container/handlers_test.go @@ -170,11 +170,11 @@ type testNetworkState struct { epoch uint64 } -func (s *testNetworkState) HomomorphicHashDisabled() (bool, error) { +func (s *testNetworkState) HomomorphicHashDisabled(context.Context) (bool, error) { return s.homHashDisabled, nil } -func (s *testNetworkState) Epoch() (uint64, error) { +func (s *testNetworkState) Epoch(context.Context) (uint64, error) { return s.epoch, nil } @@ -187,7 +187,7 @@ func (c *testContainerClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testContainerClient) Get(cid []byte) (*containercore.Container, error) { +func (c *testContainerClient) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { key := hex.EncodeToString(cid) if cont, found := c.get[key]; found { return cont, nil @@ -237,6 +237,6 @@ func (c *testMorphClient) NotarySignAndInvokeTX(mainTx *transaction.Transaction) type testFrostFSIDClient struct{} -func (c *testFrostFSIDClient) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *testFrostFSIDClient) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { return &frostfsidclient.Subject{}, nil } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index ffaea653a..854e2c779 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -47,7 +47,7 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool e: put, } - err := cp.checkPutContainer(pctx) + err := cp.checkPutContainer(ctx, pctx) if err != nil { cp.log.Error(ctx, logs.ContainerPutContainerCheckFailed, zap.Error(err), @@ -66,8 +66,8 @@ func (cp *Processor) processContainerPut(ctx context.Context, put putEvent) bool return true } -func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { - binCnr := ctx.e.Container() +func (cp *Processor) checkPutContainer(ctx context.Context, pctx *putContainerContext) error { + binCnr := pctx.e.Container() var cnr containerSDK.Container err := cnr.Unmarshal(binCnr) @@ -75,12 +75,12 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { return fmt.Errorf("invalid binary container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: cnr.Owner(), verb: session.VerbContainerPut, - 
binTokenSession: ctx.e.SessionToken(), - binPublicKey: ctx.e.PublicKey(), - signature: ctx.e.Signature(), + binTokenSession: pctx.e.SessionToken(), + binPublicKey: pctx.e.PublicKey(), + signature: pctx.e.Signature(), signedData: binCnr, }) if err != nil { @@ -88,13 +88,13 @@ func (cp *Processor) checkPutContainer(ctx *putContainerContext) error { } // check homomorphic hashing setting - err = checkHomomorphicHashing(cp.netState, cnr) + err = checkHomomorphicHashing(ctx, cp.netState, cnr) if err != nil { return fmt.Errorf("incorrect homomorphic hashing setting: %w", err) } // check native name and zone - err = cp.checkNNS(ctx, cnr) + err = cp.checkNNS(ctx, pctx, cnr) if err != nil { return fmt.Errorf("NNS: %w", err) } @@ -110,7 +110,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } - err := cp.checkDeleteContainer(e) + err := cp.checkDeleteContainer(ctx, e) if err != nil { cp.log.Error(ctx, logs.ContainerDeleteContainerCheckFailed, zap.Error(err), @@ -130,7 +130,7 @@ func (cp *Processor) processContainerDelete(ctx context.Context, e containerEven return true } -func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { +func (cp *Processor) checkDeleteContainer(ctx context.Context, e containerEvent.Delete) error { binCnr := e.ContainerID() var idCnr cid.ID @@ -141,12 +141,12 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { } // receive owner of the related container - cnr, err := cp.cnrClient.Get(binCnr) + cnr, err := cp.cnrClient.Get(ctx, binCnr) if err != nil { return fmt.Errorf("could not receive the container: %w", err) } - err = cp.verifySignature(signatureVerificationData{ + err = cp.verifySignature(ctx, signatureVerificationData{ ownerContainer: cnr.Value.Owner(), verb: session.VerbContainerDelete, idContainerSet: true, @@ -163,21 +163,21 @@ func (cp *Processor) checkDeleteContainer(e containerEvent.Delete) error { return nil } -func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Container) error { +func (cp *Processor) checkNNS(ctx context.Context, pctx *putContainerContext, cnr containerSDK.Container) error { // fetch domain info - ctx.d = containerSDK.ReadDomain(cnr) + pctx.d = containerSDK.ReadDomain(cnr) // if PutNamed event => check if values in container correspond to args - if named, ok := ctx.e.(interface { + if named, ok := pctx.e.(interface { Name() string Zone() string }); ok { - if name := named.Name(); name != ctx.d.Name() { - return fmt.Errorf("names differ %s/%s", name, ctx.d.Name()) + if name := named.Name(); name != pctx.d.Name() { + return fmt.Errorf("names differ %s/%s", name, pctx.d.Name()) } - if zone := named.Zone(); zone != ctx.d.Zone() { - return fmt.Errorf("zones differ %s/%s", zone, ctx.d.Zone()) + if zone := named.Zone(); zone != pctx.d.Zone() { + return fmt.Errorf("zones differ %s/%s", zone, pctx.d.Zone()) } } @@ -186,12 +186,12 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr containerSDK.Contain return fmt.Errorf("could not get container owner address: %w", err) } - subject, err := cp.frostFSIDClient.GetSubject(addr) + subject, err := cp.frostFSIDClient.GetSubject(ctx, addr) if err != nil { return fmt.Errorf("could not get subject from FrostfsID contract: %w", err) } - namespace, hasNamespace := strings.CutSuffix(ctx.d.Zone(), ".ns") + namespace, hasNamespace := strings.CutSuffix(pctx.d.Zone(), ".ns") if !hasNamespace { return nil } @@ -203,8 +203,8 @@ func (cp *Processor) checkNNS(ctx *putContainerContext, cnr 
containerSDK.Contain return nil } -func checkHomomorphicHashing(ns NetworkState, cnr containerSDK.Container) error { - netSetting, err := ns.HomomorphicHashDisabled() +func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr containerSDK.Container) error { + netSetting, err := ns.HomomorphicHashDisabled(ctx) if err != nil { return fmt.Errorf("could not get setting in contract: %w", err) } diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go index a0b7491e1..9be93baa4 100644 --- a/pkg/innerring/processors/container/processor.go +++ b/pkg/innerring/processors/container/processor.go @@ -25,7 +25,7 @@ type ( ContClient interface { ContractAddress() util.Uint160 - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } MorphClient interface { @@ -33,7 +33,7 @@ type ( } FrostFSIDClient interface { - GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) + GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) } // Processor of events produced by container contract in the sidechain. @@ -68,7 +68,7 @@ type NetworkState interface { // // Must return any error encountered // which did not allow reading the value. - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) // HomomorphicHashDisabled must return boolean that // represents homomorphic network state: @@ -76,7 +76,7 @@ type NetworkState interface { // * false if hashing is enabled. // // which did not allow reading the value. - HomomorphicHashDisabled() (bool, error) + HomomorphicHashDisabled(ctx context.Context) (bool, error) } // New creates a container contract processor instance. diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 5a6126249..864c5da67 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -236,7 +236,7 @@ type testIRFetcher struct { publicKeys keys.PublicKeys } -func (f *testIRFetcher) InnerRingKeys() (keys.PublicKeys, error) { +func (f *testIRFetcher) InnerRingKeys(context.Context) (keys.PublicKeys, error) { return f.publicKeys, nil } @@ -266,7 +266,7 @@ type testMainnetClient struct { designateHash util.Uint160 } -func (c *testMainnetClient) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *testMainnetClient) NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) { return c.alphabetKeys, nil } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 245679656..6e22abb3c 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -25,7 +25,7 @@ func (gp *Processor) processAlphabetSync(ctx context.Context, txHash util.Uint25 return true } - mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() + mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList(ctx) if err != nil { gp.log.Error(ctx, logs.GovernanceCantFetchAlphabetListFromMainNet, zap.Error(err)) @@ -95,7 +95,7 @@ func prettyKeys(keys keys.PublicKeys) string { } func (gp *Processor) updateNeoFSAlphabetRoleInSidechain(ctx context.Context, sidechainAlphabet, newAlphabet keys.PublicKeys, txHash util.Uint256) { - innerRing, err := gp.irFetcher.InnerRingKeys() + innerRing, err := gp.irFetcher.InnerRingKeys(ctx) if err != nil { gp.log.Error(ctx, 
logs.GovernanceCantFetchInnerRingListFromSideChain, zap.Error(err)) diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go index 7859ebee1..2d131edda 100644 --- a/pkg/innerring/processors/governance/processor.go +++ b/pkg/innerring/processors/governance/processor.go @@ -52,7 +52,7 @@ type ( // Implementation must take into account availability of // the notary contract. IRFetcher interface { - InnerRingKeys() (keys.PublicKeys, error) + InnerRingKeys(ctx context.Context) (keys.PublicKeys, error) } FrostFSClient interface { @@ -64,7 +64,7 @@ type ( } MainnetClient interface { - NeoFSAlphabetList() (res keys.PublicKeys, err error) + NeoFSAlphabetList(context.Context) (res keys.PublicKeys, err error) GetDesignateHash() util.Uint160 } diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go index 5a5adfb2d..934c3790d 100644 --- a/pkg/innerring/processors/netmap/handlers_test.go +++ b/pkg/innerring/processors/netmap/handlers_test.go @@ -294,7 +294,7 @@ type testNodeStateSettings struct { maintAllowed bool } -func (s *testNodeStateSettings) MaintenanceModeAllowed() error { +func (s *testNodeStateSettings) MaintenanceModeAllowed(context.Context) error { if s.maintAllowed { return nil } @@ -303,7 +303,7 @@ func (s *testNodeStateSettings) MaintenanceModeAllowed() error { type testValidator struct{} -func (v *testValidator) VerifyAndUpdate(*netmap.NodeInfo) error { +func (v *testValidator) VerifyAndUpdate(context.Context, *netmap.NodeInfo) error { return nil } @@ -381,7 +381,7 @@ func (c *testNetmapClient) ContractAddress() util.Uint160 { return c.contractAddress } -func (c *testNetmapClient) EpochDuration() (uint64, error) { +func (c *testNetmapClient) EpochDuration(context.Context) (uint64, error) { return c.epochDuration, nil } @@ -392,7 +392,7 @@ func (c *testNetmapClient) MorphTxHeight(h util.Uint256) (uint32, error) { return 0, fmt.Errorf("not found") } -func (c *testNetmapClient) NetMap() (*netmap.NetMap, error) { +func (c *testNetmapClient) NetMap(context.Context) (*netmap.NetMap, error) { return c.netmap, nil } diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go index 5e0558344..b81dc9989 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls.go @@ -1,6 +1,7 @@ package locode import ( + "context" "errors" "fmt" @@ -29,7 +30,7 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record // - Continent: R.Continent().String(). // // UN-LOCODE attribute remains untouched. 
-func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { attrLocode := n.LOCODE() if attrLocode == "" { return nil diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go index 8ab174dfd..fa2dd1ac1 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/locode/calls_test.go @@ -1,6 +1,7 @@ package locode_test import ( + "context" "errors" "fmt" "testing" @@ -92,7 +93,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { t.Run("w/o locode", func(t *testing.T) { n := nodeInfoWithSomeAttrs() - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) }) @@ -102,7 +103,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttrValue(n, "WRONG LOCODE") - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -111,7 +112,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, locodestd.LOCODE{"RU", "SPB"}) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.Error(t, err) }) @@ -119,7 +120,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { addLocodeAttr(n, r.LOCODE) - err := validator.VerifyAndUpdate(n) + err := validator.VerifyAndUpdate(context.Background(), n) require.NoError(t, err) require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode")) diff --git a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go index 126f36582..0e4628ac7 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go +++ b/pkg/innerring/processors/netmap/nodevalidation/maddress/calls.go @@ -1,6 +1,7 @@ package maddress import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" @@ -8,7 +9,7 @@ import ( ) // VerifyAndUpdate calls network.VerifyAddress. -func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error { +func (v *Validator) VerifyAndUpdate(_ context.Context, n *netmap.NodeInfo) error { err := network.VerifyMultiAddress(*n) if err != nil { return fmt.Errorf("could not verify multiaddress: %w", err) diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go index e5165f618..03c41a451 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go @@ -7,6 +7,7 @@ map candidates. package state import ( + "context" "errors" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -23,7 +24,7 @@ type NetworkSettings interface { // no error if allowed; // ErrMaintenanceModeDisallowed if disallowed; // other error if there are any problems with the check. - MaintenanceModeAllowed() error + MaintenanceModeAllowed(ctx context.Context) error } // NetMapCandidateValidator represents tool which checks state of nodes which @@ -55,13 +56,13 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting // MUST NOT be called before SetNetworkSettings. // // See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods. 
-func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error { +func (x *NetMapCandidateValidator) VerifyAndUpdate(ctx context.Context, node *netmap.NodeInfo) error { if node.Status().IsOnline() { return nil } if node.Status().IsMaintenance() { - return x.netSettings.MaintenanceModeAllowed() + return x.netSettings.MaintenanceModeAllowed(ctx) } return errors.New("invalid status: MUST be either ONLINE or MAINTENANCE") diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go index b81d7243b..cbf48a710 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go +++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go @@ -1,6 +1,7 @@ package state_test import ( + "context" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap/nodevalidation/state" @@ -13,7 +14,7 @@ type testNetworkSettings struct { disallowed bool } -func (x testNetworkSettings) MaintenanceModeAllowed() error { +func (x testNetworkSettings) MaintenanceModeAllowed(context.Context) error { if x.disallowed { return state.ErrMaintenanceModeDisallowed } @@ -81,7 +82,7 @@ func TestValidator_VerifyAndUpdate(t *testing.T) { testCase.validatorPreparer(&v) } - err := v.VerifyAndUpdate(&node) + err := v.VerifyAndUpdate(context.Background(), &node) if testCase.valid { require.NoError(t, err, testCase.name) diff --git a/pkg/innerring/processors/netmap/nodevalidation/validator.go b/pkg/innerring/processors/netmap/nodevalidation/validator.go index e9b24e024..3dbe98a8d 100644 --- a/pkg/innerring/processors/netmap/nodevalidation/validator.go +++ b/pkg/innerring/processors/netmap/nodevalidation/validator.go @@ -1,6 +1,8 @@ package nodevalidation import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring/processors/netmap" apinetmap "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -26,9 +28,9 @@ func New(validators ...netmap.NodeValidator) *CompositeValidator { // VerifyAndUpdate passes apinetmap.NodeInfo to wrapped validators. // // If error appears, returns it immediately. 
-func (c *CompositeValidator) VerifyAndUpdate(ni *apinetmap.NodeInfo) error { +func (c *CompositeValidator) VerifyAndUpdate(ctx context.Context, ni *apinetmap.NodeInfo) error { for _, v := range c.validators { - if err := v.VerifyAndUpdate(ni); err != nil { + if err := v.VerifyAndUpdate(ctx, ni); err != nil { return err } } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 93e00bbaa..7c78d24a5 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -14,7 +14,7 @@ import ( func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoch) bool { epoch := ev.EpochNumber() - epochDuration, err := np.netmapClient.EpochDuration() + epochDuration, err := np.netmapClient.EpochDuration(ctx) if err != nil { np.log.Warn(ctx, logs.NetmapCantGetEpochDuration, zap.Error(err)) @@ -37,7 +37,7 @@ func (np *Processor) processNewEpoch(ctx context.Context, ev netmapEvent.NewEpoc } // get new netmap snapshot - networkMap, err := np.netmapClient.NetMap() + networkMap, err := np.netmapClient.NetMap(ctx) if err != nil { np.log.Warn(ctx, logs.NetmapCantGetNetmapSnapshotToPerformCleanup, zap.Error(err)) diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index 5b565ffd1..b5c727cc7 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -39,7 +39,7 @@ func (np *Processor) processAddPeer(ctx context.Context, ev netmapEvent.AddPeer) } // validate and update node info - err = np.nodeValidator.VerifyAndUpdate(&nodeInfo) + err = np.nodeValidator.VerifyAndUpdate(ctx, &nodeInfo) if err != nil { np.log.Warn(ctx, logs.NetmapCouldNotVerifyAndUpdateInformationAboutNetworkMapCandidate, zap.Error(err), @@ -108,7 +108,7 @@ func (np *Processor) processUpdatePeer(ctx context.Context, ev netmapEvent.Updat var err error if ev.Maintenance() { - err = np.nodeStateSettings.MaintenanceModeAllowed() + err = np.nodeStateSettings.MaintenanceModeAllowed(ctx) if err != nil { np.log.Info(ctx, logs.NetmapPreventSwitchingNodeToMaintenanceState, zap.Error(err), diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go index 36df57afe..277bca1c3 100644 --- a/pkg/innerring/processors/netmap/processor.go +++ b/pkg/innerring/processors/netmap/processor.go @@ -49,15 +49,15 @@ type ( // // If no error occurs, the parameter must point to the // ready-made NodeInfo structure. 
- VerifyAndUpdate(*netmap.NodeInfo) error + VerifyAndUpdate(context.Context, *netmap.NodeInfo) error } Client interface { MorphNotaryInvoke(ctx context.Context, contract util.Uint160, fee fixedn.Fixed8, nonce uint32, vub *uint32, method string, args ...any) error ContractAddress() util.Uint160 - EpochDuration() (uint64, error) + EpochDuration(ctx context.Context) (uint64, error) MorphTxHeight(h util.Uint256) (res uint32, err error) - NetMap() (*netmap.NetMap, error) + NetMap(ctx context.Context) (*netmap.NetMap, error) NewEpoch(ctx context.Context, epoch uint64) error MorphIsValidScript(script []byte, signers []transaction.Signer) (valid bool, err error) MorphNotarySignAndInvokeTX(mainTx *transaction.Transaction) error diff --git a/pkg/innerring/processors/netmap/wrappers.go b/pkg/innerring/processors/netmap/wrappers.go index 9cd71ae48..310f12248 100644 --- a/pkg/innerring/processors/netmap/wrappers.go +++ b/pkg/innerring/processors/netmap/wrappers.go @@ -34,16 +34,16 @@ func (w *netmapClientWrapper) ContractAddress() util.Uint160 { return w.netmapClient.ContractAddress() } -func (w *netmapClientWrapper) EpochDuration() (uint64, error) { - return w.netmapClient.EpochDuration() +func (w *netmapClientWrapper) EpochDuration(ctx context.Context) (uint64, error) { + return w.netmapClient.EpochDuration(ctx) } func (w *netmapClientWrapper) MorphTxHeight(h util.Uint256) (res uint32, err error) { return w.netmapClient.Morph().TxHeight(h) } -func (w *netmapClientWrapper) NetMap() (*netmap.NetMap, error) { - return w.netmapClient.NetMap() +func (w *netmapClientWrapper) NetMap(ctx context.Context) (*netmap.NetMap, error) { + return w.netmapClient.NetMap(ctx) } func (w *netmapClientWrapper) NewEpoch(ctx context.Context, epoch uint64) error { diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 3e9880e70..0ef771359 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -60,7 +60,7 @@ func (s *Server) IsAlphabet(ctx context.Context) bool { // InnerRingIndex is a getter for a global index of node in inner ring list. Negative // index means that node is not in the inner ring list. func (s *Server) InnerRingIndex(ctx context.Context) int { - index, err := s.statusIndex.InnerRingIndex() + index, err := s.statusIndex.InnerRingIndex(ctx) if err != nil { s.log.Error(ctx, logs.InnerringCantGetInnerRingIndex, zap.Error(err)) return -1 @@ -72,7 +72,7 @@ func (s *Server) InnerRingIndex(ctx context.Context) int { // InnerRingSize is a getter for a global size of inner ring list. This value // paired with inner ring index. func (s *Server) InnerRingSize(ctx context.Context) int { - size, err := s.statusIndex.InnerRingSize() + size, err := s.statusIndex.InnerRingSize(ctx) if err != nil { s.log.Error(ctx, logs.InnerringCantGetInnerRingSize, zap.Error(err)) return 0 @@ -84,7 +84,7 @@ func (s *Server) InnerRingSize(ctx context.Context) int { // AlphabetIndex is a getter for a global index of node in alphabet list. // Negative index means that node is not in the alphabet list. 
func (s *Server) AlphabetIndex(ctx context.Context) int { - index, err := s.statusIndex.AlphabetIndex() + index, err := s.statusIndex.AlphabetIndex(ctx) if err != nil { s.log.Error(ctx, logs.InnerringCantGetAlphabetIndex, zap.Error(err)) return -1 diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index f82268d1d..85652b3ae 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -279,7 +279,7 @@ func (s *containerSource) IsContainerAvailable(ctx context.Context, id cid.ID) ( return true, nil } - wasRemoved, err := container.WasRemoved(s.cs, id) + wasRemoved, err := container.WasRemoved(ctx, s.cs, id) if err != nil { return false, err } diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index fd1530f53..27eaea768 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -425,7 +425,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context } egContainer.Go(func() error { var skip bool - c, err := e.containerSource.Load().cs.Get(cnt) + c, err := e.containerSource.Load().cs.Get(ctx, cnt) if err != nil { if client.IsErrContainerNotFound(err) { skip = true diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 248c39155..b9d7888e7 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -37,7 +37,7 @@ type containerStorage struct { latency time.Duration } -func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(ctx context.Context, id cid.ID) (*coreContainer.Container, error) { time.Sleep(cs.latency) v, ok := cs.cntmap[id] if !ok { @@ -49,7 +49,7 @@ func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) { return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index 6eba58c69..4948f3424 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -360,7 +360,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv return nil } last = keys[len(keys)-1] - cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys) + cnt, err := dropNonIndexedUserAttributeBuckets(ctx, db, cs, keys) if err != nil { log("deleting user attribute buckets completed with an error:", err) return err @@ -376,8 +376,8 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv } } -func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { - keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs) +func dropNonIndexedUserAttributeBuckets(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) { + keysToDrop, err := selectUserAttributeKeysToDrop(ctx, keys, cs) if err != nil { return 0, fmt.Errorf("select non indexed user attributes: %w", err) } @@ -394,7 +394,7 @@ func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, return uint64(len(keysToDrop)), nil } -func selectUserAttributeKeysToDrop(keys [][]byte, cs 
container.InfoProvider) ([][]byte, error) { +func selectUserAttributeKeysToDrop(ctx context.Context, keys [][]byte, cs container.InfoProvider) ([][]byte, error) { var keysToDrop [][]byte for _, key := range keys { attr, ok := attributeFromAttributeBucket(key) @@ -409,7 +409,7 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([] if !ok { return nil, fmt.Errorf("parse container ID from user attribute bucket key %s", hex.EncodeToString(key)) } - info, err := cs.Info(contID) + info, err := cs.Info(ctx, contID) if err != nil { return nil, err } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 3136ddfcc..fedde2206 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -280,7 +280,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address, var isIndexedContainer bool if hasIndexedAttribute { - info, err := s.containerInfo.Info(addr.Container()) + info, err := s.containerInfo.Info(ctx, addr.Container()) if err != nil { return err } diff --git a/pkg/morph/client/balance/balanceOf.go b/pkg/morph/client/balance/balanceOf.go index 449d49e5a..4462daab4 100644 --- a/pkg/morph/client/balance/balanceOf.go +++ b/pkg/morph/client/balance/balanceOf.go @@ -1,6 +1,7 @@ package balance import ( + "context" "fmt" "math/big" @@ -10,14 +11,14 @@ import ( // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. -func (c *Client) BalanceOf(id user.ID) (*big.Int, error) { +func (c *Client) BalanceOf(ctx context.Context, id user.ID) (*big.Int, error) { h := id.ScriptHash() invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(balanceOfMethod) invokePrm.SetArgs(h) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", balanceOfMethod, err) } else if ln := len(prms); ln != 1 { diff --git a/pkg/morph/client/balance/decimals.go b/pkg/morph/client/balance/decimals.go index c2a66dded..57e61d62b 100644 --- a/pkg/morph/client/balance/decimals.go +++ b/pkg/morph/client/balance/decimals.go @@ -1,6 +1,7 @@ package balance import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,11 +9,11 @@ import ( // Decimals decimal precision of currency transactions // through the Balance contract call, and returns it. -func (c *Client) Decimals() (uint32, error) { +func (c *Client) Decimals(ctx context.Context) (uint32, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(decimalsMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", decimalsMethod, err) } else if ln := len(prms); ln != 1 { diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 01fcc98e5..10ded5142 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -499,7 +499,7 @@ func (c *Client) TxHeight(h util.Uint256) (res uint32, err error) { // NeoFSAlphabetList returns keys that stored in NeoFS Alphabet role. Main chain // stores alphabet node keys of inner ring there, however the sidechain stores both // alphabet and non alphabet node keys of inner ring. 
-func (c *Client) NeoFSAlphabetList() (res keys.PublicKeys, err error) { +func (c *Client) NeoFSAlphabetList(_ context.Context) (res keys.PublicKeys, err error) { c.switchLock.RLock() defer c.switchLock.RUnlock() diff --git a/pkg/morph/client/container/containers_of.go b/pkg/morph/client/container/containers_of.go index 074a586be..60fb8ad7c 100644 --- a/pkg/morph/client/container/containers_of.go +++ b/pkg/morph/client/container/containers_of.go @@ -1,6 +1,7 @@ package container import ( + "context" "errors" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -13,7 +14,7 @@ import ( // to the specified user of FrostFS system. If idUser is nil, returns the list of all containers. // // If remote RPC does not support neo-go session API, fallback to List() method. -func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { +func (c *Client) ContainersOf(ctx context.Context, idUser *user.ID) ([]cid.ID, error) { var cidList []cid.ID var err error @@ -21,7 +22,7 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { cidList = append(cidList, id) return nil } - if err = c.IterateContainersOf(idUser, cb); err != nil { + if err = c.IterateContainersOf(ctx, idUser, cb); err != nil { return nil, err } return cidList, nil @@ -30,7 +31,7 @@ func (c *Client) ContainersOf(idUser *user.ID) ([]cid.ID, error) { // iterateContainers iterates over a list of container identifiers // belonging to the specified user of FrostFS system and executes // `cb` on each element. If idUser is nil, calls it on the list of all containers. -func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error) error { +func (c *Client) IterateContainersOf(ctx context.Context, idUser *user.ID, cb func(item cid.ID) error) error { var rawID []byte if idUser != nil { rawID = idUser.WalletBytes() @@ -59,7 +60,7 @@ func (c *Client) IterateContainersOf(idUser *user.ID, cb func(item cid.ID) error cnrHash := c.client.ContractAddress() err := c.client.Morph().TestInvokeIterator(itemCb, batchSize, cnrHash, containersOfMethod, rawID) if err != nil && errors.Is(err, unwrap.ErrNoSessionID) { - return c.iterate(idUser, cb) + return c.iterate(ctx, idUser, cb) } return err diff --git a/pkg/morph/client/container/deletion_info.go b/pkg/morph/client/container/deletion_info.go index b86e0ce9c..90bcdd7d5 100644 --- a/pkg/morph/client/container/deletion_info.go +++ b/pkg/morph/client/container/deletion_info.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" @@ -14,27 +15,27 @@ import ( "github.com/mr-tron/base58" ) -func (x *containerSource) DeletionInfo(cnr cid.ID) (*containercore.DelInfo, error) { - return DeletionInfo((*Client)(x), cnr) +func (x *containerSource) DeletionInfo(ctx context.Context, cnr cid.ID) (*containercore.DelInfo, error) { + return DeletionInfo(ctx, (*Client)(x), cnr) } type deletionInfo interface { - DeletionInfo(cid []byte) (*containercore.DelInfo, error) + DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) } -func DeletionInfo(c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { +func DeletionInfo(ctx context.Context, c deletionInfo, cnr cid.ID) (*containercore.DelInfo, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.DeletionInfo(binCnr) + return c.DeletionInfo(ctx, binCnr) } -func (c *Client) DeletionInfo(cid []byte) (*containercore.DelInfo, error) { +func (c *Client) DeletionInfo(ctx context.Context, cid []byte) (*containercore.DelInfo, error) { prm := 
client.TestInvokePrm{} prm.SetMethod(deletionInfoMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) diff --git a/pkg/morph/client/container/get.go b/pkg/morph/client/container/get.go index 2ab58bf01..8622d2cdd 100644 --- a/pkg/morph/client/container/get.go +++ b/pkg/morph/client/container/get.go @@ -1,6 +1,7 @@ package container import ( + "context" "crypto/sha256" "fmt" "strings" @@ -16,8 +17,8 @@ import ( type containerSource Client -func (x *containerSource) Get(cnr cid.ID) (*containercore.Container, error) { - return Get((*Client)(x), cnr) +func (x *containerSource) Get(ctx context.Context, cnr cid.ID) (*containercore.Container, error) { + return Get(ctx, (*Client)(x), cnr) } // AsContainerSource provides container Source interface @@ -27,15 +28,15 @@ func AsContainerSource(w *Client) containercore.Source { } type getContainer interface { - Get(cid []byte) (*containercore.Container, error) + Get(ctx context.Context, cid []byte) (*containercore.Container, error) } // Get marshals container ID, and passes it to Wrapper's Get method. -func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { +func Get(ctx context.Context, c getContainer, cnr cid.ID) (*containercore.Container, error) { binCnr := make([]byte, sha256.Size) cnr.Encode(binCnr) - return c.Get(binCnr) + return c.Get(ctx, binCnr) } // Get reads the container from FrostFS system by binary identifier @@ -43,12 +44,12 @@ func Get(c getContainer, cnr cid.ID) (*containercore.Container, error) { // // If an empty slice is returned for the requested identifier, // storage.ErrNotFound error is returned. -func (c *Client) Get(cid []byte) (*containercore.Container, error) { +func (c *Client) Get(ctx context.Context, cid []byte) (*containercore.Container, error) { prm := client.TestInvokePrm{} prm.SetMethod(getMethod) prm.SetArgs(cid) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { if strings.Contains(err.Error(), containerContract.NotFoundError) { return nil, new(apistatus.ContainerNotFound) diff --git a/pkg/morph/client/container/list.go b/pkg/morph/client/container/list.go index 78ea8278f..fc63d1beb 100644 --- a/pkg/morph/client/container/list.go +++ b/pkg/morph/client/container/list.go @@ -1,6 +1,7 @@ package container import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -15,7 +16,7 @@ import ( // // Iterates through the identifiers of all FrostFS containers if pointer // to user identifier is nil. 
-func (c *Client) iterate(idUser *user.ID, cb func(cid.ID) error) error { +func (c *Client) iterate(ctx context.Context, idUser *user.ID, cb func(cid.ID) error) error { var rawID []byte if idUser != nil { @@ -26,7 +27,7 @@ func (c *Client) iterate(idUser *user.ID, cb func(cid.ID) error) error { prm.SetMethod(listMethod) prm.SetArgs(rawID) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { return fmt.Errorf("test invoke (%s): %w", listMethod, err) } else if ln := len(res); ln != 1 { diff --git a/pkg/morph/client/frostfsid/subject.go b/pkg/morph/client/frostfsid/subject.go index 305f3ce09..3a789672a 100644 --- a/pkg/morph/client/frostfsid/subject.go +++ b/pkg/morph/client/frostfsid/subject.go @@ -1,6 +1,7 @@ package frostfsid import ( + "context" "fmt" frostfsidclient "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client" @@ -14,12 +15,12 @@ const ( methodGetSubjectExtended = "getSubjectExtended" ) -func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) { +func (c *Client) GetSubject(ctx context.Context, addr util.Uint160) (*frostfsidclient.Subject, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubject) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubject, err) } @@ -37,12 +38,12 @@ func (c *Client) GetSubject(addr util.Uint160) (*frostfsidclient.Subject, error) return subj, nil } -func (c *Client) GetSubjectExtended(addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { +func (c *Client) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*frostfsidclient.SubjectExtended, error) { prm := client.TestInvokePrm{} prm.SetMethod(methodGetSubjectExtended) prm.SetArgs(addr) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", methodGetSubjectExtended, err) } diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 2b87df6f7..fcdb70e3f 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -25,8 +25,8 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. -func (c *Client) MaxObjectSize() (uint64, error) { - objectSize, err := c.readUInt64Config(MaxObjectSizeConfig) +func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { + objectSize, err := c.readUInt64Config(ctx, MaxObjectSizeConfig) if err != nil { return 0, err } @@ -35,8 +35,8 @@ func (c *Client) MaxObjectSize() (uint64, error) { } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. -func (c *Client) EpochDuration() (uint64, error) { - epochDuration, err := c.readUInt64Config(EpochDurationConfig) +func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { + epochDuration, err := c.readUInt64Config(ctx, EpochDurationConfig) if err != nil { return 0, err } @@ -46,8 +46,8 @@ func (c *Client) EpochDuration() (uint64, error) { // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. 
-func (c *Client) ContainerFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerFeeConfig) +func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { + fee, err := c.readUInt64Config(ctx, ContainerFeeConfig) if err != nil { return 0, err } @@ -57,8 +57,8 @@ func (c *Client) ContainerFee() (uint64, error) { // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. -func (c *Client) ContainerAliasFee() (uint64, error) { - fee, err := c.readUInt64Config(ContainerAliasFeeConfig) +func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { + fee, err := c.readUInt64Config(ctx, ContainerAliasFeeConfig) if err != nil { return 0, err } @@ -70,14 +70,14 @@ func (c *Client) ContainerAliasFee() (uint64, error) { // settings. // // Returns (false, nil) if config key is not found in the contract. -func (c *Client) HomomorphicHashDisabled() (bool, error) { - return c.readBoolConfig(HomomorphicHashingDisabledKey) +func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, HomomorphicHashingDisabledKey) } // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. -func (c *Client) InnerRingCandidateFee() (uint64, error) { - fee, err := c.readUInt64Config(IrCandidateFeeConfig) +func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { + fee, err := c.readUInt64Config(ctx, IrCandidateFeeConfig) if err != nil { return 0, err } @@ -87,8 +87,8 @@ func (c *Client) InnerRingCandidateFee() (uint64, error) { // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. -func (c *Client) WithdrawFee() (uint64, error) { - fee, err := c.readUInt64Config(WithdrawFeeConfig) +func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { + fee, err := c.readUInt64Config(ctx, WithdrawFeeConfig) if err != nil { return 0, err } @@ -101,12 +101,12 @@ func (c *Client) WithdrawFee() (uint64, error) { // that storage nodes are allowed to switch their state to "maintenance". // // By default, maintenance state is disallowed. -func (c *Client) MaintenanceModeAllowed() (bool, error) { - return c.readBoolConfig(MaintenanceModeAllowedConfig) +func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { + return c.readBoolConfig(ctx, MaintenanceModeAllowedConfig) } -func (c *Client) readUInt64Config(key string) (uint64, error) { - v, err := c.config([]byte(key), IntegerAssert) +func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { + v, err := c.config(ctx, []byte(key), IntegerAssert) if err != nil { return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } @@ -117,8 +117,8 @@ func (c *Client) readUInt64Config(key string) (uint64, error) { // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. -func (c *Client) readBoolConfig(key string) (bool, error) { - v, err := c.config([]byte(key), BoolAssert) +func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { + v, err := c.config(ctx, []byte(key), BoolAssert) if err != nil { if errors.Is(err, ErrConfigNotFound) { return false, nil @@ -199,12 +199,12 @@ type NetworkConfiguration struct { } // ReadNetworkConfiguration reads NetworkConfiguration from the FrostFS Sidechain. 
-func (c *Client) ReadNetworkConfiguration() (NetworkConfiguration, error) { +func (c *Client) ReadNetworkConfiguration(ctx context.Context) (NetworkConfiguration, error) { var res NetworkConfiguration prm := client.TestInvokePrm{} prm.SetMethod(configListMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { return res, fmt.Errorf("test invoke (%s): %w", configListMethod, err) @@ -285,12 +285,12 @@ var ErrConfigNotFound = errors.New("config value not found") // method of FrostFS Netmap contract. // // Returns ErrConfigNotFound if config key is not found in the contract. -func (c *Client) config(key []byte, assert func(stackitem.Item) (any, error)) (any, error) { +func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.Item) (any, error)) (any, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", configMethod, err) diff --git a/pkg/morph/client/netmap/epoch.go b/pkg/morph/client/netmap/epoch.go index 6d7394998..8561329ec 100644 --- a/pkg/morph/client/netmap/epoch.go +++ b/pkg/morph/client/netmap/epoch.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,11 +9,11 @@ import ( // Epoch receives number of current FrostFS epoch // through the Netmap contract call. -func (c *Client) Epoch() (uint64, error) { +func (c *Client) Epoch(ctx context.Context) (uint64, error) { prm := client.TestInvokePrm{} prm.SetMethod(epochMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", epochMethod, err) @@ -32,11 +33,11 @@ func (c *Client) Epoch() (uint64, error) { // LastEpochBlock receives block number of current FrostFS epoch // through the Netmap contract call. -func (c *Client) LastEpochBlock() (uint32, error) { +func (c *Client) LastEpochBlock(ctx context.Context) (uint32, error) { prm := client.TestInvokePrm{} prm.SetMethod(lastEpochBlockMethod) - items, err := c.client.TestInvoke(prm) + items, err := c.client.TestInvoke(ctx, prm) if err != nil { return 0, fmt.Errorf("test invoke (%s): %w", lastEpochBlockMethod, err) diff --git a/pkg/morph/client/netmap/innerring.go b/pkg/morph/client/netmap/innerring.go index 0cfad4c82..0e1f9186b 100644 --- a/pkg/morph/client/netmap/innerring.go +++ b/pkg/morph/client/netmap/innerring.go @@ -40,11 +40,11 @@ func (c *Client) UpdateInnerRing(ctx context.Context, p UpdateIRPrm) error { } // GetInnerRingList return current IR list. 
-func (c *Client) GetInnerRingList() (keys.PublicKeys, error) { +func (c *Client) GetInnerRingList(ctx context.Context) (keys.PublicKeys, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(innerRingListMethod) - prms, err := c.client.TestInvoke(invokePrm) + prms, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", innerRingListMethod, err) } diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go index a0009ea73..97782fc25 100644 --- a/pkg/morph/client/netmap/netmap.go +++ b/pkg/morph/client/netmap/netmap.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" netmapcontract "git.frostfs.info/TrueCloudLab/frostfs-contract/netmap" @@ -11,12 +12,12 @@ import ( // GetNetMapByEpoch calls "snapshotByEpoch" method with the given epoch and // decodes netmap.NetMap from the response. -func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(epochSnapshotMethod) invokePrm.SetArgs(epoch) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", epochSnapshotMethod, err) @@ -34,11 +35,11 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { // GetCandidates calls "netmapCandidates" method and decodes []netmap.NodeInfo // from the response. -func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { +func (c *Client) GetCandidates(ctx context.Context) ([]netmap.NodeInfo, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapCandidatesMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", netMapCandidatesMethod, err) } @@ -51,11 +52,11 @@ func (c *Client) GetCandidates() ([]netmap.NodeInfo, error) { } // NetMap calls "netmap" method and decode netmap.NetMap from the response. -func (c *Client) NetMap() (*netmap.NetMap, error) { +func (c *Client) NetMap(ctx context.Context) (*netmap.NetMap, error) { invokePrm := client.TestInvokePrm{} invokePrm.SetMethod(netMapMethod) - res, err := c.client.TestInvoke(invokePrm) + res, err := c.client.TestInvoke(ctx, invokePrm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", netMapMethod, err) diff --git a/pkg/morph/client/netmap/snapshot.go b/pkg/morph/client/netmap/snapshot.go index a5134bcef..9dbec1a90 100644 --- a/pkg/morph/client/netmap/snapshot.go +++ b/pkg/morph/client/netmap/snapshot.go @@ -1,6 +1,7 @@ package netmap import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -8,12 +9,12 @@ import ( ) // GetNetMap calls "snapshot" method and decodes netmap.NetMap from the response. 
-func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (c *Client) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { prm := client.TestInvokePrm{} prm.SetMethod(snapshotMethod) prm.SetArgs(diff) - res, err := c.client.TestInvoke(prm) + res, err := c.client.TestInvoke(ctx, prm) if err != nil { return nil, fmt.Errorf("test invoke (%s): %w", snapshotMethod, err) } diff --git a/pkg/morph/client/static.go b/pkg/morph/client/static.go index 21adebd9e..c4eb120d2 100644 --- a/pkg/morph/client/static.go +++ b/pkg/morph/client/static.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" @@ -205,7 +206,9 @@ func (ti *TestInvokePrm) SetArgs(args ...any) { } // TestInvoke calls TestInvoke method of Client with static internal script hash. -func (s StaticClient) TestInvoke(prm TestInvokePrm) ([]stackitem.Item, error) { +func (s StaticClient) TestInvoke(ctx context.Context, prm TestInvokePrm) ([]stackitem.Item, error) { + _, span := tracing.StartSpanFromContext(ctx, "Morph.TestInvoke."+prm.method) + defer span.End() return s.client.TestInvoke( s.scScriptHash, prm.method, diff --git a/pkg/services/accounting/morph/executor.go b/pkg/services/accounting/morph/executor.go index b77d3e3e6..6c2df8428 100644 --- a/pkg/services/accounting/morph/executor.go +++ b/pkg/services/accounting/morph/executor.go @@ -21,7 +21,7 @@ func NewExecutor(client *balance.Client) accountingSvc.ServiceExecutor { } } -func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { +func (s *morphExecutor) Balance(ctx context.Context, body *accounting.BalanceRequestBody) (*accounting.BalanceResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errors.New("missing account") @@ -34,12 +34,12 @@ func (s *morphExecutor) Balance(_ context.Context, body *accounting.BalanceReque return nil, fmt.Errorf("invalid account: %w", err) } - amount, err := s.client.BalanceOf(id) + amount, err := s.client.BalanceOf(ctx, id) if err != nil { return nil, err } - balancePrecision, err := s.client.Decimals() + balancePrecision, err := s.client.Decimals(ctx) if err != nil { return nil, err } diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 9d8f665af..58922fede 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -78,12 +78,12 @@ var _ Server = (*Service)(nil) // validateContainerTargetRequest validates request for the container target. // It checks if request actor is the owner of the container, otherwise it denies the request. 
-func (s *Service) validateContainerTargetRequest(cid string, pubKey *keys.PublicKey) error { +func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { return fmt.Errorf("invalid CID format: %w", err) } - isOwner, err := s.isActorContainerOwner(cidSDK, pubKey) + isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) if err != nil { return fmt.Errorf("failed to check owner: %w", err) } @@ -117,7 +117,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) @@ -153,7 +153,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) @@ -177,7 +177,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain return resp, nil } -func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { +func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRequest) (*apemanagerV2.ListChainsResponse, error) { pub, err := getSignaturePublicKey(req.GetVerificationHeader()) if err != nil { return nil, err @@ -188,7 +188,7 @@ func (s *Service) ListChains(_ context.Context, req *apemanagerV2.ListChainsRequ switch targetType := req.GetBody().GetTarget().GetTargetType(); targetType { case apeV2.TargetTypeContainer: reqCID := req.GetBody().GetTarget().GetName() - if err = s.validateContainerTargetRequest(reqCID, pub); err != nil { + if err = s.validateContainerTargetRequest(ctx, reqCID, pub); err != nil { return nil, err } target = policy_engine.ContainerTarget(reqCID) @@ -237,13 +237,13 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK return key, nil } -func (s *Service) isActorContainerOwner(cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { +func (s *Service) isActorContainerOwner(ctx context.Context, cid cidSDK.ID, pk *keys.PublicKey) (bool, error) { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - cnr, err := s.cnrSrc.Get(cid) + cnr, err := s.cnrSrc.Get(ctx, cid) if err != nil { return false, fmt.Errorf("get container error: %w", err) } diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index 86021c3db..c9b0b7363 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -1,6 +1,7 @@ package ape import ( + "context" "crypto/ecdsa" "errors" "fmt" @@ -48,7 +49,7 @@ type CheckPrm struct { // CheckCore provides methods to perform the common logic of APE check. type CheckCore interface { // CheckAPE performs the common policy-engine check logic on a prepared request. 
- CheckAPE(prm CheckPrm) error + CheckAPE(ctx context.Context, prm CheckPrm) error } type checkerCoreImpl struct { @@ -70,7 +71,7 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora } // CheckAPE performs the common policy-engine check logic on a prepared request. -func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { +func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { var cr policyengine.ChainRouter if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { var err error @@ -85,7 +86,7 @@ func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) } - groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey) + groups, err := aperequest.Groups(ctx, c.FrostFSSubjectProvider, prm.PublicKey) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index 2c240412b..e1fbe3960 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -49,11 +49,11 @@ var ( ) type ir interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type containers interface { - Get(cid.ID) (*containercore.Container, error) + Get(context.Context, cid.ID) (*containercore.Container, error) } type apeChecker struct { @@ -106,7 +106,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, err } @@ -126,11 +126,11 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co } } - namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) if err != nil { return nil, fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { return nil, err } @@ -143,7 +143,7 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -179,7 +179,7 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt ctx, span := tracing.StartSpanFromContext(stream.Context(), "apeChecker.ListStream") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(stream.Context(), req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return err } @@ -189,7 +189,7 @@ func 
(ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return err } @@ -199,11 +199,11 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt } } - namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID()) + namespace, err := ac.namespaceByOwner(ctx, req.GetBody().GetOwnerID()) if err != nil { return fmt.Errorf("could not get owner namespace: %w", err) } - if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil { + if err := ac.validateNamespaceByPublicKey(ctx, pk, namespace); err != nil { return err } @@ -216,7 +216,7 @@ func (ac *apeChecker) ListStream(req *container.ListStreamRequest, stream ListSt reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -252,7 +252,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put") defer span.End() - role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) + role, pk, err := ac.getRoleWithoutContainerID(ctx, req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader()) if err != nil { return nil, err } @@ -262,7 +262,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, err } @@ -272,7 +272,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont } } - namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID()) + namespace, err := ac.namespaceByKnownOwner(ctx, req.GetBody().GetContainer().GetOwnerID()) if err != nil { return nil, fmt.Errorf("get namespace error: %w", err) } @@ -289,7 +289,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont reqProps, ) - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return nil, fmt.Errorf("failed to get group ids: %w", err) } @@ -321,7 +321,7 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont return nil, apeErr(nativeschema.MethodPutContainer, s) } -func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { +func (ac *apeChecker) getRoleWithoutContainerID(ctx context.Context, oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) { if vh == nil { return "", nil, errMissingVerificationHeader } @@ -344,7 +344,7 @@ func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.R } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", nil, err } @@ -365,7 +365,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con return err } - cont, err := ac.reader.Get(id) + cont, err := 
ac.reader.Get(ctx, id) if err != nil { return err } @@ -381,7 +381,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, con namespace = cntNamespace } - groups, err := aperequest.Groups(ac.frostFSIDClient, pk) + groups, err := aperequest.Groups(ctx, ac.frostFSIDClient, pk) if err != nil { return fmt.Errorf("failed to get group ids: %w", err) } @@ -458,7 +458,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe if err != nil { return nil, nil, err } - role, err := ac.getRole(actor, pk, cont, cnrID) + role, err := ac.getRole(ctx, actor, pk, cont, cnrID) if err != nil { return nil, nil, err } @@ -466,7 +466,7 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()), nativeschema.PropertyKeyActorRole: role, } - reqProps, err = ac.fillWithUserClaimTags(reqProps, pk) + reqProps, err = ac.fillWithUserClaimTags(ctx, reqProps, pk) if err != nil { return nil, nil, err } @@ -478,13 +478,13 @@ func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMe return reqProps, pk, nil } -func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { +func (ac *apeChecker) getRole(ctx context.Context, actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) { if cont.Value.Owner().Equals(*actor) { return nativeschema.PropertyValueContainerRoleOwner, nil } pkBytes := pk.Bytes() - isIR, err := ac.isInnerRingKey(pkBytes) + isIR, err := ac.isInnerRingKey(ctx, pkBytes) if err != nil { return "", err } @@ -492,7 +492,7 @@ func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containe return nativeschema.PropertyValueContainerRoleIR, nil } - isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont) + isContainer, err := ac.isContainerKey(ctx, pkBytes, cnrID, cont) if err != nil { return "", err } @@ -586,8 +586,8 @@ func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { return id2.Equals(id) } -func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { - innerRingKeys, err := ac.ir.InnerRingKeys() +func (ac *apeChecker) isInnerRingKey(ctx context.Context, pk []byte) (bool, error) { + innerRingKeys, err := ac.ir.InnerRingKeys(ctx) if err != nil { return false, err } @@ -601,11 +601,11 @@ func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) { return false, nil } -func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { +func (ac *apeChecker) isContainerKey(ctx context.Context, pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) { binCnrID := make([]byte, sha256.Size) cnrID.Encode(binCnrID) - nm, err := netmap.GetLatestNetworkMap(ac.nm) + nm, err := netmap.GetLatestNetworkMap(ctx, ac.nm) if err != nil { return false, err } @@ -616,7 +616,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor // then check previous netmap, this can happen in-between epoch change // when node migrates data from last epoch container - nm, err = netmap.GetPreviousNetworkMap(ac.nm) + nm, err = netmap.GetPreviousNetworkMap(ctx, ac.nm) if err != nil { return false, err } @@ -641,7 +641,7 @@ func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containerc return false } -func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByOwner(ctx context.Context, 
owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -652,7 +652,7 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { addr := ownerSDK.ScriptHash() namespace := "" - subject, err := ac.frostFSIDClient.GetSubject(addr) + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace } else { @@ -663,7 +663,7 @@ func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) { return namespace, nil } -func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) { +func (ac *apeChecker) namespaceByKnownOwner(ctx context.Context, owner *refs.OwnerID) (string, error) { var ownerSDK user.ID if owner == nil { return "", errOwnerIDIsNotSet @@ -672,7 +672,7 @@ func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) return "", err } addr := ownerSDK.ScriptHash() - subject, err := ac.frostFSIDClient.GetSubject(addr) + subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err != nil { return "", fmt.Errorf("get subject error: %w", err) } @@ -706,12 +706,12 @@ func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) erro // validateNamespace validates if a namespace of a request actor equals to owner's namespace. // An actor's namespace is calculated by a public key. -func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error { +func (ac *apeChecker) validateNamespaceByPublicKey(ctx context.Context, pk *keys.PublicKey, ownerIDNamespace string) error { var actor user.ID user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk)) actorOwnerID := new(refs.OwnerID) actor.WriteToV2(actorOwnerID) - actorNamespace, err := ac.namespaceByOwner(actorOwnerID) + actorNamespace, err := ac.namespaceByOwner(ctx, actorOwnerID) if err != nil { return fmt.Errorf("could not get actor namespace: %w", err) } @@ -722,11 +722,11 @@ func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNa } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. 
-func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { +func (ac *apeChecker) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, ac.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go index 38b240d94..77a981d1a 100644 --- a/pkg/services/container/ape_test.go +++ b/pkg/services/container/ape_test.go @@ -1092,7 +1092,7 @@ type irStub struct { keys [][]byte } -func (s *irStub) InnerRingKeys() ([][]byte, error) { +func (s *irStub) InnerRingKeys(_ context.Context) ([][]byte, error) { return s.keys, nil } @@ -1100,7 +1100,7 @@ type containerStub struct { c map[cid.ID]*containercore.Container } -func (s *containerStub) Get(id cid.ID) (*containercore.Container, error) { +func (s *containerStub) Get(_ context.Context, id cid.ID) (*containercore.Container, error) { if v, ok := s.c[id]; ok { return v, nil } @@ -1112,21 +1112,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -1135,7 +1135,7 @@ type frostfsidStub struct { subjectsExt map[util.Uint160]*client.SubjectExtended } -func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) { +func (f *frostfsidStub) GetSubject(ctx context.Context, owner util.Uint160) (*client.Subject, error) { s, ok := f.subjects[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -1143,7 +1143,7 @@ func (f *frostfsidStub) GetSubject(owner util.Uint160) (*client.Subject, error) return s, nil } -func (f *frostfsidStub) GetSubjectExtended(owner util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsidStub) GetSubjectExtended(ctx context.Context, owner util.Uint160) (*client.SubjectExtended, error) { s, ok := f.subjectsExt[owner] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go index cadf92e19..eaa608eba 100644 --- a/pkg/services/container/morph/executor.go +++ b/pkg/services/container/morph/executor.go @@ -29,8 +29,8 @@ type Reader interface { // ContainersOf returns a list of container identifiers belonging // to the specified user of FrostFS system. Returns the identifiers // of all FrostFS containers if pointer to owner identifier is nil. 
- ContainersOf(*user.ID) ([]cid.ID, error) - IterateContainersOf(*user.ID, func(cid.ID) error) error + ContainersOf(context.Context, *user.ID) ([]cid.ID, error) + IterateContainersOf(context.Context, *user.ID, func(cid.ID) error) error } // Writer is an interface of container storage updater. @@ -133,7 +133,7 @@ func (s *morphExecutor) Delete(ctx context.Context, tokV2 *sessionV2.Token, body return new(container.DeleteResponseBody), nil } -func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { +func (s *morphExecutor) Get(ctx context.Context, body *container.GetRequestBody) (*container.GetResponseBody, error) { idV2 := body.GetContainerID() if idV2 == nil { return nil, errors.New("missing container ID") @@ -146,7 +146,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return nil, fmt.Errorf("invalid container ID: %w", err) } - cnr, err := s.rdr.Get(id) + cnr, err := s.rdr.Get(ctx, id) if err != nil { return nil, err } @@ -173,7 +173,7 @@ func (s *morphExecutor) Get(_ context.Context, body *container.GetRequestBody) ( return res, nil } -func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { +func (s *morphExecutor) List(ctx context.Context, body *container.ListRequestBody) (*container.ListResponseBody, error) { idV2 := body.GetOwnerID() if idV2 == nil { return nil, errMissingUserID @@ -186,7 +186,7 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody) return nil, fmt.Errorf("invalid user ID: %w", err) } - cnrs, err := s.rdr.ContainersOf(&id) + cnrs, err := s.rdr.ContainersOf(ctx, &id) if err != nil { return nil, err } @@ -243,7 +243,7 @@ func (s *morphExecutor) ListStream(ctx context.Context, req *container.ListStrea return nil } - if err = s.rdr.IterateContainersOf(&id, processCID); err != nil { + if err = s.rdr.IterateContainersOf(ctx, &id, processCID); err != nil { return err } diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go index e2c385c6a..0509d2646 100644 --- a/pkg/services/control/ir/server/calls.go +++ b/pkg/services/control/ir/server/calls.go @@ -48,7 +48,7 @@ func (s *Server) TickEpoch(ctx context.Context, req *control.TickEpochRequest) ( resp := new(control.TickEpochResponse) resp.SetBody(new(control.TickEpochResponse_Body)) - epoch, err := s.netmapClient.Epoch() + epoch, err := s.netmapClient.Epoch(ctx) if err != nil { return nil, fmt.Errorf("getting current epoch: %w", err) } @@ -77,7 +77,7 @@ func (s *Server) RemoveNode(ctx context.Context, req *control.RemoveNodeRequest) resp := new(control.RemoveNodeResponse) resp.SetBody(new(control.RemoveNodeResponse_Body)) - nm, err := s.netmapClient.NetMap() + nm, err := s.netmapClient.NetMap(ctx) if err != nil { return nil, fmt.Errorf("getting netmap: %w", err) } @@ -138,7 +138,7 @@ func (s *Server) RemoveContainer(ctx context.Context, req *control.RemoveContain return nil, status.Error(codes.InvalidArgument, "failed to read owner: "+err.Error()) } - cids, err := s.containerClient.ContainersOf(&owner) + cids, err := s.containerClient.ContainersOf(ctx, &owner) if err != nil { return nil, fmt.Errorf("failed to get owner's containers: %w", err) } diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index 6982d780d..bbf2cf0cc 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -1,6 +1,8 @@ package control import ( + "context" + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/common" ) @@ -73,6 +75,7 @@ func SetNetmapStatus( // GetNetmapStatus executes ControlService.GetNetmapStatus RPC. func GetNetmapStatus( + _ context.Context, cli *client.Client, req *GetNetmapStatusRequest, opts ...client.CallOption, diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index da5401515..7469ea74e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -157,7 +157,7 @@ func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *obj return false, nil } - nodes, err := s.getContainerNodes(cid) + nodes, err := s.getContainerNodes(ctx, cid) if err != nil { return false, err } @@ -182,7 +182,7 @@ func (s *Server) replicateObject(ctx context.Context, addr oid.Address, obj *obj } func (s *Server) replicateTree(ctx context.Context, contID cid.ID, treeID string, forest pilorama.Forest) (bool, string, error) { - nodes, err := s.getContainerNodes(contID) + nodes, err := s.getContainerNodes(ctx, contID) if err != nil { return false, "", err } @@ -240,13 +240,13 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest pilorama.Forest } } -func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { - nm, err := s.netMapSrc.GetNetMap(0) +func (s *Server) getContainerNodes(ctx context.Context, contID cid.ID) ([]netmap.NodeInfo, error) { + nm, err := s.netMapSrc.GetNetMap(ctx, 0) if err != nil { return nil, err } - c, err := s.cnrSrc.Get(contID) + c, err := s.cnrSrc.Get(ctx, contID) if err != nil { return nil, err } diff --git a/pkg/services/control/server/get_netmap_status.go b/pkg/services/control/server/get_netmap_status.go index 1c038253a..5e0496910 100644 --- a/pkg/services/control/server/get_netmap_status.go +++ b/pkg/services/control/server/get_netmap_status.go @@ -10,12 +10,12 @@ import ( ) // GetNetmapStatus gets node status in FrostFS network. -func (s *Server) GetNetmapStatus(_ context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { +func (s *Server) GetNetmapStatus(ctx context.Context, req *control.GetNetmapStatusRequest) (*control.GetNetmapStatusResponse, error) { if err := s.isValidRequest(req); err != nil { return nil, status.Error(codes.PermissionDenied, err.Error()) } - st, epoch, err := s.nodeState.GetNetmapStatus() + st, epoch, err := s.nodeState.GetNetmapStatus(ctx) if err != nil { return nil, err } diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go index 94aa1ff5b..59d701bc6 100644 --- a/pkg/services/control/server/server.go +++ b/pkg/services/control/server/server.go @@ -52,7 +52,7 @@ type NodeState interface { // but starts local maintenance regardless of the network settings. ForceMaintenance(ctx context.Context) error - GetNetmapStatus() (control.NetmapStatus, uint64, error) + GetNetmapStatus(ctx context.Context) (control.NetmapStatus, uint64, error) } // LocalOverrideStorageDecorator interface provides methods to decorate LocalOverrideEngine diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 5223047df..44101a153 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -42,7 +42,7 @@ type NetworkInfo interface { // Dump must return recent network information in FrostFS API v2 NetworkInfo structure. 
// // If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset. - Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error) + Dump(context.Context, versionsdk.Version) (*netmapSDK.NetworkInfo, error) } func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { @@ -82,7 +82,7 @@ func (s *executorSvc) LocalNodeInfo( } func (s *executorSvc) NetworkInfo( - _ context.Context, + ctx context.Context, req *netmap.NetworkInfoRequest, ) (*netmap.NetworkInfoResponse, error) { verV2 := req.GetMetaHeader().GetVersion() @@ -95,7 +95,7 @@ func (s *executorSvc) NetworkInfo( return nil, fmt.Errorf("can't read version: %w", err) } - ni, err := s.netInfo.Dump(ver) + ni, err := s.netInfo.Dump(ctx, ver) if err != nil { return nil, err } diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go index db0f13ee7..86daec6cc 100644 --- a/pkg/services/object/acl/v2/service.go +++ b/pkg/services/object/acl/v2/service.go @@ -669,13 +669,13 @@ func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.Pa } func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { - cnr, err := b.containers.Get(idCnr) // fetch actual container + cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container if err != nil { return info, err } if req.token != nil { - currentEpoch, err := b.nm.Epoch() + currentEpoch, err := b.nm.Epoch(ctx) if err != nil { return info, errors.New("can't fetch current epoch") } @@ -727,13 +727,13 @@ func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr c // findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { - cnr, err := b.containers.Get(idCnr) // fetch actual container + cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container if err != nil { return info, err } if req.token != nil { - currentEpoch, err := b.nm.Epoch() + currentEpoch, err := b.nm.Epoch(ctx) if err != nil { return info, errors.New("can't fetch current epoch") } diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go index b03261b90..3cf10eb56 100644 --- a/pkg/services/object/acl/v2/types.go +++ b/pkg/services/object/acl/v2/types.go @@ -1,9 +1,11 @@ package v2 +import "context" + // InnerRingFetcher is an interface that must provide // Inner Ring information. type InnerRingFetcher interface { // InnerRingKeys must return list of public keys of // the actual inner ring. 
- InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index 4a3b5ba5e..ee46a6fe4 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -99,7 +99,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { return err } - return c.checkerCore.CheckAPE(checkercore.CheckPrm{ + return c.checkerCore.CheckAPE(ctx, checkercore.CheckPrm{ Request: r, PublicKey: pub, Namespace: prm.Namespace, diff --git a/pkg/services/object/ape/checker_test.go b/pkg/services/object/ape/checker_test.go index e03b5750c..97eb2b2d7 100644 --- a/pkg/services/object/ape/checker_test.go +++ b/pkg/services/object/ape/checker_test.go @@ -219,7 +219,7 @@ func scriptHashFromSenderKey(t *testing.T, senderKey string) util.Uint160 { return pk.GetScriptHash() } -func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -227,7 +227,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -619,21 +619,21 @@ type netmapStub struct { currentEpoch uint64 } -func (s *netmapStub) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { if diff >= s.currentEpoch { return nil, errors.New("invalid diff") } - return s.GetNetMapByEpoch(s.currentEpoch - diff) + return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) } -func (s *netmapStub) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) { +func (s *netmapStub) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { if nm, found := s.netmaps[epoch]; found { return nm, nil } return nil, errors.New("netmap not found") } -func (s *netmapStub) Epoch() (uint64, error) { +func (s *netmapStub) Epoch(ctx context.Context) (uint64, error) { return s.currentEpoch, nil } @@ -641,14 +641,14 @@ type testContainerSource struct { containers map[cid.ID]*container.Container } -func (s *testContainerSource) Get(cnrID cid.ID) (*container.Container, error) { +func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container.Container, error) { if cnr, found := s.containers[cnrID]; found { return cnr, nil } return nil, fmt.Errorf("container not found") } -func (s *testContainerSource) DeletionInfo(cid.ID) (*container.DelInfo, error) { +func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index cb9bbf1b8..001a5f71e 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -140,7 +140,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re reqProps[xheadKey] = xhead.GetValue() } - reqProps, err = c.fillWithUserClaimTags(reqProps, prm) + reqProps, err = 
c.fillWithUserClaimTags(ctx, reqProps, prm) if err != nil { return defaultRequest, err } @@ -177,7 +177,7 @@ func (c *checkerImpl) fillHeaderWithECParent(ctx context.Context, prm Prm, heade return nil, fmt.Errorf("EC parent object ID format error: %w", err) } // only container node have access to collect parent object - contNode, err := c.currentNodeIsContainerNode(prm.Container) + contNode, err := c.currentNodeIsContainerNode(ctx, prm.Container) if err != nil { return nil, fmt.Errorf("check container node status: %w", err) } @@ -200,13 +200,13 @@ func isLogicalError(err error) bool { return errors.As(err, &errObjRemoved) || errors.As(err, &errObjNotFound) } -func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { - cnr, err := c.cnrSource.Get(cnrID) +func (c *checkerImpl) currentNodeIsContainerNode(ctx context.Context, cnrID cid.ID) (bool, error) { + cnr, err := c.cnrSource.Get(ctx, cnrID) if err != nil { return false, err } - nm, err := netmap.GetLatestNetworkMap(c.nm) + nm, err := netmap.GetLatestNetworkMap(ctx, c.nm) if err != nil { return false, err } @@ -220,7 +220,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { return true, nil } - nm, err = netmap.GetPreviousNetworkMap(c.nm) + nm, err = netmap.GetPreviousNetworkMap(ctx, c.nm) if err != nil { return false, err } @@ -229,7 +229,7 @@ func (c *checkerImpl) currentNodeIsContainerNode(cnrID cid.ID) (bool, error) { } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) (map[string]string, error) { +func (c *checkerImpl) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, prm Prm) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } @@ -237,7 +237,7 @@ func (c *checkerImpl) fillWithUserClaimTags(reqProps map[string]string, prm Prm) if err != nil { return nil, err } - props, err := aperequest.FormFrostfsIDRequestProperties(c.frostFSIDClient, pk) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, c.frostFSIDClient, pk) if err != nil { return reqProps, err } diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index 9e0f49297..b2ae79dbc 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -1,6 +1,7 @@ package target import ( + "context" "errors" "fmt" @@ -13,20 +14,20 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" ) -func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func New(ctx context.Context, prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) { // prepare needed put parameters - if err := preparePrm(&prm); err != nil { + if err := preparePrm(ctx, &prm); err != nil { return nil, fmt.Errorf("could not prepare put parameters: %w", err) } if prm.Header.Signature() != nil { - return newUntrustedTarget(&prm) + return newUntrustedTarget(ctx, &prm) } - return newTrustedTarget(&prm) + return newTrustedTarget(ctx, &prm) } -func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() +func newUntrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) if maxPayloadSz == 0 { return nil, errors.New("could not obtain 
max object size parameter") } @@ -48,9 +49,9 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit }, nil } -func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { +func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) { prm.Relay = nil // do not relay request without signature - maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize() + maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize(ctx) if maxPayloadSz == 0 { return nil, errors.New("could not obtain max object size parameter") } @@ -111,11 +112,11 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter }, nil } -func preparePrm(prm *objectwriter.Params) error { +func preparePrm(ctx context.Context, prm *objectwriter.Params) error { var err error // get latest network map - nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource) + nm, err := netmap.GetLatestNetworkMap(ctx, prm.Config.NetmapSource) if err != nil { return fmt.Errorf("could not get latest network map: %w", err) } @@ -126,7 +127,7 @@ func preparePrm(prm *objectwriter.Params) error { } // get container to store the object - cnrInfo, err := prm.Config.ContainerSource.Get(idCnr) + cnrInfo, err := prm.Config.ContainerSource.Get(ctx, idCnr) if err != nil { return fmt.Errorf("could not get container by ID: %w", err) } diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 3b68efab4..dae168baf 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { } func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(n.Traversal.Opts...) + traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 94bcf6a32..8f269ec21 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -85,7 +85,7 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error } func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) { - currentNodeIsContainerNode, err := e.currentNodeIsContainerNode() + currentNodeIsContainerNode, err := e.currentNodeIsContainerNode(ctx) if err != nil { return false, false, err } @@ -108,8 +108,8 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O return true, currentNodeIsContainerNode, nil } -func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { - t, err := placement.NewTraverser(e.PlacementOpts...) +func (e *ECWriter) currentNodeIsContainerNode(ctx context.Context) (bool, error) { + t, err := placement.NewTraverser(ctx, e.PlacementOpts...) if err != nil { return false, err } @@ -128,7 +128,7 @@ func (e *ECWriter) currentNodeIsContainerNode() (bool, error) { } func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error { - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) 
if err != nil { return err } @@ -180,7 +180,7 @@ func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error return e.writePartLocal(ctx, obj) } - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...) if err != nil { return err } @@ -217,7 +217,7 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er } partsProcessed := make([]atomic.Bool, len(parts)) objID, _ := obj.ID() - t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...) + t, err := placement.NewTraverser(ctx, append(e.PlacementOpts, placement.ForObject(objID))...) if err != nil { return err } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index 8ad7e641a..b7764661f 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -39,7 +39,7 @@ type testPlacementBuilder struct { vectors [][]netmap.NodeInfo } -func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, _ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) ( [][]netmap.NodeInfo, error, ) { arr := slices.Clone(p.vectors[0]) diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go index 0e4c4d9c6..adaf1945b 100644 --- a/pkg/services/object/common/writer/writer.go +++ b/pkg/services/object/common/writer/writer.go @@ -24,7 +24,7 @@ type MaxSizeSource interface { // of physically stored object in system. // // Must return 0 if value can not be obtained. 
- MaxObjectSize() uint64 + MaxObjectSize(context.Context) uint64 } type ClientConstructor interface { @@ -32,7 +32,7 @@ type ClientConstructor interface { } type InnerRing interface { - InnerRingKeys() ([][]byte, error) + InnerRingKeys(ctx context.Context) ([][]byte, error) } type FormatValidatorConfig interface { diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go index a53299480..e0a7e1da6 100644 --- a/pkg/services/object/get/assemblerec.go +++ b/pkg/services/object/get/assemblerec.go @@ -125,7 +125,7 @@ func (a *assemblerec) reconstructObject(ctx context.Context, writer ObjectWriter func (a *assemblerec) reconstructObjectFromParts(ctx context.Context, headers bool) (*objectSDK.Object, error) { objID := a.addr.Object() - trav, cnr, err := a.traverserGenerator.GenerateTraverser(a.addr.Container(), &objID, a.epoch) + trav, cnr, err := a.traverserGenerator.GenerateTraverser(ctx, a.addr.Container(), &objID, a.epoch) if err != nil { return nil, err } diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 6827018dc..3efc72065 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -63,7 +63,7 @@ type testClient struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -79,7 +79,7 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, cnr cid.ID, obj *oid.ID, e uint64) (*placement.Traverser, *containerCore.Container, error) { opts := make([]placement.Option, 0, 4) opts = append(opts, placement.ForContainer(g.c), @@ -91,13 +91,13 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui opts = append(opts, placement.ForObject(*obj)) } - t, err := placement.NewTraverser(opts...) + t, err := placement.NewTraverser(context.Background(), opts...) 
return t, &containerCore.Container{ Value: g.c, }, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index 599a6f176..83ef54744 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -28,14 +28,14 @@ type containerStorage struct { cnt *container.Container } -func (cs *containerStorage) Get(cid.ID) (*coreContainer.Container, error) { +func (cs *containerStorage) Get(context.Context, cid.ID) (*coreContainer.Container, error) { coreCnt := coreContainer.Container{ Value: *cs.cnt, } return &coreCnt, nil } -func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) { +func (cs *containerStorage) DeletionInfo(context.Context, cid.ID) (*coreContainer.DelInfo, error) { return nil, nil } diff --git a/pkg/services/object/get/remote_getter.go b/pkg/services/object/get/remote_getter.go index 0df67dec9..2c64244cf 100644 --- a/pkg/services/object/get/remote_getter.go +++ b/pkg/services/object/get/remote_getter.go @@ -30,7 +30,7 @@ func (g *RemoteGetter) Get(ctx context.Context, prm RemoteGetPrm) (*objectSDK.Ob if err != nil { return nil, err } - epoch, err := g.es.Epoch() + epoch, err := g.es.Epoch(ctx) if err != nil { return nil, err } diff --git a/pkg/services/object/get/request.go b/pkg/services/object/get/request.go index be0950c60..268080486 100644 --- a/pkg/services/object/get/request.go +++ b/pkg/services/object/get/request.go @@ -122,7 +122,7 @@ func (r *request) initEpoch(ctx context.Context) bool { return true } - e, err := r.epochSource.Epoch() + e, err := r.epochSource.Epoch(ctx) switch { default: @@ -141,7 +141,7 @@ func (r *request) initEpoch(ctx context.Context) bool { func (r *request) generateTraverser(ctx context.Context, addr oid.Address) (*placement.Traverser, bool) { obj := addr.Object() - t, _, err := r.traverserGenerator.GenerateTraverser(addr.Container(), &obj, r.curProcEpoch) + t, _, err := r.traverserGenerator.GenerateTraverser(ctx, addr.Container(), &obj, r.curProcEpoch) switch { default: diff --git a/pkg/services/object/get/types.go b/pkg/services/object/get/types.go index 9669afdba..664366d1b 100644 --- a/pkg/services/object/get/types.go +++ b/pkg/services/object/get/types.go @@ -20,11 +20,11 @@ import ( ) type epochSource interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } type traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } type keyStorage interface { diff --git a/pkg/services/object/get/v2/get_range_hash.go b/pkg/services/object/get/v2/get_range_hash.go index 7d26a38c3..308ccd512 100644 --- a/pkg/services/object/get/v2/get_range_hash.go +++ b/pkg/services/object/get/v2/get_range_hash.go @@ -22,7 +22,7 @@ import ( // GetRangeHash calls internal service and returns v2 response. 
func (s *Service) GetRangeHash(ctx context.Context, req *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - forward, err := s.needToForwardGetRangeHashRequest(req) + forward, err := s.needToForwardGetRangeHashRequest(ctx, req) if err != nil { return nil, err } @@ -48,7 +48,7 @@ type getRangeForwardParams struct { address oid.Address } -func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { +func (s *Service) needToForwardGetRangeHashRequest(ctx context.Context, req *objectV2.GetRangeHashRequest) (getRangeForwardParams, error) { if req.GetMetaHeader().GetTTL() <= 1 { return getRangeForwardParams{}, nil } @@ -66,17 +66,17 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq } result.address = addr - cont, err := s.contSource.Get(addr.Container()) + cont, err := s.contSource.Get(ctx, addr.Container()) if err != nil { return result, fmt.Errorf("(%T) could not get container: %w", s, err) } - epoch, err := s.netmapSource.Epoch() + epoch, err := s.netmapSource.Epoch(ctx) if err != nil { return result, fmt.Errorf("(%T) could not get epoch: %w", s, err) } - nm, err := s.netmapSource.GetNetMapByEpoch(epoch) + nm, err := s.netmapSource.GetNetMapByEpoch(ctx, epoch) if err != nil { return result, fmt.Errorf("(%T) could not get netmap: %w", s, err) } @@ -84,7 +84,7 @@ func (s *Service) needToForwardGetRangeHashRequest(req *objectV2.GetRangeHashReq builder := placement.NewNetworkMapBuilder(nm) objectID := addr.Object() - nodesVector, err := builder.BuildPlacement(addr.Container(), &objectID, cont.Value.PlacementPolicy()) + nodesVector, err := builder.BuildPlacement(ctx, addr.Container(), &objectID, cont.Value.PlacementPolicy()) if err != nil { return result, fmt.Errorf("(%T) could not build object placement: %w", s, err) } diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 91b4efdc1..5aba13f66 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error { } oV2.GetHeader().SetOwnerID(ownerID) - target, err := target.New(objectwriter.Params{ + target, err := target.New(ctx, objectwriter.Params{ Config: s.Config, Common: commonPrm, Header: objectSDK.NewFromV2(oV2), diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index fec50b1d9..f3c2dca1a 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -86,7 +86,7 @@ func (s *Service) PutSingle(ctx context.Context, req *objectAPI.PutSingleRequest } func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) { - if err := s.validarePutSingleSize(obj); err != nil { + if err := s.validarePutSingleSize(ctx, obj); err != nil { return object.ContentMeta{}, err } @@ -97,12 +97,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object) return s.validatePutSingleObject(ctx, obj) } -func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error { +func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Object) error { if uint64(len(obj.Payload())) != obj.PayloadSize() { return target.ErrWrongPayloadSize } - maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize() + maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx) if obj.PayloadSize() > maxAllowedSize { return target.ErrExceedingMaxSize } @@ -153,7 +153,7 @@ func 
(s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Ob func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { localOnly := req.GetMetaHeader().GetTTL() <= 1 - placement, err := s.getPutSinglePlacementOptions(obj, req.GetBody().GetCopiesNumber(), localOnly) + placement, err := s.getPutSinglePlacementOptions(ctx, obj, req.GetBody().GetCopiesNumber(), localOnly) if err != nil { return err } @@ -218,14 +218,14 @@ type putSinglePlacement struct { resetSuccessAfterOnBroadcast bool } -func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { +func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectSDK.Object, copiesNumber []uint32, localOnly bool) (putSinglePlacement, error) { var result putSinglePlacement cnrID, ok := obj.ContainerID() if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.Config.ContainerSource.Get(cnrID) + cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", err) } @@ -249,7 +249,7 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource) + latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go index f71309d31..19768b7fa 100644 --- a/pkg/services/object/put/streamer.go +++ b/pkg/services/object/put/streamer.go @@ -36,7 +36,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error { } var err error - p.target, err = target.New(prmTarget) + p.target, err = target.New(ctx, prmTarget) if err != nil { return fmt.Errorf("(%T) could not initialize object target: %w", p, err) } diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go index 1cd10cd7f..f0c648187 100644 --- a/pkg/services/object/put/v2/streamer.go +++ b/pkg/services/object/put/v2/streamer.go @@ -56,7 +56,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error) s.saveChunks = v.GetSignature() != nil if s.saveChunks { - maxSz := s.stream.MaxSizeSrc.MaxObjectSize() + maxSz := s.stream.MaxSizeSrc.MaxObjectSize(ctx) s.sizes = &sizes{ payloadSz: v.GetHeader().GetPayloadLength(), diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go index e82f999cf..60d469b11 100644 --- a/pkg/services/object/search/container.go +++ b/pkg/services/object/search/container.go @@ -20,7 +20,7 @@ func (exec *execCtx) executeOnContainer(ctx context.Context) error { ) // initialize epoch number - if err := exec.initEpoch(); err != nil { + if err := exec.initEpoch(ctx); err != nil { return fmt.Errorf("%s: %w", logs.CouldNotGetCurrentEpochNumber, err) } @@ -48,7 +48,7 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { zap.Uint64("number", exec.curProcEpoch), ) - traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(exec.containerID(), nil, exec.curProcEpoch) + traverser, _, err := exec.svc.traverserGenerator.GenerateTraverser(ctx, exec.containerID(), nil, exec.curProcEpoch) if err != nil { return fmt.Errorf("%s: 
%w", logs.SearchCouldNotGenerateContainerTraverser, err) } @@ -114,9 +114,9 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error { return nil } -func (exec *execCtx) getContainer() (containerSDK.Container, error) { +func (exec *execCtx) getContainer(ctx context.Context) (containerSDK.Container, error) { cnrID := exec.containerID() - cnr, err := exec.svc.containerSource.Get(cnrID) + cnr, err := exec.svc.containerSource.Get(ctx, cnrID) if err != nil { return containerSDK.Container{}, err } diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go index eb9635f14..ced51ecce 100644 --- a/pkg/services/object/search/exec.go +++ b/pkg/services/object/search/exec.go @@ -1,6 +1,8 @@ package searchsvc import ( + "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -48,13 +50,13 @@ func (exec *execCtx) netmapLookupDepth() uint64 { return exec.prm.common.NetmapLookupDepth() } -func (exec *execCtx) initEpoch() error { +func (exec *execCtx) initEpoch(ctx context.Context) error { exec.curProcEpoch = exec.netmapEpoch() if exec.curProcEpoch > 0 { return nil } - e, err := exec.svc.currentEpochReceiver.Epoch() + e, err := exec.svc.currentEpochReceiver.Epoch(ctx) if err != nil { return err } diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 05643eb2b..918ad421f 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -59,7 +59,7 @@ type simpleIDWriter struct { type testEpochReceiver uint64 -func (e testEpochReceiver) Epoch() (uint64, error) { +func (e testEpochReceiver) Epoch(ctx context.Context) (uint64, error) { return uint64(e), nil } @@ -82,8 +82,8 @@ func newTestStorage() *testStorage { } } -func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { - t, err := placement.NewTraverser( +func (g *testTraverserGenerator) GenerateTraverser(ctx context.Context, _ cid.ID, _ *oid.ID, epoch uint64) (*placement.Traverser, *containerCore.Container, error) { + t, err := placement.NewTraverser(context.Background(), placement.ForContainer(g.c), placement.UseBuilder(g.b[epoch]), placement.WithoutSuccessTracking(), @@ -91,7 +91,7 @@ func (g *testTraverserGenerator) GenerateTraverser(_ cid.ID, _ *oid.ID, epoch ui return t, &containerCore.Container{Value: g.c}, err } -func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (p *testPlacementBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { var addr oid.Address addr.SetContainer(cnr) diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index 77d25357a..e1aeca957 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -46,11 +46,11 @@ type cfg struct { } traverserGenerator interface { - GenerateTraverser(cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) + GenerateTraverser(context.Context, cid.ID, *oid.ID, uint64) (*placement.Traverser, *container.Container, error) } currentEpochReceiver interface { - Epoch() (uint64, error) + Epoch(ctx context.Context) (uint64, error) } keyStore *util.KeyStorage diff --git 
a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index 910384a0b..fed168187 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -113,7 +113,7 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c } func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) { - cnr, err := exec.getContainer() + cnr, err := exec.getContainer(ctx) if err != nil { return nil, err } diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index 1bd39f9ea..195944f92 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -1,6 +1,7 @@ package util import ( + "context" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" @@ -43,8 +44,8 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu } } -func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *localPlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -76,8 +77,8 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac } } -func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - vs, err := p.builder.BuildPlacement(cnr, obj, policy) +func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + vs, err := p.builder.BuildPlacement(ctx, cnr, obj, policy) if err != nil { return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err) } @@ -122,15 +123,15 @@ func (g *TraverserGenerator) WithTraverseOptions(opts ...placement.Option) *Trav // GenerateTraverser generates placement Traverser for provided object address // using epoch-th network map. -func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { +func (g *TraverserGenerator) GenerateTraverser(ctx context.Context, idCnr cid.ID, idObj *oid.ID, epoch uint64) (*placement.Traverser, *container.Container, error) { // get network map by epoch - nm, err := g.netMapSrc.GetNetMapByEpoch(epoch) + nm, err := g.netMapSrc.GetNetMapByEpoch(ctx, epoch) if err != nil { return nil, nil, fmt.Errorf("could not get network map #%d: %w", epoch, err) } // get container related container - cnr, err := g.cnrSrc.Get(idCnr) + cnr, err := g.cnrSrc.Get(ctx, idCnr) if err != nil { return nil, nil, fmt.Errorf("could not get container: %w", err) } @@ -160,7 +161,7 @@ func (g *TraverserGenerator) GenerateTraverser(idCnr cid.ID, idObj *oid.ID, epoc ) } - t, err := placement.NewTraverser(traverseOpts...) + t, err := placement.NewTraverser(ctx, traverseOpts...) 
if err != nil { return nil, nil, err } diff --git a/pkg/services/object_manager/placement/netmap.go b/pkg/services/object_manager/placement/netmap.go index 1782e27ea..b3f8d9c03 100644 --- a/pkg/services/object_manager/placement/netmap.go +++ b/pkg/services/object_manager/placement/netmap.go @@ -1,6 +1,7 @@ package placement import ( + "context" "crypto/sha256" "fmt" @@ -35,12 +36,12 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder { } } -func (s *netMapSrc) GetNetMap(_ uint64) (*netmapSDK.NetMap, error) { +func (s *netMapSrc) GetNetMap(_ context.Context, _ uint64) (*netmapSDK.NetMap, error) { return s.nm, nil } -func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { - nm, err := netmap.GetLatestNetworkMap(b.nmSrc) +func (b *netMapBuilder) BuildPlacement(ctx context.Context, cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) { + nm, err := netmap.GetLatestNetworkMap(ctx, b.nmSrc) if err != nil { return nil, fmt.Errorf("could not get network map: %w", err) } diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 8daf38217..efa4a5b06 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -1,6 +1,7 @@ package placement import ( + "context" "errors" "fmt" "slices" @@ -21,7 +22,7 @@ type Builder interface { // // Must return all container nodes if object identifier // is nil. - BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) + BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) } type NodeState interface { @@ -78,7 +79,7 @@ func defaultCfg() *cfg { } // NewTraverser creates, initializes with options and returns Traverser instance. 
-func NewTraverser(opts ...Option) (*Traverser, error) { +func NewTraverser(ctx context.Context, opts ...Option) (*Traverser, error) { cfg := defaultCfg() for i := range opts { @@ -98,7 +99,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) { return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy) } - ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy) + ns, err := cfg.builder.BuildPlacement(ctx, cfg.cnr, cfg.obj, cfg.policy) if err != nil { return nil, fmt.Errorf("could not build placement: %w", err) } diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index 624efb007..9c825bf19 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -1,6 +1,7 @@ package placement import ( + "context" "slices" "strconv" "testing" @@ -18,7 +19,7 @@ type testBuilder struct { vectors [][]netmap.NodeInfo } -func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (b testBuilder) BuildPlacement(context.Context, cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return b.vectors, nil } @@ -102,7 +103,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithoutSuccessTracking(), @@ -131,7 +132,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -160,7 +161,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodesCopy := copyVectors(nodes) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), ) @@ -201,7 +202,7 @@ func TestTraverserObjectScenarios(t *testing.T) { nodes, cnr := testPlacement(selectors, replicas) - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local) @@ -276,7 +277,7 @@ func TestTraverserRemValues(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{vectors: nodesCopy}), WithCopyNumbers(testCase.copyNumbers), @@ -322,7 +323,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -374,7 +375,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -445,7 +446,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { NewAttributeMetric("UN-LOCODE"), } - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -483,7 +484,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = NewTraverser( + tr, err = 
NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -516,7 +517,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { nodesCopy = copyVectors(nodes) - tr, err = NewTraverser( + tr, err = NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, @@ -567,7 +568,7 @@ func TestTraverserPriorityMetrics(t *testing.T) { m := []Metric{NewAttributeMetric("ClusterName")} - tr, err := NewTraverser( + tr, err := NewTraverser(context.Background(), ForContainer(cnr), UseBuilder(&testBuilder{ vectors: nodesCopy, diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go index 7df372476..dcaaec0b4 100644 --- a/pkg/services/policer/check.go +++ b/pkg/services/policer/check.go @@ -28,10 +28,10 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er )) defer span.End() - cnr, err := p.cnrSrc.Get(objInfo.Address.Container()) + cnr, err := p.cnrSrc.Get(ctx, objInfo.Address.Container()) if err != nil { if client.IsErrContainerNotFound(err) { - existed, errWasRemoved := containercore.WasRemoved(p.cnrSrc, objInfo.Address.Container()) + existed, errWasRemoved := containercore.WasRemoved(ctx, p.cnrSrc, objInfo.Address.Container()) if errWasRemoved != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotConfirmContainerRemoval, errWasRemoved) } else if existed { @@ -56,7 +56,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er func (p *Policer) processRepContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { idObj := objInfo.Address.Object() idCnr := objInfo.Address.Container() - nn, err := p.placementBuilder.BuildPlacement(idCnr, &idObj, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, idCnr, &idObj, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index f6d3b9ea1..1ee31d480 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -39,7 +39,7 @@ func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectco // All of them must be stored on all of the container nodes. 
func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error { objID := objInfo.Address.Object() - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objID, policy) + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objID, policy) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } @@ -69,7 +69,7 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec } func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error { - nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) + nn, err := p.placementBuilder.BuildPlacement(ctx, objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy()) if err != nil { return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err) } diff --git a/pkg/services/policer/ec_test.go b/pkg/services/policer/ec_test.go index e230153f9..c6980536b 100644 --- a/pkg/services/policer/ec_test.go +++ b/pkg/services/policer/ec_test.go @@ -36,7 +36,7 @@ func TestECChunkHasValidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -123,7 +123,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(chunkAddress.Container()) { return cnr, nil } @@ -448,7 +448,7 @@ func TestECChunkRestore(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } @@ -599,7 +599,7 @@ func TestECChunkRestoreNodeOff(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { if id.Equals(parentAddress.Container()) { return cnr, nil } diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index 9b9ab99ac..cef4c36d9 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -36,10 +36,10 @@ func TestBuryObjectWithoutContainer(t *testing.T) { // Container source and bury function buryCh := make(chan oid.Address) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -239,14 +239,14 @@ func TestProcessObject(t *testing.T) { cnr.Value.Init() cnr.Value.SetPlacementPolicy(policy) containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx 
context.Context, id cid.ID) (*container.Container, error) { if id.Equals(addr.Container()) { return cnr, nil } t.Errorf("unexpected container requested: got=%v, want=%v", id, addr.Container()) return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -304,10 +304,10 @@ func TestProcessObjectError(t *testing.T) { cnr := &container.Container{} cnr.Value.Init() source := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return nil, new(apistatus.ContainerNotFound) }, } @@ -352,10 +352,10 @@ func TestIteratorContract(t *testing.T) { } containerSrc := containerSrc{ - get: func(id cid.ID) (*container.Container, error) { + get: func(ctx context.Context, id cid.ID) (*container.Container, error) { return nil, new(apistatus.ContainerNotFound) }, - deletionInfo: func(id cid.ID) (*container.DelInfo, error) { + deletionInfo: func(ctx context.Context, id cid.ID) (*container.DelInfo, error) { return &container.DelInfo{}, nil }, } @@ -444,18 +444,22 @@ func (it *sliceKeySpaceIterator) Rewind() { } type containerSrc struct { - get func(id cid.ID) (*container.Container, error) - deletionInfo func(id cid.ID) (*container.DelInfo, error) + get func(ctx context.Context, id cid.ID) (*container.Container, error) + deletionInfo func(ctx context.Context, id cid.ID) (*container.DelInfo, error) } -func (f containerSrc) Get(id cid.ID) (*container.Container, error) { return f.get(id) } +func (f containerSrc) Get(ctx context.Context, id cid.ID) (*container.Container, error) { + return f.get(ctx, id) +} -func (f containerSrc) DeletionInfo(id cid.ID) (*container.DelInfo, error) { return f.deletionInfo(id) } +func (f containerSrc) DeletionInfo(ctx context.Context, id cid.ID) (*container.DelInfo, error) { + return f.deletionInfo(ctx, id) +} // placementBuilderFunc is a placement.Builder backed by a function type placementBuilderFunc func(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) -func (f placementBuilderFunc) BuildPlacement(c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { +func (f placementBuilderFunc) BuildPlacement(ctx context.Context, c cid.ID, o *oid.ID, p netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) { return f(c, o, p) } diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index 606044f8e..c4b03cbe6 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -36,7 +36,7 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()), nativeschema.PropertyKeyActorRole: schemaRole, } - reqProps, err = s.fillWithUserClaimTags(reqProps, publicKey) + reqProps, err = s.fillWithUserClaimTags(ctx, reqProps, publicKey) if err != nil { return aperequest.Request{}, err } @@ -74,7 +74,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, return fmt.Errorf("failed to create ape request: %w", err) } - return s.apeChecker.CheckAPE(checkercore.CheckPrm{ + return s.apeChecker.CheckAPE(ctx, checkercore.CheckPrm{ Request: request, Namespace: namespace, Container: cid, @@ -85,11 +85,11 @@ func 
(s *Service) checkAPE(ctx context.Context, bt *bearer.Token, } // fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key. -func (s *Service) fillWithUserClaimTags(reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { +func (s *Service) fillWithUserClaimTags(ctx context.Context, reqProps map[string]string, publicKey *keys.PublicKey) (map[string]string, error) { if reqProps == nil { reqProps = make(map[string]string) } - props, err := aperequest.FormFrostfsIDRequestProperties(s.frostfsidSubjectProvider, publicKey) + props, err := aperequest.FormFrostfsIDRequestProperties(ctx, s.frostfsidSubjectProvider, publicKey) if err != nil { return reqProps, err } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go index 3f94925b5..0afc7660a 100644 --- a/pkg/services/tree/ape_test.go +++ b/pkg/services/tree/ape_test.go @@ -37,7 +37,7 @@ type frostfsIDProviderMock struct { subjectsExtended map[util.Uint160]*client.SubjectExtended } -func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, error) { +func (f *frostfsIDProviderMock) GetSubject(ctx context.Context, key util.Uint160) (*client.Subject, error) { v, ok := f.subjects[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) @@ -45,7 +45,7 @@ func (f *frostfsIDProviderMock) GetSubject(key util.Uint160) (*client.Subject, e return v, nil } -func (f *frostfsIDProviderMock) GetSubjectExtended(key util.Uint160) (*client.SubjectExtended, error) { +func (f *frostfsIDProviderMock) GetSubjectExtended(ctx context.Context, key util.Uint160) (*client.SubjectExtended, error) { v, ok := f.subjectsExtended[key] if !ok { return nil, fmt.Errorf("%s", frostfsidcore.SubjectNotFoundErrorMessage) diff --git a/pkg/services/tree/container.go b/pkg/services/tree/container.go index 435257550..c641a21a2 100644 --- a/pkg/services/tree/container.go +++ b/pkg/services/tree/container.go @@ -2,6 +2,7 @@ package tree import ( "bytes" + "context" "crypto/sha256" "fmt" "sync" @@ -32,13 +33,13 @@ type containerCacheItem struct { const defaultContainerCacheSize = 10 // getContainerNodes returns nodes in the container and a position of local key in the list. -func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { - nm, err := s.nmSource.GetNetMap(0) +func (s *Service) getContainerNodes(ctx context.Context, cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, error) { + nm, err := s.nmSource.GetNetMap(ctx, 0) if err != nil { return nil, -1, fmt.Errorf("can't get netmap: %w", err) } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return nil, -1, fmt.Errorf("can't get container: %w", err) } diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index a3f488009..4ad760846 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -1,6 +1,7 @@ package tree import ( + "context" "crypto/ecdsa" "time" @@ -18,12 +19,12 @@ import ( type ContainerSource interface { container.Source - DeletionInfo(cid.ID) (*container.DelInfo, error) + DeletionInfo(ctx context.Context, cid cid.ID) (*container.DelInfo, error) // List must return list of all the containers in the FrostFS network // at the moment of a call and any error that does not allow fetching // container information. 
- List() ([]cid.ID, error) + List(ctx context.Context) ([]cid.ID, error) } type cfg struct { diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index bcbb73589..164815c76 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -149,7 +149,7 @@ func (s *Service) replicateLoop(ctx context.Context) { return case op := <-s.replicateCh: start := time.Now() - err := s.replicate(op) + err := s.replicate(ctx, op) if err != nil { s.log.Error(ctx, logs.TreeErrorDuringReplication, zap.Error(err), @@ -161,14 +161,14 @@ func (s *Service) replicateLoop(ctx context.Context) { } } -func (s *Service) replicate(op movePair) error { +func (s *Service) replicate(ctx context.Context, op movePair) error { req := newApplyRequest(&op) err := SignMessage(req, s.key) if err != nil { return fmt.Errorf("can't sign data: %w", err) } - nodes, localIndex, err := s.getContainerNodes(op.cid) + nodes, localIndex, err := s.getContainerNodes(ctx, op.cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 2df3c08e6..3c0214a98 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -118,7 +118,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -161,7 +161,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -216,7 +216,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -260,7 +260,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -303,7 +303,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -376,7 +376,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } @@ -586,7 +586,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di } // Apply locally applies operation from the remote node to the tree. 
-func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, error) { +func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { err := verifyMessage(req) if err != nil { return nil, err @@ -599,7 +599,7 @@ func (s *Service) Apply(_ context.Context, req *ApplyRequest) (*ApplyResponse, e key := req.GetSignature().GetKey() - _, pos, _, err := s.getContainerInfo(cid, key) + _, pos, _, err := s.getContainerInfo(ctx, cid, key) if err != nil { return nil, err } @@ -641,7 +641,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) return err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(srv.Context(), cid) if err != nil { return err } @@ -713,7 +713,7 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList return nil, err } - ns, pos, err := s.getContainerNodes(cid) + ns, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, err } @@ -755,8 +755,8 @@ func metaToProto(arr []pilorama.KeyValue) []KeyValue { // getContainerInfo returns the list of container nodes, position in the container for the node // with pub key and total amount of nodes in all replicas. -func (s *Service) getContainerInfo(cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { - cntNodes, _, err := s.getContainerNodes(cid) +func (s *Service) getContainerInfo(ctx context.Context, cid cidSDK.ID, pub []byte) ([]netmapSDK.NodeInfo, int, int, error) { + cntNodes, _, err := s.getContainerNodes(ctx, cid) if err != nil { return nil, 0, 0, err } diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index b0f00615a..d15438e81 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -49,7 +49,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return err } - cnr, err := s.cnrSource.Get(cid) + cnr, err := s.cnrSource.Get(ctx, cid) if err != nil { return fmt.Errorf("can't get container %s: %w", cid, err) } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 7bc5002dc..97f8a727a 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -39,7 +39,7 @@ type dummySubjectProvider struct { subjects map[util.Uint160]client.SubjectExtended } -func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) { +func (s dummySubjectProvider) GetSubject(ctx context.Context, addr util.Uint160) (*client.Subject, error) { res := s.subjects[addr] return &client.Subject{ PrimaryKey: res.PrimaryKey, @@ -50,7 +50,7 @@ func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, er }, nil } -func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) { +func (s dummySubjectProvider) GetSubjectExtended(ctx context.Context, addr util.Uint160) (*client.SubjectExtended, error) { res := s.subjects[addr] return &res, nil } @@ -65,7 +65,7 @@ func (s dummyEpochSource) CurrentEpoch() uint64 { type dummyContainerSource map[string]*containercore.Container -func (s dummyContainerSource) List() ([]cid.ID, error) { +func (s dummyContainerSource) List(context.Context) ([]cid.ID, error) { res := make([]cid.ID, 0, len(s)) var cnr cid.ID @@ -81,7 +81,7 @@ func (s dummyContainerSource) List() ([]cid.ID, error) { return res, nil } -func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { +func (s dummyContainerSource) Get(ctx context.Context, id 
cid.ID) (*containercore.Container, error) { cnt, ok := s[id.String()] if !ok { return nil, errors.New("container not found") @@ -89,7 +89,7 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) { return cnt, nil } -func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) { +func (s dummyContainerSource) DeletionInfo(ctx context.Context, id cid.ID) (*containercore.DelInfo, error) { return &containercore.DelInfo{}, nil } diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 1a455def9..9b177d6b6 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -39,7 +39,7 @@ const defaultSyncWorkerCount = 20 // tree IDs from the other container nodes. Returns ErrNotInContainer if the node // is not included in the container. func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -112,7 +112,7 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { // SynchronizeTree tries to synchronize log starting from the last stored height. func (s *Service) SynchronizeTree(ctx context.Context, cid cid.ID, treeID string) error { - nodes, pos, err := s.getContainerNodes(cid) + nodes, pos, err := s.getContainerNodes(ctx, cid) if err != nil { return fmt.Errorf("can't get container nodes: %w", err) } @@ -393,7 +393,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cfg.cnrSource.List() + cnrs, err := s.cfg.cnrSource.List(ctx) if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) @@ -463,7 +463,7 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID continue } - existed, err := containerCore.WasRemoved(s.cnrSource, cnr) + existed, err := containerCore.WasRemoved(ctx, s.cnrSource, cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCheckIfContainerExisted, zap.Stringer("cid", cnr), @@ -493,7 +493,7 @@ func (s *Service) containersToSync(ctx context.Context, cnrs []cid.ID) (map[cid. 
cnrsToSync := make([]cid.ID, 0, len(cnrs)) for _, cnr := range cnrs { - _, pos, err := s.getContainerNodes(cnr) + _, pos, err := s.getContainerNodes(ctx, cnr) if err != nil { s.log.Error(ctx, logs.TreeCouldNotCalculateContainerNodes, zap.Stringer("cid", cnr), From 69c35b1d61fad2d08edf008f709f19ad45fdf250 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 6 Feb 2025 21:19:24 +0300 Subject: [PATCH 352/591] [#1637] govulncheck: Use patch release with security fixes Signed-off-by: Alexander Chuprov --- .forgejo/workflows/vulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 8a5a818aa..7c89a3555 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23.5' + go-version: '1.23.6' - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest From 155f9eecb0159ac98ea85de0fefe04bbe24148c7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 21 Jan 2025 14:15:59 +0300 Subject: [PATCH 353/591] [#1608] config: Add QoS section and config Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 + cmd/frostfs-node/config/qos/config.go | 46 ++++++++++++++++++++++ cmd/frostfs-node/config/qos/config_test.go | 40 +++++++++++++++++++ cmd/frostfs-node/qos.go | 20 ++++++++++ config/example/node.env | 3 ++ config/example/node.json | 14 +++++++ config/example/node.yaml | 10 +++++ docs/storage-node-configuration.md | 20 +++++++++- 8 files changed, 153 insertions(+), 1 deletion(-) create mode 100644 cmd/frostfs-node/config/qos/config.go create mode 100644 cmd/frostfs-node/config/qos/config_test.go create mode 100644 cmd/frostfs-node/qos.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 75d6f6dec..511777566 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -493,6 +493,7 @@ type cfg struct { cfgNetmap cfgNetmap cfgControlService cfgControlService cfgObject cfgObject + cfgQoSService cfgQoSService } // ReadCurrentNetMap reads network map which has been cached at the diff --git a/cmd/frostfs-node/config/qos/config.go b/cmd/frostfs-node/config/qos/config.go new file mode 100644 index 000000000..85f8180ed --- /dev/null +++ b/cmd/frostfs-node/config/qos/config.go @@ -0,0 +1,46 @@ +package qos + +import ( + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +const ( + subsection = "qos" + criticalSubSection = "critical" + internalSubSection = "internal" +) + +// CriticalAuthorizedKeys parses and returns an array of "critical.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. +func CriticalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, criticalSubSection) +} + +// InternalAuthorizedKeys parses and returns an array of "internal.authorized_keys" config +// parameter from "qos" section. +// +// Returns an empty list if not set. 
+func InternalAuthorizedKeys(c *config.Config) keys.PublicKeys { + return authorizedKeys(c, internalSubSection) +} + +func authorizedKeys(c *config.Config, sub string) keys.PublicKeys { + strKeys := config.StringSliceSafe(c.Sub(subsection).Sub(sub), "authorized_keys") + pubs := make(keys.PublicKeys, 0, len(strKeys)) + + for i := range strKeys { + pub, err := keys.NewPublicKeyFromString(strKeys[i]) + if err != nil { + panic(fmt.Errorf("invalid authorized key %s for qos.%s: %w", strKeys[i], sub, err)) + } + + pubs = append(pubs, pub) + } + + return pubs +} diff --git a/cmd/frostfs-node/config/qos/config_test.go b/cmd/frostfs-node/config/qos/config_test.go new file mode 100644 index 000000000..b3b6019cc --- /dev/null +++ b/cmd/frostfs-node/config/qos/config_test.go @@ -0,0 +1,40 @@ +package qos + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + empty := configtest.EmptyConfig() + + require.Empty(t, CriticalAuthorizedKeys(empty)) + require.Empty(t, InternalAuthorizedKeys(empty)) + }) + + const path = "../../../../config/example/node" + + criticalPubs := make(keys.PublicKeys, 2) + criticalPubs[0], _ = keys.NewPublicKeyFromString("035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11") + criticalPubs[1], _ = keys.NewPublicKeyFromString("028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6") + + internalPubs := make(keys.PublicKeys, 2) + internalPubs[0], _ = keys.NewPublicKeyFromString("02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2") + internalPubs[1], _ = keys.NewPublicKeyFromString("031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a") + + fileConfigTest := func(c *config.Config) { + require.Equal(t, criticalPubs, CriticalAuthorizedKeys(c)) + require.Equal(t, internalPubs, InternalAuthorizedKeys(c)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) +} diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go new file mode 100644 index 000000000..20ef43230 --- /dev/null +++ b/cmd/frostfs-node/qos.go @@ -0,0 +1,20 @@ +package main + +import qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" + +type cfgQoSService struct{} + +func initQoSService(c *cfg) { + criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) + internalPubs := qosconfig.InternalAuthorizedKeys(c.appCfg) + rawCriticalPubs := make([][]byte, 0, len(criticalPubs)) + rawInternalPubs := make([][]byte, 0, len(internalPubs)) + for i := range criticalPubs { + rawCriticalPubs = append(rawCriticalPubs, criticalPubs[i].Bytes()) + } + for i := range internalPubs { + rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) + } + + c.cfgQoSService = cfgQoSService{} +} diff --git a/config/example/node.env b/config/example/node.env index b2a0633a9..2ba432b1b 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -225,3 +225,6 @@ FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185" FROSTFS_MULTINET_BALANCER=roundrobin FROSTFS_MULTINET_RESTRICT=false FROSTFS_MULTINET_FALLBACK_DELAY=350ms + +FROSTFS_QOS_CRITICAL_AUTHORIZED_KEYS="035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 
028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" +FROSTFS_QOS_INTERNAL_AUTHORIZED_KEYS="02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" diff --git a/config/example/node.json b/config/example/node.json index f3192ac2f..cfde8bcc7 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -305,5 +305,19 @@ "balancer": "roundrobin", "restrict": false, "fallback_delay": "350ms" + }, + "qos": { + "critical": { + "authorized_keys": [ + "035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11", + "028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6" + ] + }, + "internal": { + "authorized_keys": [ + "02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2", + "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a" + ] + } } } diff --git a/config/example/node.yaml b/config/example/node.yaml index c5acf5386..1f8ec843d 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -270,3 +270,13 @@ multinet: balancer: roundrobin restrict: false fallback_delay: 350ms + +qos: + critical: + authorized_keys: # list of hex-encoded public keys that have rights to use `critical` IO tag + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: # list of hex-encoded public keys that have rights to use `internal` IO tag + - 02b3622bf4017bdfe317c58aed5f4c753f206b7db896046fa7d774bbc4bf7f8dc2 + - 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 98d72cb69..de2729c68 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -26,7 +26,8 @@ There are some custom types used for brevity: | `storage` | [Storage engine configuration](#storage-section) | | `runtime` | [Runtime configuration](#runtime-section) | | `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | +| `multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -471,3 +472,20 @@ multinet: | `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". | | `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. | | `fallback_delay` | `duration` | 350ms | Delay before fallback to secondary IP addresses in case of hostname resolve. | + +# `qos` section +```yaml +qos: + critical: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 + internal: + authorized_keys: + - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 + - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6 +``` +| Parameter | Type | Default value | Description | +| -------------------------- | -------------- | ------------- | --------------------------------------------------------------------------- | +| `critical.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `critical` are allowed. | +| `internal.authorized_keys` | `[]public key` | empty | List of public keys for which requests with the tag `internal` are allowed. 
| From 9729f31e5c96028a434a175cb4c2ce51b74875a9 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 11:26:09 +0300 Subject: [PATCH 354/591] [#1608] grpc: Add QoS interceptors for server and clients Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/internal/client/sdk.go | 5 +++-- cmd/frostfs-cli/modules/tree/client.go | 3 +++ cmd/frostfs-node/grpc.go | 3 +++ go.mod | 1 + go.sum | 2 ++ pkg/network/cache/multi.go | 3 +++ pkg/services/tree/cache.go | 3 +++ pkg/services/tree/sync.go | 3 +++ 8 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 2d9c45cbd..71c35bd2c 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -13,6 +13,7 @@ import ( commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -56,8 +57,8 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), - grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor(), qos.NewUnaryClientInteceptor()), + grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor(), qos.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, } diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index a70624ac8..421f11532 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" @@ -36,10 +37,12 @@ func _client() (tree.TreeServiceClient, error) { grpc.WithChainUnaryInterceptor( metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), + qos.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), + qos.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), } diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 6105be861..4d679e4cc 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -12,6 +12,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -130,10 +131,12 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr serverOpts := []grpc.ServerOption{ grpc.MaxRecvMsgSize(maxRecvMsgSize), grpc.ChainUnaryInterceptor( + qos.NewUnaryServerInterceptor(), 
metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), ), grpc.ChainStreamInterceptor( + qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), ), diff --git a/go.mod b/go.mod index cc6b0a202..8f2e7bc51 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index eae467b31..e92b64886 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 1bcb83259..5c3beb553 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -64,10 +65,12 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpc.WithChainUnaryInterceptor( metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), + qos.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), + qos.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index ac80d0e4c..7f1dcf07c 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -12,6 +12,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing 
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -97,10 +98,12 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* grpc.WithChainUnaryInterceptor( metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), + qos.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), + qos.NewStreamClientInterceptor(), ), grpc.WithContextDialer(c.ds.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 9b177d6b6..a0485d9d8 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -20,6 +20,7 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" @@ -342,10 +343,12 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { grpc.WithChainUnaryInterceptor( metrics.NewUnaryClientInterceptor(), tracing_grpc.NewUnaryClientInteceptor(), + qos.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), + qos.NewStreamClientInterceptor(), ), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), From f6b3f79e8916f826f55bb28aff799699d1465be0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 11:28:52 +0300 Subject: [PATCH 355/591] [#1608] qos: Add qos service to adjust incoming IO tags Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/main.go | 1 + cmd/frostfs-node/qos.go | 81 ++++++++++++++++++- internal/logs/logs.go | 3 + .../metabase/upgrade_test.go | 2 +- 4 files changed, 83 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index 3c15dc439..b53bd8823 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -101,6 +101,7 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "gRPC", func(c *cfg) { initGRPC(ctx, c) }) initAndLog(ctx, c, "netmap", func(c *cfg) { initNetmapService(ctx, c) }) + initAndLog(ctx, c, "qos", func(c *cfg) { initQoSService(c) }) initAccessPolicyEngine(ctx, c) initAndLog(ctx, c, "access policy engine", func(c *cfg) { diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go index 20ef43230..bfc278333 100644 --- a/cmd/frostfs-node/qos.go +++ b/cmd/frostfs-node/qos.go @@ -1,8 +1,24 @@ package main -import qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" +import ( + "bytes" + "context" -type cfgQoSService struct{} + qosconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + qosTagging 
"git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "go.uber.org/zap" +) + +type cfgQoSService struct { + netmapSource netmap.Source + logger *logger.Logger + allowedCriticalPubs [][]byte + allowedInternalPubs [][]byte +} func initQoSService(c *cfg) { criticalPubs := qosconfig.CriticalAuthorizedKeys(c.appCfg) @@ -16,5 +32,64 @@ func initQoSService(c *cfg) { rawInternalPubs = append(rawInternalPubs, internalPubs[i].Bytes()) } - c.cfgQoSService = cfgQoSService{} + c.cfgQoSService = cfgQoSService{ + netmapSource: c.netMapSource, + logger: c.log, + allowedCriticalPubs: rawCriticalPubs, + allowedInternalPubs: rawInternalPubs, + } +} + +func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { + rawTag, defined := qosTagging.IOTagFromContext(ctx) + if !defined { + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + ioTag, err := qos.FromRawString(rawTag) + if err != nil { + s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + + switch ioTag { + case qos.IOTagClient: + return ctx + case qos.IOTagCritical: + for _, pk := range s.allowedCriticalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } + } + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + case qos.IOTagInternal: + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, requestSignPublicKey) { + return ctx + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), requestSignPublicKey) { + return ctx + } + } + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + default: + s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) + return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) + } } diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 0610dc175..6a72644e5 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -510,4 +510,7 @@ const ( BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file" WritecacheCantGetObject = "can't get an object from fstree" FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" + FailedToParseIncomingIOTag = "failed to parse incoming IO tag" + NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" + FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`" ) diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index 5444264be..c90de4dd6 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -45,7 +45,7 @@ func TestUpgradeV2ToV3(t *testing.T) { type testContainerInfoProvider struct{} -func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) { +func (p *testContainerInfoProvider) 
Info(ctx context.Context, id cid.ID) (container.Info, error) { return container.Info{}, nil } From 12da2f826278762e8879994c2788e88b9084afe4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 11:29:22 +0300 Subject: [PATCH 356/591] [#1608] object: Add IO tag adjustment layer Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/object.go | 5 +- internal/qos/tags.go | 39 +++++++++++ pkg/services/object/qos.go | 132 +++++++++++++++++++++++++++++++++++++ 3 files changed, 174 insertions(+), 2 deletions(-) create mode 100644 internal/qos/tags.go create mode 100644 pkg/services/object/qos.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 77446b81c..40d3cc1cd 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -168,7 +168,7 @@ func initObjectService(c *cfg) { sPatch := createPatchSvc(sGet, sPut) // build service pipeline - // grpc | audit | | signature | response | acl | ape | split + // grpc | audit | qos | | signature | response | acl | ape | split splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) @@ -191,7 +191,8 @@ func initObjectService(c *cfg) { c.shared.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - auditSvc := objectService.NewAuditService(c.shared.metricsSvc, c.log, c.audit) + qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService) + auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { diff --git a/internal/qos/tags.go b/internal/qos/tags.go new file mode 100644 index 000000000..6a9a7f7a4 --- /dev/null +++ b/internal/qos/tags.go @@ -0,0 +1,39 @@ +package qos + +import "fmt" + +type IOTag string + +const ( + IOTagClient IOTag = "client" + IOTagInternal IOTag = "internal" + IOTagBackground IOTag = "background" + IOTagWritecache IOTag = "writecache" + IOTagPolicer IOTag = "policer" + IOTagCritical IOTag = "critical" + + ioTagUnknown IOTag = "" +) + +func FromRawString(s string) (IOTag, error) { + switch s { + case string(IOTagCritical): + return IOTagCritical, nil + case string(IOTagClient): + return IOTagClient, nil + case string(IOTagInternal): + return IOTagInternal, nil + case string(IOTagBackground): + return IOTagBackground, nil + case string(IOTagWritecache): + return IOTagWritecache, nil + case string(IOTagPolicer): + return IOTagPolicer, nil + default: + return ioTagUnknown, fmt.Errorf("unknown tag %s", s) + } +} + +func (t IOTag) String() string { + return string(t) +} diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go new file mode 100644 index 000000000..145a316e2 --- /dev/null +++ b/pkg/services/object/qos.go @@ -0,0 +1,132 @@ +package object + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" +) + +var _ ServiceServer = (*qosObjectService)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type qosObjectService struct { + next ServiceServer + adj AdjustIOTag +} + +func NewQoSObjectService(next ServiceServer, adjIOTag AdjustIOTag) ServiceServer { + return &qosObjectService{ + next: next, + adj: adjIOTag, + } +} + +func (q *qosObjectService) Delete(ctx context.Context, req *object.DeleteRequest) (*object.DeleteResponse, error) { + ctx = 
q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Delete(ctx, req) +} + +func (q *qosObjectService) Get(req *object.GetRequest, s GetObjectStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Get(req, &qosReadStream[*object.GetResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRange(req *object.GetRangeRequest, s GetObjectRangeStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRange(req, &qosReadStream[*object.GetRangeResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +func (q *qosObjectService) GetRangeHash(ctx context.Context, req *object.GetRangeHashRequest) (*object.GetRangeHashResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.GetRangeHash(ctx, req) +} + +func (q *qosObjectService) Head(ctx context.Context, req *object.HeadRequest) (*object.HeadResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Head(ctx, req) +} + +func (q *qosObjectService) Patch(ctx context.Context) (PatchObjectStream, error) { + s, err := q.next.Patch(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PatchRequest, *object.PatchResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) Put(ctx context.Context) (PutObjectStream, error) { + s, err := q.next.Put(ctx) + if err != nil { + return nil, err + } + return &qosWriteStream[*object.PutRequest, *object.PutResponse]{ + s: s, + adj: q.adj, + }, nil +} + +func (q *qosObjectService) PutSingle(ctx context.Context, req *object.PutSingleRequest) (*object.PutSingleResponse, error) { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.PutSingle(ctx, req) +} + +func (q *qosObjectService) Search(req *object.SearchRequest, s SearchStream) error { + ctx := q.adj.AdjustIncomingTag(s.Context(), req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.next.Search(req, &qosReadStream[*object.SearchResponse]{ + ctxF: func() context.Context { return ctx }, + sender: s, + }) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosReadStream[T any] struct { + sender qosSend[T] + ctxF func() context.Context +} + +func (g *qosReadStream[T]) Context() context.Context { + return g.ctxF() +} + +func (g *qosReadStream[T]) Send(resp T) error { + return g.sender.Send(resp) +} + +type qosVerificationHeader interface { + GetVerificationHeader() *session.RequestVerificationHeader +} + +type qosSendRecv[TReq qosVerificationHeader, TResp any] interface { + Send(context.Context, TReq) error + CloseAndRecv(context.Context) (TResp, error) +} + +type qosWriteStream[TReq qosVerificationHeader, TResp any] struct { + s qosSendRecv[TReq, TResp] + adj AdjustIOTag +} + +func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) { + return q.s.CloseAndRecv(ctx) +} + +func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + return q.s.Send(ctx, req) +} From 170860c14a7778ff141a136899e52ba20ee6607b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 11:44:20 
+0300 Subject: [PATCH 357/591] [#1608] logger: Add IO tag logging Signed-off-by: Dmitrii Stepanov --- pkg/util/logger/log.go | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/pkg/util/logger/log.go b/pkg/util/logger/log.go index 269e07d90..413b1d9aa 100644 --- a/pkg/util/logger/log.go +++ b/pkg/util/logger/log.go @@ -4,37 +4,32 @@ import ( "context" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing" + qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" ) func (l *Logger) Debug(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Debug(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Debug(msg, fields...) + l.z.Debug(msg, appendContext(ctx, fields...)...) } func (l *Logger) Info(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Info(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Info(msg, fields...) + l.z.Info(msg, appendContext(ctx, fields...)...) } func (l *Logger) Warn(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Warn(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Warn(msg, fields...) + l.z.Warn(msg, appendContext(ctx, fields...)...) } func (l *Logger) Error(ctx context.Context, msg string, fields ...zap.Field) { - if traceID := tracing.GetTraceID(ctx); traceID != "" { - l.z.Error(msg, append(fields, zap.String("trace_id", traceID))...) - return - } - l.z.Error(msg, fields...) + l.z.Error(msg, appendContext(ctx, fields...)...) +} + +func appendContext(ctx context.Context, fields ...zap.Field) []zap.Field { + if traceID := tracing.GetTraceID(ctx); traceID != "" { + fields = append(fields, zap.String("trace_id", traceID)) + } + if ioTag, ioTagDefined := qos.IOTagFromContext(ctx); ioTagDefined { + fields = append(fields, zap.String("io_tag", ioTag)) + } + return fields } From dc6aea7b7956bfca222288fa057eecdaca35ca4d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 15:56:31 +0300 Subject: [PATCH 358/591] [#1608] control: Use IO tag `critical` Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/control.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index ecd82bba5..1d9ac3df0 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -7,9 +7,13 @@ import ( controlconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/control" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" controlSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" + metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" + tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -50,7 +54,17 @@ func initControlService(ctx context.Context, c *cfg) { return } - c.cfgControlService.server = grpc.NewServer() + c.cfgControlService.server = grpc.NewServer( + grpc.ChainUnaryInterceptor( + func(ctx context.Context, req any, info *grpc.UnaryServerInfo, 
handler grpc.UnaryHandler) (resp any, err error) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagCritical.String()) + return handler(ctx, req) + }, + metrics.NewUnaryServerInterceptor(), + tracing.NewUnaryServerInterceptor(), + ), + // control service has no stream methods, so no stream interceptors added + ) c.onShutdown(func() { stopGRPC(ctx, "FrostFS Control API", c.cfgControlService.server, c.log) From acec938b2df8705c008efa2f47b4b2ecd9767245 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 17:43:53 +0300 Subject: [PATCH 359/591] [#1608] qos: Add client grpc interceptors `qos` client interceptors replace internal IO tags `writecache`, `policer` and `background` with `internal` IO tag for outcomming RPC. Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/control.go | 6 +---- internal/qos/grpc.go | 51 +++++++++++++++++++++++++++++++++++++ pkg/network/cache/multi.go | 9 ++++--- pkg/services/tree/cache.go | 9 ++++--- pkg/services/tree/sync.go | 9 ++++--- 5 files changed, 70 insertions(+), 14 deletions(-) create mode 100644 internal/qos/grpc.go diff --git a/cmd/frostfs-node/control.go b/cmd/frostfs-node/control.go index 1d9ac3df0..1825013c7 100644 --- a/cmd/frostfs-node/control.go +++ b/cmd/frostfs-node/control.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -56,10 +55,7 @@ func initControlService(ctx context.Context, c *cfg) { c.cfgControlService.server = grpc.NewServer( grpc.ChainUnaryInterceptor( - func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagCritical.String()) - return handler(ctx, req) - }, + qos.NewSetCriticalIOTagUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), ), diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go new file mode 100644 index 000000000..c253f1e9d --- /dev/null +++ b/internal/qos/grpc.go @@ -0,0 +1,51 @@ +package qos + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "google.golang.org/grpc" +) + +func NewSetCriticalIOTagUnaryServerInterceptor() grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx = tagging.ContextWithIOTag(ctx, IOTagCritical.String()) + return handler(ctx, req) + } +} + +func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return invoker(ctx, method, req, reply, cc, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return invoker(ctx, method, req, reply, cc, opts...) 
+ } +} + +func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientInterceptor { + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + rawTag, ok := tagging.IOTagFromContext(ctx) + if !ok { + return streamer(ctx, desc, cc, method, opts...) + } + tag, err := FromRawString(rawTag) + if err != nil { + tag = IOTagClient + } + if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { + tag = IOTagInternal + } + ctx = tagging.ContextWithIOTag(ctx, tag.String()) + return streamer(ctx, desc, cc, method, opts...) + } +} diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 5c3beb553..e94fa580a 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -7,11 +7,12 @@ import ( "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -63,14 +64,16 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpcOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), - qos.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), - qos.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 7f1dcf07c..125871fc4 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -9,10 +9,11 @@ import ( "time" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -96,14 +97,16 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), - qos.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), - 
qos.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), grpc.WithContextDialer(c.ds.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index a0485d9d8..3e0a45385 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -13,6 +13,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" @@ -20,7 +21,7 @@ import ( metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" @@ -341,14 +342,16 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { return grpc.NewClient(a.URIAddr(), grpc.WithChainUnaryInterceptor( + qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing_grpc.NewUnaryClientInteceptor(), - qos.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( + qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), metrics.NewStreamClientInterceptor(), tracing_grpc.NewStreamClientInterceptor(), - qos.NewStreamClientInterceptor(), + tagging.NewStreamClientInterceptor(), ), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), From bfe325e61d79cd297b8c7a3a153dd59a67238f18 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 17:45:40 +0300 Subject: [PATCH 360/591] [#1608] policer: Add IO tag to context Signed-off-by: Dmitrii Stepanov --- pkg/services/policer/process.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/services/policer/process.go b/pkg/services/policer/process.go index bd830d04e..635a5683b 100644 --- a/pkg/services/policer/process.go +++ b/pkg/services/policer/process.go @@ -7,7 +7,9 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -18,6 +20,7 @@ func (p *Policer) Run(ctx context.Context) { } func (p *Policer) shardPolicyWorker(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagPolicer.String()) for { select { case <-ctx.Done(): From fd0c6c461dd83daacd9e5d7e4427366ac2982f15 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 29 Jan 2025 17:47:10 +0300 Subject: [PATCH 361/591] [#1608] cli: Drop redundant interceptors There is no metrics or IO tags in cli. 
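For context: dropping the qos interceptors on the CLI side is safe, because the node-side qos service already falls back to the `client` IO tag for untagged requests. A minimal sketch of that fallback, as introduced in cmd/frostfs-node/qos.go earlier in this series:

    rawTag, defined := qosTagging.IOTagFromContext(ctx)
    if !defined {
        // Request carries no IO tag (e.g. it comes from frostfs-cli):
        // treat it as a regular client request.
        return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String())
    }

So the CLI has no need to tag its outgoing requests at all.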
Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/internal/client/sdk.go | 5 ++--- cmd/frostfs-cli/modules/tree/client.go | 6 ------ 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 71c35bd2c..2d9c45cbd 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -13,7 +13,6 @@ import ( commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -57,8 +56,8 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor(), qos.NewUnaryClientInteceptor()), - grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor(), qos.NewStreamClientInterceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), + grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, } diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 421f11532..c6953f126 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -9,9 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" @@ -35,14 +33,10 @@ func _client() (tree.TreeServiceClient, error) { opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInteceptor(), - qos.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( - metrics.NewStreamClientInterceptor(), tracing.NewStreamClientInterceptor(), - qos.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), } From abba5b20890d157305ab8581b8ae3c9dadf0da47 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 31 Jan 2025 10:12:46 +0300 Subject: [PATCH 362/591] [#1608] writecache: Add IO tag to flush worker Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/writecache/flush.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index d9e34ceab..3f9b36f9d 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -14,6 +15,7 @@ import ( 
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "go.opentelemetry.io/otel/attribute" @@ -35,6 +37,7 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagWritecache.String()) fl := newFlushLimiter(c.flushSizeLimit) c.wg.Add(1) go func() { From 92450a76ba11cb5c75eeffa1fe2513e49f09a4df Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 31 Jan 2025 11:00:28 +0300 Subject: [PATCH 363/591] [#1608] shard: Add IO tag to GC Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/gc.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 1b218a372..4a5ec7a71 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -6,11 +6,13 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.uber.org/zap" @@ -149,7 +151,7 @@ func (gc *gc) init(ctx context.Context) { if sz > 0 { gc.workerPool = gc.workerPoolInit(sz) } - + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) go gc.listenEvents(ctx) From 0671c277db2837fab94fdef2249b05f64caf2b73 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 31 Jan 2025 12:14:51 +0300 Subject: [PATCH 364/591] [#1608] tree: Add IO tag for tree sync requests Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/tree.go | 2 +- pkg/services/tree/qos.go | 101 +++++++++++++++++++++++++++++++++++ pkg/services/tree/service.go | 3 ++ 3 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 pkg/services/tree/qos.go diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index f8330a25e..65414f0ca 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -72,7 +72,7 @@ func initTreeService(c *cfg) { ) c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) { - tree.RegisterTreeServiceServer(s, c.treeService) + tree.RegisterTreeServiceServer(s, tree.NewIOTagAdjustServer(c.treeService, &c.cfgQoSService)) }) c.workers = append(c.workers, newWorkerFromFunc(func(ctx context.Context) { diff --git a/pkg/services/tree/qos.go b/pkg/services/tree/qos.go new file mode 100644 index 000000000..8f21686df --- /dev/null +++ b/pkg/services/tree/qos.go @@ -0,0 +1,101 @@ +package tree + +import ( + "context" + + "google.golang.org/grpc" +) + +var _ TreeServiceServer = (*ioTagAdjust)(nil) + +type AdjustIOTag interface { + AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context +} + +type 
ioTagAdjust struct { + s TreeServiceServer + a AdjustIOTag +} + +func NewIOTagAdjustServer(s TreeServiceServer, a AdjustIOTag) TreeServiceServer { + return &ioTagAdjust{ + s: s, + a: a, + } +} + +func (i *ioTagAdjust) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Add(ctx, req) +} + +func (i *ioTagAdjust) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.AddByPath(ctx, req) +} + +func (i *ioTagAdjust) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Apply(ctx, req) +} + +func (i *ioTagAdjust) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.GetNodeByPath(ctx, req) +} + +func (i *ioTagAdjust) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetOpLog(req, &qosServerWrapper[*GetOpLogResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + ctx := i.a.AdjustIncomingTag(srv.Context(), req.GetSignature().GetKey()) + return i.s.GetSubTree(req, &qosServerWrapper[*GetSubTreeResponse]{ + sender: srv, + ServerStream: srv, + ctxF: func() context.Context { return ctx }, + }) +} + +func (i *ioTagAdjust) Healthcheck(ctx context.Context, req *HealthcheckRequest) (*HealthcheckResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Healthcheck(ctx, req) +} + +func (i *ioTagAdjust) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Move(ctx, req) +} + +func (i *ioTagAdjust) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.Remove(ctx, req) +} + +func (i *ioTagAdjust) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + ctx = i.a.AdjustIncomingTag(ctx, req.GetSignature().GetKey()) + return i.s.TreeList(ctx, req) +} + +type qosSend[T any] interface { + Send(T) error +} + +type qosServerWrapper[T any] struct { + grpc.ServerStream + sender qosSend[T] + ctxF func() context.Context +} + +func (w *qosServerWrapper[T]) Send(resp T) error { + return w.sender.Send(resp) +} + +func (w *qosServerWrapper[T]) Context() context.Context { + return w.ctxF() +} diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 3c0214a98..2e9722e79 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -9,9 +9,11 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -83,6 +85,7 @@ func New(opts ...Option) *Service { // Start starts the service. func (s *Service) Start(ctx context.Context) { + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) From 5d79abe523b0254fe0484220d6d2c57a172090e1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Feb 2025 15:32:20 +0300 Subject: [PATCH 365/591] [#1608] shard: Add IO tag for rebuild Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/rebuild.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 10eb51a28..0593f5894 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -6,10 +6,12 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -103,6 +105,7 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo default: } log.Info(ctx, logs.BlobstoreRebuildStarted) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { From a7145ca9bff4ef15abf3f7f170e420f806ebc310 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 10 Feb 2025 15:26:12 +0300 Subject: [PATCH 366/591] [#1614] adm/frostfsid: Add 'set-kv' Signed-off-by: Alexander Chuprov --- .../modules/morph/frostfsid/frostfsid.go | 43 +++++++++++++++++++ .../internal/modules/morph/frostfsid/root.go | 1 + 2 files changed, 44 insertions(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index b229d0436..974299154 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,6 +1,7 @@ package frostfsid import ( + "errors" "fmt" "math/big" "sort" @@ -38,6 +39,11 @@ const ( groupIDFlag = "group-id" rootNamespacePlaceholder = "" + + keyFlag = "key" + keyDescFlag = "Key for storing a value in the subject's KV storage" + valueFlag = "value" + valueDescFlag = "Value to be stored in the subject's KV storage" ) var ( @@ -151,6 +157,15 @@ var ( }, Run: frostfsidListGroupSubjects, } + + frostfsidSetKVCmd = &cobra.Command{ + Use: "set-kv", + Short: "Store a key-value pair in the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidSetKV, + } ) func initFrostfsIDCreateNamespaceCmd() { @@ -236,6 +251,14 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") } +func initFrostfsIDSetKVCmd() { + Cmd.AddCommand(frostfsidSetKVCmd) + 
frostfsidSetKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidSetKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidSetKVCmd.Flags().String(keyFlag, "", keyDescFlag) + frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) +} + func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) @@ -403,6 +426,26 @@ func frostfsidRemoveSubjectFromGroup(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "remove subject from group error: %w", err) } +func frostfsidSetKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + value, _ := cmd.Flags().GetString(valueFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.SetSubjectKVCall(subjectAddress, key, value) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "set KV: %w", err) +} + func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 6ffcaa487..930865f81 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -12,6 +12,7 @@ func init() { initFrostfsIDAddSubjectToGroupCmd() initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() + initFrostfsIDSetKVCmd() initFrostfsIDAddSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd() } From 076952f4c73a8a2d7388f658344d8090a91f64a9 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 10 Feb 2025 15:28:29 +0300 Subject: [PATCH 367/591] [#1614] adm/frostfsid: Add 'delete-kv' Signed-off-by: Alexander Chuprov --- .../modules/morph/frostfsid/frostfsid.go | 34 +++++++++++++++++++ .../internal/modules/morph/frostfsid/root.go | 1 + 2 files changed, 35 insertions(+) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 974299154..4fbd0bfe1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -166,6 +166,14 @@ var ( }, Run: frostfsidSetKV, } + frostfsidDeleteKVCmd = &cobra.Command{ + Use: "delete-kv", + Short: "Delete a value from the subject's KV storage", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + }, + Run: frostfsidDeleteKV, + } ) func initFrostfsIDCreateNamespaceCmd() { @@ -259,6 +267,13 @@ func initFrostfsIDSetKVCmd() { frostfsidSetKVCmd.Flags().String(valueFlag, "", valueDescFlag) } +func initFrostfsIDDeleteKVCmd() { + Cmd.AddCommand(frostfsidDeleteKVCmd) + frostfsidDeleteKVCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + frostfsidDeleteKVCmd.Flags().String(subjectAddressFlag, "", "Subject address") + frostfsidDeleteKVCmd.Flags().String(keyFlag, "", keyDescFlag) +} + func frostfsidCreateNamespace(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) @@ -446,6 +461,25 @@ func frostfsidSetKV(cmd *cobra.Command, _ 
[]string) { commonCmd.ExitOnErr(cmd, "set KV: %w", err) } +func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { + subjectAddress := getFrostfsIDSubjectAddress(cmd) + key, _ := cmd.Flags().GetString(keyFlag) + + if key == "" { + commonCmd.ExitOnErr(cmd, "", errors.New("key can't be empty")) + } + + ffsid, err := newFrostfsIDClient(cmd) + commonCmd.ExitOnErr(cmd, "init contract client: %w", err) + + method, args := ffsid.roCli.DeleteSubjectKVCall(subjectAddress, key) + + ffsid.addCall(method, args) + + err = ffsid.sendWait() + commonCmd.ExitOnErr(cmd, "delete KV: %w", err) +} + func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go index 930865f81..8aad5c5c1 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/root.go @@ -13,6 +13,7 @@ func init() { initFrostfsIDRemoveSubjectFromGroupCmd() initFrostfsIDListGroupSubjectsCmd() initFrostfsIDSetKVCmd() + initFrostfsIDDeleteKVCmd() initFrostfsIDAddSubjectKeyCmd() initFrostfsIDRemoveSubjectKeyCmd() } From b2163ff44c584f1f00bfc78272492b6e33fa3132 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 11 Feb 2025 12:00:25 +0300 Subject: [PATCH 368/591] [#1614] adm: Allow use any wallets Signed-off-by: Alexander Chuprov --- cmd/frostfs-adm/internal/commonflags/flags.go | 4 + .../internal/modules/morph/ape/ape_util.go | 5 +- .../internal/modules/morph/helper/actor.go | 88 +++++++++++++++---- .../internal/modules/morph/helper/util.go | 24 ++++- .../internal/modules/morph/nns/domains.go | 2 + .../internal/modules/morph/nns/helper.go | 26 +++++- .../internal/modules/morph/nns/record.go | 3 + .../internal/modules/morph/nns/root.go | 5 ++ 8 files changed, 135 insertions(+), 22 deletions(-) diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index 87692d013..c25d902c2 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -16,6 +16,10 @@ const ( EndpointFlagDesc = "N3 RPC node endpoint" EndpointFlagShort = "r" + WalletPath = "wallet" + WalletPathShorthand = "w" + WalletPathUsage = "Path to the wallet" + AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" diff --git a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go index 914682647..3c332c3f0 100644 --- a/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go +++ b/cmd/frostfs-adm/internal/modules/morph/ape/ape_util.go @@ -3,6 +3,8 @@ package ape import ( "errors" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -76,7 +78,8 @@ func newPolicyContractInterface(cmd *cobra.Command) (*morph.ContractStorage, *he c, err := helper.NewRemoteClient(viper.GetViper()) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.ConsensusAccountName) + walletDir := 
config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) + ac, err := helper.NewLocalActor(c, &helper.AlphabetWallets{Path: walletDir, Label: constants.ConsensusAccountName}) commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) var ch util.Uint160 diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go index eb0444408..6499ace5f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/actor.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/actor.go @@ -3,9 +3,6 @@ package helper import ( "fmt" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" - commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -16,7 +13,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -28,32 +24,86 @@ type LocalActor struct { rpcInvoker invoker.RPCInvoke } +type AlphabetWallets struct { + Label string + Path string +} + +func (a *AlphabetWallets) GetAccount(v *viper.Viper) ([]*wallet.Account, error) { + w, err := GetAlphabetWallets(v, a.Path) + if err != nil { + return nil, err + } + + var accounts []*wallet.Account + for _, wall := range w { + acc, err := GetWalletAccount(wall, a.Label) + if err != nil { + return nil, err + } + accounts = append(accounts, acc) + } + return accounts, nil +} + +type RegularWallets struct{ Path string } + +func (r *RegularWallets) GetAccount() ([]*wallet.Account, error) { + w, err := getRegularWallet(r.Path) + if err != nil { + return nil, err + } + + return []*wallet.Account{w.GetAccount(w.GetChangeAddress())}, nil +} + // NewLocalActor create LocalActor with accounts form provided wallets. // In case of empty wallets provided created actor with dummy account only for read operation. // // If wallets are provided, the contract client will use accounts with accName name from these wallets. // To determine which account name should be used in a contract client, refer to how the contract // verifies the transaction signature. -func NewLocalActor(cmd *cobra.Command, c actor.RPCActor, accName string) (*LocalActor, error) { - walletDir := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) +func NewLocalActor(c actor.RPCActor, alphabet *AlphabetWallets, regularWallets ...*RegularWallets) (*LocalActor, error) { var act *actor.Actor var accounts []*wallet.Account + var signers []actor.SignerAccount - wallets, err := GetAlphabetWallets(viper.GetViper(), walletDir) - commonCmd.ExitOnErr(cmd, "unable to get alphabet wallets: %w", err) + if alphabet != nil { + account, err := alphabet.GetAccount(viper.GetViper()) + if err != nil { + return nil, err + } - for _, w := range wallets { - acc, err := GetWalletAccount(w, accName) - commonCmd.ExitOnErr(cmd, fmt.Sprintf("can't find %s account: %%w", accName), err) - accounts = append(accounts, acc) + accounts = append(accounts, account...) 
+ signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) } - act, err = actor.New(c, []actor.SignerAccount{{ - Signer: transaction.Signer{ - Account: accounts[0].Contract.ScriptHash(), - Scopes: transaction.Global, - }, - Account: accounts[0], - }}) + + for _, w := range regularWallets { + if w == nil { + continue + } + account, err := w.GetAccount() + if err != nil { + return nil, err + } + + accounts = append(accounts, account...) + signers = append(signers, actor.SignerAccount{ + Signer: transaction.Signer{ + Account: account[0].Contract.ScriptHash(), + Scopes: transaction.Global, + }, + Account: account[0], + }) + } + + act, err := actor.New(c, signers) if err != nil { return nil, err } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index c26aa447b..be6b2c6dd 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" + "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" @@ -22,6 +23,27 @@ import ( "github.com/spf13/viper" ) +func getRegularWallet(walletPath string) (*wallet.Wallet, error) { + w, err := wallet.NewWalletFromFile(walletPath) + if err != nil { + return nil, err + } + + password, err := input.ReadPassword("Enter password for wallet:") + if err != nil { + return nil, fmt.Errorf("can't fetch password: %w", err) + } + + for i := range w.Accounts { + if err = w.Accounts[i].Decrypt(password, keys.NEP2ScryptParams()); err != nil { + err = fmt.Errorf("can't unlock wallet: %w", err) + break + } + } + + return w, err +} + func GetAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, error) { wallets, err := openAlphabetWallets(v, walletDir) if err != nil { @@ -51,7 +73,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er if errors.Is(err, os.ErrNotExist) { err = nil } else { - err = fmt.Errorf("can't open wallet: %w", err) + err = fmt.Errorf("can't open alphabet wallet: %w", err) } break } diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index 1668bb327..e25d0db49 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -19,6 +19,7 @@ func initRegisterCmd() { registerCmd.Flags().Int64(nnsRetryFlag, constants.NNSRetryDefVal, "SOA record RETRY parameter") registerCmd.Flags().Int64(nnsExpireFlag, int64(constants.DefaultExpirationTime), "SOA record EXPIRE parameter") registerCmd.Flags().Int64(nnsTTLFlag, constants.NNSTtlDefVal, "SOA record TTL parameter") + registerCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(registerCmd.Flags(), nnsNameFlag) } @@ -48,6 +49,7 @@ func initDeleteCmd() { deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) 
deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + deleteCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag) } diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index b13cbc8a1..5cf02e866 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -1,7 +1,11 @@ package nns import ( + "errors" + client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" @@ -16,7 +20,27 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - ac, err := helper.NewLocalActor(cmd, c, constants.CommitteeAccountName) + alphabetWalletPath := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) + walletPath := config.ResolveHomePath(viper.GetString(commonflags.WalletPath)) + + var ( + alphabet *helper.AlphabetWallets + regularWallets []*helper.RegularWallets + ) + + if alphabetWalletPath != "" { + alphabet = &helper.AlphabetWallets{Path: alphabetWalletPath, Label: constants.ConsensusAccountName} + } + + if walletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) + } + + if alphabet == nil && regularWallets == nil { + commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) + } + + ac, err := helper.NewLocalActor(c, alphabet, regularWallets...) 
commonCmd.ExitOnErr(cmd, "can't create actor: %w", err) r := management.NewReader(ac.Invoker) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go index 09ed92ab3..9cb47356f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go @@ -19,6 +19,7 @@ func initAddRecordCmd() { addRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) addRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) addRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + addRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(addRecordCmd.Flags(), nnsRecordTypeFlag) @@ -40,6 +41,7 @@ func initDelRecordsCmd() { delRecordsCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) delRecordsCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordsCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) + delRecordsCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag) @@ -52,6 +54,7 @@ func initDelRecordCmd() { delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc) delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc) + delRecordCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag) _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag) diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index 9bdeaccd9..64a279288 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -39,6 +39,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: registerDomain, } @@ -48,6 +49,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: deleteDomain, } @@ -75,6 +77,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: addRecord, } @@ -92,6 +95,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, 
cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecords, } @@ -101,6 +105,7 @@ var ( PreRun: func(cmd *cobra.Command, _ []string) { _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) }, Run: delRecord, } From 304bee938ba7fbd1ebbbef2b9996a0ae87950be6 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 11 Feb 2025 12:00:51 +0300 Subject: [PATCH 369/591] [#1614] adm/nns: Add 'set-admin' Signed-off-by: Alexander Chuprov --- cmd/frostfs-adm/internal/commonflags/flags.go | 3 +++ .../internal/modules/morph/nns/domains.go | 27 +++++++++++++++++++ .../internal/modules/morph/nns/helper.go | 9 +++++-- .../internal/modules/morph/nns/root.go | 12 +++++++++ 4 files changed, 49 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go index c25d902c2..f194e97f5 100644 --- a/cmd/frostfs-adm/internal/commonflags/flags.go +++ b/cmd/frostfs-adm/internal/commonflags/flags.go @@ -23,6 +23,9 @@ const ( AlphabetWalletsFlag = "alphabet-wallets" AlphabetWalletsFlagDesc = "Path to alphabet wallets dir" + AdminWalletPath = "wallet-admin" + AdminWalletUsage = "Path to the admin wallet" + LocalDumpFlag = "local-dump" ProtoConfigPath = "protocol" ContractsInitFlag = "contracts" diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go index e25d0db49..14f6eb390 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/domains.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go @@ -6,7 +6,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" + "github.com/spf13/viper" ) func initRegisterCmd() { @@ -64,3 +66,28 @@ func deleteDomain(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "delete domain error: %w", err) cmd.Println("Domain deleted successfully") } + +func initSetAdminCmd() { + Cmd.AddCommand(setAdminCmd) + setAdminCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + setAdminCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc) + setAdminCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc) + setAdminCmd.Flags().StringP(commonflags.WalletPath, commonflags.WalletPathShorthand, "", commonflags.WalletPathUsage) + setAdminCmd.Flags().String(commonflags.AdminWalletPath, "", commonflags.AdminWalletUsage) + _ = setAdminCmd.MarkFlagRequired(commonflags.AdminWalletPath) + + _ = cobra.MarkFlagRequired(setAdminCmd.Flags(), nnsNameFlag) +} + +func setAdmin(cmd *cobra.Command, _ []string) { + c, actor := nnsWriter(cmd) + + name, _ := cmd.Flags().GetString(nnsNameFlag) + w, err := wallet.NewWalletFromFile(viper.GetString(commonflags.AdminWalletPath)) + commonCmd.ExitOnErr(cmd, "can't get admin wallet: %w", err) + h, vub, err := c.SetAdmin(name, w.GetAccount(w.GetChangeAddress()).ScriptHash()) + + _, err = actor.Wait(h, vub, err) + 
commonCmd.ExitOnErr(cmd, "Set admin error: %w", err) + cmd.Println("Set admin successfully") +} diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go index 5cf02e866..e49f62256 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/helper.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/helper.go @@ -20,8 +20,9 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { c, err := helper.NewRemoteClient(v) commonCmd.ExitOnErr(cmd, "unable to create NEO rpc client: %w", err) - alphabetWalletPath := config.ResolveHomePath(viper.GetString(commonflags.AlphabetWalletsFlag)) - walletPath := config.ResolveHomePath(viper.GetString(commonflags.WalletPath)) + alphabetWalletPath := config.ResolveHomePath(v.GetString(commonflags.AlphabetWalletsFlag)) + walletPath := config.ResolveHomePath(v.GetString(commonflags.WalletPath)) + adminWalletPath := config.ResolveHomePath(v.GetString(commonflags.AdminWalletPath)) var ( alphabet *helper.AlphabetWallets @@ -36,6 +37,10 @@ func nnsWriter(cmd *cobra.Command) (*client.Contract, *helper.LocalActor) { regularWallets = append(regularWallets, &helper.RegularWallets{Path: walletPath}) } + if adminWalletPath != "" { + regularWallets = append(regularWallets, &helper.RegularWallets{Path: adminWalletPath}) + } + if alphabet == nil && regularWallets == nil { commonCmd.ExitOnErr(cmd, "", errors.New("no wallets provided")) } diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go index 64a279288..bb84933c6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go +++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go @@ -109,6 +109,17 @@ var ( }, Run: delRecord, } + setAdminCmd = &cobra.Command{ + Use: "set-admin", + Short: "Sets admin for domain", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag)) + _ = viper.BindPFlag(commonflags.WalletPath, cmd.Flags().Lookup(commonflags.WalletPath)) + _ = viper.BindPFlag(commonflags.AdminWalletPath, cmd.Flags().Lookup(commonflags.AdminWalletPath)) + }, + Run: setAdmin, + } ) func init() { @@ -121,4 +132,5 @@ func init() { initGetRecordsCmd() initDelRecordsCmd() initDelRecordCmd() + initSetAdminCmd() } From 1bcaa1af1f4ff32b7a7f7bdfbef1ca62954361a2 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Mon, 10 Feb 2025 18:30:18 +0300 Subject: [PATCH 370/591] [#1641] govulncheck: Fix minor toolchain updates for good Signed-off-by: Vitaliy Potyarkin --- .forgejo/workflows/vulncheck.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 7c89a3555..140434dfc 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,8 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23.6' + go-version: '1.23' + check-latest: true - name: Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest From fe0cf86dc681e221706d9dcb4b25413bfdf065e9 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 12 Feb 2025 15:37:38 +0300 Subject: [PATCH 371/591] [#1643] go.mod: Bump frostfs-observability version To add `grpc_client_msg_send_handling_seconds` metric. 
Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8f2e7bc51..5df7a5cc1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 git.frostfs.info/TrueCloudLab/hrw v1.2.1 diff --git a/go.sum b/go.sum index e92b64886..39a6a98e6 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88 h1:9bvBDLApbbO5sXBKdODpE9tzy3HV99nXxkDWNn22rdI= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4= From 9b29e7392fb960f7d3c923f17ce29d5475ef9562 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 18 Feb 2025 09:38:00 +0300 Subject: [PATCH 372/591] [#1647] go.mod: Bump frostfs-sdk-go version Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5df7a5cc1..23ddad276 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index 39a6a98e6..ecd2ab525 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability 
v0.0.0-20250212111929-d34e13 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421 h1:pP19IawSdsLCKFv7HMNfWAeH6E3uSnntKZkwka+/2+4= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250202151421-8389887a3421/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 02f3a7f65c0d649f29db04f5c49703e48621b95b Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 18 Feb 2025 10:51:43 +0300 Subject: [PATCH 373/591] [#1648] writecache: Fix race condition when reporting cache size metrics There is a race condition when multiple cache operation try to report the cache size metrics simultaneously. Consider the following example: - the initial total size of objects stored in the cache size is 2 - worker X deletes an object and reads the cache size, which is 1 - worker Y deletes an object and reads the cache size, which is 0 - worker Y reports the cache size it learnt, which is 0 - worker X reports the cache size it learnt, which is 1 As a result, the observed cache size is 1 (i. e. one object remains in the cache), which is incorrect because the actual cache size is 0. To fix this, let's report the metrics periodically in the flush loop. 
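Schematically, this is a read-then-report race: the counter itself is updated atomically, but the value each worker observes may already be stale when it is written to the gauge, and the last (stale) report wins. A minimal, self-contained Go sketch of that pattern (illustrative only, not the actual writecache code):

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        var size atomic.Int64
        size.Store(2) // two objects in the cache initially

        var mu sync.Mutex
        var gauge int64 // last value reported to the metric

        var wg sync.WaitGroup
        for i := 0; i < 2; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                observed := size.Add(-1) // delete an object and read the new size
                // ...arbitrary scheduling delay here...
                mu.Lock()
                gauge = observed // last report wins and may carry the stale value
                mu.Unlock()
            }()
        }
        wg.Wait()
        fmt.Println(size.Load(), gauge) // actual size is 0, gauge may still show 1
    }

Reporting from a single place in the flush loop sidesteps the interleaving entirely; the metric is then refreshed once per loop iteration instead of on every cache operation.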
Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/writecache/flush.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 3f9b36f9d..2d07d8b32 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -87,6 +87,9 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { } c.modeMtx.RUnlock() + + // counter changed by fstree + c.estimateCacheSize() case <-ctx.Done(): return } From b2adf1109e380c364b8132ee7fa1947094c65c86 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 20 Feb 2025 12:27:07 +0300 Subject: [PATCH 374/591] [#1646] cli: Use Cmp() functions for ID-like structs Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/internal/client/client.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index ceae36ae7..3f235f070 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -9,7 +9,6 @@ import ( "io" "os" "slices" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/accounting" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -77,9 +76,7 @@ func ListContainers(ctx context.Context, prm ListContainersPrm) (res ListContain // SortedIDList returns sorted list of identifiers of user's containers. func (x ListContainersRes) SortedIDList() []cid.ID { list := x.cliRes.Containers() - slices.SortFunc(list, func(lhs, rhs cid.ID) int { - return strings.Compare(lhs.EncodeToString(), rhs.EncodeToString()) - }) + slices.SortFunc(list, cid.ID.Cmp) return list } @@ -687,9 +684,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes return nil, fmt.Errorf("read object list: %w", err) } - slices.SortFunc(list, func(a, b oid.ID) int { - return strings.Compare(a.EncodeToString(), b.EncodeToString()) - }) + slices.SortFunc(list, oid.ID.Cmp) return &SearchObjectsRes{ ids: list, From 003d568ae2361e7b675091b2a0dff36b490a6013 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 30 Jan 2025 16:27:48 +0300 Subject: [PATCH 375/591] [#1628] innerring: Relax container homomorphic hashing check Our initial desire was to prohibit using homomorphic hashing on the network level because of the resource consumption. However, the ability to use it, doesn't mean that we must. So only fail validation if container wants to have homomorphic hashing, but the network prohibits it. 
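With netSetting and cnrSetting denoting IsHomomorphicHashingDisabled at the network and container level respectively, the relaxed check accepts every combination except the one where the container still expects homomorphic hashing while the network forbids it:

    netSetting  cnrSetting  validation
    false       false       ok     (hashing enabled everywhere)
    false       true        ok     (container opts out; rejected before this change)
    true        false       error  (container wants hashing, network prohibits it)
    true        true        ok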
Signed-off-by: Evgenii Stratonikov --- pkg/innerring/processors/container/process_container.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 854e2c779..8e4ab2623 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -209,7 +209,7 @@ func checkHomomorphicHashing(ctx context.Context, ns NetworkState, cnr container return fmt.Errorf("could not get setting in contract: %w", err) } - if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting != cnrSetting { + if cnrSetting := containerSDK.IsHomomorphicHashingDisabled(cnr); netSetting && !cnrSetting { return fmt.Errorf("network setting: %t, container setting: %t", netSetting, cnrSetting) } From 37972a91c19c6aa1b44c322bb5e485d553f031a9 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 24 Feb 2025 12:47:09 +0300 Subject: [PATCH 376/591] [#1654] adm: Make 'morph-init' idempotent Signed-off-by: Alexander Chuprov --- .../modules/morph/initialize/initialize_transfer.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index 7f1bfee2b..d379204a6 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -83,11 +83,14 @@ func transferFunds(c *helper.InitializeContext) error { // transferFundsFinished checks balances of accounts we transfer GAS to. // The stage is considered finished if the balance is greater than the half of what we need to transfer. func transferFundsFinished(c *helper.InitializeContext) (bool, error) { - acc := c.Accounts[0] - r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash) - res, err := r.BalanceOf(acc.Contract.ScriptHash()) - if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 { + res, err := r.BalanceOf(c.ConsensusAcc.ScriptHash()) + if err != nil { + return false, err + } + + version, err := c.Client.GetVersion() + if err != nil || res.Cmp(big.NewInt(int64(version.Protocol.InitialGasDistribution))) != -1 { return false, err } From 2ff032db90de1c1c2af24a9231eeafaabbe5d643 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 24 Feb 2025 13:22:23 +0300 Subject: [PATCH 377/591] [#1654] adm: Get 'gasInitialTotalSupply' from network Signed-off-by: Alexander Chuprov --- .../morph/initialize/initialize_transfer.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go index d379204a6..bb684b3a9 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go @@ -22,15 +22,14 @@ import ( ) const ( - gasInitialTotalSupply = 30000000 * native.GASFactor // initialAlphabetGASAmount represents the amount of GAS given to each alphabet node. initialAlphabetGASAmount = 10_000 * native.GASFactor // initialProxyGASAmount represents the amount of GAS given to a proxy contract. 
initialProxyGASAmount = 50_000 * native.GASFactor ) -func initialCommitteeGASAmount(c *helper.InitializeContext) int64 { - return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 +func initialCommitteeGASAmount(c *helper.InitializeContext, initialGasDistribution int64) int64 { + return (initialGasDistribution - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2 } func transferFunds(c *helper.InitializeContext) error { @@ -42,6 +41,11 @@ func transferFunds(c *helper.InitializeContext) error { return err } + version, err := c.Client.GetVersion() + if err != nil { + return err + } + var transfers []transferTarget for _, acc := range c.Accounts { to := acc.Contract.ScriptHash() @@ -59,7 +63,7 @@ func transferFunds(c *helper.InitializeContext) error { transferTarget{ Token: gas.Hash, Address: c.CommitteeAcc.Contract.ScriptHash(), - Amount: initialCommitteeGASAmount(c), + Amount: initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)), }, transferTarget{ Token: neo.Hash, @@ -95,7 +99,11 @@ func transferFundsFinished(c *helper.InitializeContext) (bool, error) { } res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash()) - return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err + if err != nil { + return false, err + } + + return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c, int64(version.Protocol.InitialGasDistribution)))) == 1, err } func transferGASToProxy(c *helper.InitializeContext) error { From 9a0507704ae0b5069ab6555730fb758f71d04618 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 20 Feb 2025 17:47:28 +0300 Subject: [PATCH 378/591] [#1651] apemanager: Wrap some errors with `InvalidArgument` status Signed-off-by: Airat Arifullin --- pkg/services/apemanager/errors/errors.go | 6 ++++++ pkg/services/apemanager/executor.go | 14 +++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/pkg/services/apemanager/errors/errors.go b/pkg/services/apemanager/errors/errors.go index e64f9a8d1..1d485321c 100644 --- a/pkg/services/apemanager/errors/errors.go +++ b/pkg/services/apemanager/errors/errors.go @@ -9,3 +9,9 @@ func ErrAPEManagerAccessDenied(reason string) error { err.WriteReason(reason) return err } + +func ErrAPEManagerInvalidArgument(msg string) error { + err := new(apistatus.InvalidArgument) + err.SetMessage(msg) + return err +} diff --git a/pkg/services/apemanager/executor.go b/pkg/services/apemanager/executor.go index 58922fede..fc08fe569 100644 --- a/pkg/services/apemanager/executor.go +++ b/pkg/services/apemanager/executor.go @@ -81,7 +81,7 @@ var _ Server = (*Service)(nil) func (s *Service) validateContainerTargetRequest(ctx context.Context, cid string, pubKey *keys.PublicKey) error { var cidSDK cidSDK.ID if err := cidSDK.DecodeString(cid); err != nil { - return fmt.Errorf("invalid CID format: %w", err) + return apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid CID format: %v", err)) } isOwner, err := s.isActorContainerOwner(ctx, cidSDK, pubKey) if err != nil { @@ -101,7 +101,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques chain, err := decodeAndValidateChain(req.GetBody().GetChain().GetKind().(*apeV2.ChainRaw).GetRaw()) if err != nil { - return nil, err + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(err.Error()) } if len(chain.ID) == 0 { const randomIDLength = 10 @@ -122,7 +122,7 @@ func (s *Service) AddChain(ctx context.Context, req *apemanagerV2.AddChainReques } target = 
policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } txHash, vub, err := s.contractStorage.AddMorphRuleChain(apechain.Ingress, target, &chain) @@ -158,7 +158,7 @@ func (s *Service) RemoveChain(ctx context.Context, req *apemanagerV2.RemoveChain } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } txHash, vub, err := s.contractStorage.RemoveMorphRuleChain(apechain.Ingress, target, req.GetBody().GetChainID()) @@ -193,7 +193,7 @@ func (s *Service) ListChains(ctx context.Context, req *apemanagerV2.ListChainsRe } target = policy_engine.ContainerTarget(reqCID) default: - return nil, fmt.Errorf("unsupported target type: %s", targetType) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("unsupported target type: %s", targetType)) } chs, err := s.contractStorage.ListMorphRuleChains(apechain.Ingress, target) @@ -227,11 +227,11 @@ func getSignaturePublicKey(vh *session.RequestVerificationHeader) (*keys.PublicK } sig := vh.GetBodySignature() if sig == nil { - return nil, errEmptyBodySignature + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(errEmptyBodySignature.Error()) } key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256()) if err != nil { - return nil, fmt.Errorf("invalid signature key: %w", err) + return nil, apemanager_errors.ErrAPEManagerInvalidArgument(fmt.Sprintf("invalid signature key: %v", err)) } return key, nil From a97bded440c345c5212f7aaa9c9a89785b22cd32 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Thu, 6 Feb 2025 11:09:33 +0300 Subject: [PATCH 379/591] [#1639] config: Separate `replicator.pool_size` from other settings Separated `replicator.pool_size` and `object.put.remote_pool_size` settings. Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/config.go | 4 ---- cmd/frostfs-node/config/replicator/config.go | 11 ++++++++++- cmd/frostfs-node/config/replicator/config_test.go | 2 +- docs/storage-node-configuration.md | 8 ++++---- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 511777566..d575c7228 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1177,10 +1177,6 @@ func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { fatalOnErr(err) replicatorPoolSize := replicatorconfig.PoolSize(cfg) - if replicatorPoolSize <= 0 { - replicatorPoolSize = putRemoteCapacity - } - pool.replication, err = ants.NewPool(replicatorPoolSize) fatalOnErr(err) diff --git a/cmd/frostfs-node/config/replicator/config.go b/cmd/frostfs-node/config/replicator/config.go index 0fbac935c..e954bf19d 100644 --- a/cmd/frostfs-node/config/replicator/config.go +++ b/cmd/frostfs-node/config/replicator/config.go @@ -11,6 +11,8 @@ const ( // PutTimeoutDefault is a default timeout of object put request in replicator. PutTimeoutDefault = 5 * time.Second + // PoolSizeDefault is a default pool size for put request in replicator. + PoolSizeDefault = 10 ) // PutTimeout returns the value of "put_timeout" config parameter @@ -28,6 +30,13 @@ func PutTimeout(c *config.Config) time.Duration { // PoolSize returns the value of "pool_size" config parameter // from "replicator" section. 
+// +// Returns PoolSizeDefault if the value is non-positive integer. func PoolSize(c *config.Config) int { - return int(config.IntSafe(c.Sub(subsection), "pool_size")) + v := int(config.IntSafe(c.Sub(subsection), "pool_size")) + if v > 0 { + return v + } + + return PoolSizeDefault } diff --git a/cmd/frostfs-node/config/replicator/config_test.go b/cmd/frostfs-node/config/replicator/config_test.go index 2129c01b4..2aa490946 100644 --- a/cmd/frostfs-node/config/replicator/config_test.go +++ b/cmd/frostfs-node/config/replicator/config_test.go @@ -15,7 +15,7 @@ func TestReplicatorSection(t *testing.T) { empty := configtest.EmptyConfig() require.Equal(t, replicatorconfig.PutTimeoutDefault, replicatorconfig.PutTimeout(empty)) - require.Equal(t, 0, replicatorconfig.PoolSize(empty)) + require.Equal(t, replicatorconfig.PoolSizeDefault, replicatorconfig.PoolSize(empty)) }) const path = "../../../../config/example/node" diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index de2729c68..aef05d589 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -396,10 +396,10 @@ replicator: pool_size: 10 ``` -| Parameter | Type | Default value | Description | -|---------------|------------|----------------------------------------|---------------------------------------------| -| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | -| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. | +| Parameter | Type | Default value | Description | +|---------------|------------|---------------|---------------------------------------------| +| `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. | +| `pool_size` | `int` | `10` | Maximum amount of concurrent replications. | # `object` section Contains object-service related parameters. 
From dce269c62e555a08eb6e768b8fb25d7307564991 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Thu, 6 Feb 2025 11:50:12 +0300 Subject: [PATCH 380/591] [#1639] services/object: Remove limiting pools for Put operation Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/config.go | 22 +----------- cmd/frostfs-node/config/object/config.go | 28 --------------- cmd/frostfs-node/config/object/config_test.go | 4 --- cmd/frostfs-node/object.go | 1 - config/example/node.env | 2 -- config/example/node.json | 2 -- config/example/node.yaml | 2 -- docs/storage-node-configuration.md | 4 --- internal/logs/logs.go | 1 - pkg/services/object/common/writer/common.go | 10 ++---- pkg/services/object/common/writer/ec.go | 36 ++----------------- pkg/services/object/common/writer/ec_test.go | 5 --- pkg/services/object/common/writer/writer.go | 16 --------- pkg/services/object/put/service.go | 3 -- pkg/services/object/util/log.go | 8 ----- 15 files changed, 7 insertions(+), 137 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index d575c7228..43ec40d1d 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -664,10 +664,6 @@ type cfgAccessPolicyEngine struct { } type cfgObjectRoutines struct { - putRemote *ants.Pool - - putLocal *ants.Pool - replication *ants.Pool } @@ -1166,16 +1162,6 @@ func initAccessPolicyEngine(ctx context.Context, c *cfg) { func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) { var err error - optNonBlocking := ants.WithNonblocking(true) - - putRemoteCapacity := objectconfig.Put(cfg).PoolSizeRemote() - pool.putRemote, err = ants.NewPool(putRemoteCapacity, optNonBlocking) - fatalOnErr(err) - - putLocalCapacity := objectconfig.Put(cfg).PoolSizeLocal() - pool.putLocal, err = ants.NewPool(putLocalCapacity, optNonBlocking) - fatalOnErr(err) - replicatorPoolSize := replicatorconfig.PoolSize(cfg) pool.replication, err = ants.NewPool(replicatorPoolSize) fatalOnErr(err) @@ -1410,13 +1396,7 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { } func (c *cfg) reloadPools() error { - newSize := objectconfig.Put(c.appCfg).PoolSizeLocal() - c.reloadPool(c.cfgObject.pool.putLocal, newSize, "object.put.local_pool_size") - - newSize = objectconfig.Put(c.appCfg).PoolSizeRemote() - c.reloadPool(c.cfgObject.pool.putRemote, newSize, "object.put.remote_pool_size") - - newSize = replicatorconfig.PoolSize(c.appCfg) + newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") return nil diff --git a/cmd/frostfs-node/config/object/config.go b/cmd/frostfs-node/config/object/config.go index 6ff1fe2ab..c8c967d30 100644 --- a/cmd/frostfs-node/config/object/config.go +++ b/cmd/frostfs-node/config/object/config.go @@ -21,10 +21,6 @@ const ( putSubsection = "put" getSubsection = "get" - - // PutPoolSizeDefault is a default value of routine pool size to - // process object.Put requests in object service. - PutPoolSizeDefault = 10 ) // Put returns structure that provides access to "put" subsection of @@ -35,30 +31,6 @@ func Put(c *config.Config) PutConfig { } } -// PoolSizeRemote returns the value of "remote_pool_size" config parameter. -// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeRemote() int { - v := config.Int(g.cfg, "remote_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - -// PoolSizeLocal returns the value of "local_pool_size" config parameter. 
-// -// Returns PutPoolSizeDefault if the value is not a positive number. -func (g PutConfig) PoolSizeLocal() int { - v := config.Int(g.cfg, "local_pool_size") - if v > 0 { - return int(v) - } - - return PutPoolSizeDefault -} - // SkipSessionTokenIssuerVerification returns the value of "skip_session_token_issuer_verification" config parameter or `false“ if is not defined. func (g PutConfig) SkipSessionTokenIssuerVerification() bool { return config.BoolSafe(g.cfg, "skip_session_token_issuer_verification") diff --git a/cmd/frostfs-node/config/object/config_test.go b/cmd/frostfs-node/config/object/config_test.go index e2bb105d9..1c525ef55 100644 --- a/cmd/frostfs-node/config/object/config_test.go +++ b/cmd/frostfs-node/config/object/config_test.go @@ -13,8 +13,6 @@ func TestObjectSection(t *testing.T) { t.Run("defaults", func(t *testing.T) { empty := configtest.EmptyConfig() - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeRemote()) - require.Equal(t, objectconfig.PutPoolSizeDefault, objectconfig.Put(empty).PoolSizeLocal()) require.EqualValues(t, objectconfig.DefaultTombstoneLifetime, objectconfig.TombstoneLifetime(empty)) require.False(t, objectconfig.Put(empty).SkipSessionTokenIssuerVerification()) }) @@ -22,8 +20,6 @@ func TestObjectSection(t *testing.T) { const path = "../../../../config/example/node" fileConfigTest := func(c *config.Config) { - require.Equal(t, 100, objectconfig.Put(c).PoolSizeRemote()) - require.Equal(t, 200, objectconfig.Put(c).PoolSizeLocal()) require.EqualValues(t, 10, objectconfig.TombstoneLifetime(c)) require.True(t, objectconfig.Put(c).SkipSessionTokenIssuerVerification()) } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 40d3cc1cd..ad6f4140a 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -326,7 +326,6 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche c, c.cfgNetmap.state, irFetcher, - objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal), objectwriter.WithLogger(c.log), objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification), ) diff --git a/config/example/node.env b/config/example/node.env index 2ba432b1b..aa3c72a91 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -87,8 +87,6 @@ FROSTFS_REPLICATOR_POOL_SIZE=10 FROSTFS_CONTAINER_LIST_STREAM_BATCH_SIZE=500 # Object service section -FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100 -FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" diff --git a/config/example/node.json b/config/example/node.json index cfde8bcc7..afa815bc3 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -134,8 +134,6 @@ "tombstone_lifetime": 10 }, "put": { - "remote_pool_size": 100, - "local_pool_size": 200, "skip_session_token_issuer_verification": true }, "get": { diff --git a/config/example/node.yaml b/config/example/node.yaml index 1f8ec843d..f63cc514b 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -117,8 +117,6 @@ object: delete: tombstone_lifetime: 10 # tombstone "local" lifetime in epochs put: - remote_pool_size: 100 # number of async workers for remote PUT operations - local_pool_size: 200 # number of async workers for local PUT operations skip_session_token_issuer_verification: true # session token issuer verification will be 
skipped if true get: priority: # list of metrics of nodes for prioritization diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index aef05d589..b2ab75b7e 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -406,8 +406,6 @@ Contains object-service related parameters. ```yaml object: - put: - remote_pool_size: 100 get: priority: - $attribute:ClusterName @@ -416,8 +414,6 @@ object: | Parameter | Type | Default value | Description | |-----------------------------|------------|---------------|------------------------------------------------------------------------------------------------| | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. | -| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. | -| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. | | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | # `runtime` section diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 6a72644e5..d48a4da9b 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -125,7 +125,6 @@ const ( SearchCouldNotWriteObjectIdentifiers = "could not write object identifiers" SearchLocalOperationFailed = "local operation failed" UtilObjectServiceError = "object service error" - UtilCouldNotPushTaskToWorkerPool = "could not push task to worker pool" V2CantCheckIfRequestFromInnerRing = "can't check if request from inner ring" V2CantCheckIfRequestFromContainerNode = "can't check if request from container node" ClientCouldNotRestoreBlockSubscriptionAfterRPCSwitch = "could not restore block subscription after RPC switch" diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index dae168baf..1998e9638 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -79,11 +79,11 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. continue } - workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey()) + isLocal := n.cfg.NetmapKeys.IsLocalKey(addr.PublicKey()) item := new(bool) wg.Add(1) - if err := workerPool.Submit(func() { + go func() { defer wg.Done() err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr}) @@ -95,11 +95,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. traverser.SubmitSuccess() *item = true - }); err != nil { - wg.Done() - svcutil.LogWorkerPoolError(ctx, n.cfg.Logger, "PUT", err) - return true - } + }() // Mark the container node as processed in order to exclude it // in subsequent container broadcast. 
Note that we don't diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go index 8f269ec21..26a53e315 100644 --- a/pkg/services/object/common/writer/ec.go +++ b/pkg/services/object/common/writer/ec.go @@ -149,17 +149,7 @@ func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } - completed := make(chan interface{}) - if poolErr := e.Config.RemotePool.Submit(func() { - defer close(completed) - err = e.Relay(ctx, info, c) - }); poolErr != nil { - close(completed) - svcutil.LogWorkerPoolError(ctx, e.Config.Logger, "PUT", poolErr) - return poolErr - } - <-completed - + err = e.Relay(ctx, info, c) if err == nil { return nil } @@ -343,21 +333,11 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n } func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error { - var err error localTarget := LocalTarget{ Storage: e.Config.LocalStore, Container: e.Container, } - completed := make(chan interface{}) - if poolErr := e.Config.LocalPool.Submit(func() { - defer close(completed) - err = localTarget.WriteObject(ctx, obj, e.ObjectMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return localTarget.WriteObject(ctx, obj, e.ObjectMeta) } func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error { @@ -371,15 +351,5 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n nodeInfo: clientNodeInfo, } - var err error - completed := make(chan interface{}) - if poolErr := e.Config.RemotePool.Submit(func() { - defer close(completed) - err = remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) - }); poolErr != nil { - close(completed) - return poolErr - } - <-completed - return err + return remoteTaget.WriteObject(ctx, obj, e.ObjectMeta) } diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index b7764661f..2458e352f 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -31,7 +31,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/panjf2000/ants/v2" "github.com/stretchr/testify/require" ) @@ -131,9 +130,6 @@ func TestECWriter(t *testing.T) { nodeKey, err := keys.NewPrivateKey() require.NoError(t, err) - pool, err := ants.NewPool(4, ants.WithNonblocking(true)) - require.NoError(t, err) - log, err := logger.NewLogger(nil) require.NoError(t, err) @@ -141,7 +137,6 @@ func TestECWriter(t *testing.T) { ecw := ECWriter{ Config: &Config{ NetmapKeys: n, - RemotePool: pool, Logger: log, ClientConstructor: clientConstructor{vectors: ns}, KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil), diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go index adaf1945b..d3d2b41b4 100644 --- a/pkg/services/object/common/writer/writer.go +++ b/pkg/services/object/common/writer/writer.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -52,8 +51,6 @@ type Config struct { NetmapSource netmap.Source - RemotePool, LocalPool util.WorkerPool - NetmapKeys netmap.AnnouncedKeys FormatValidator *object.FormatValidator @@ -69,12 +66,6 @@ type Config struct { type Option func(*Config) -func WithWorkerPools(remote, local util.WorkerPool) Option { - return func(c *Config) { - c.RemotePool, c.LocalPool = remote, local - } -} - func WithLogger(l *logger.Logger) Option { return func(c *Config) { c.Logger = l @@ -87,13 +78,6 @@ func WithVerifySessionTokenIssuer(v bool) Option { } } -func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) { - if c.NetmapKeys.IsLocalKey(pub) { - return c.LocalPool, true - } - return c.RemotePool, false -} - type Params struct { Config *Config diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 5cc0a5722..099486b3f 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -6,7 +6,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -27,8 +26,6 @@ func NewService(ks *objutil.KeyStorage, opts ...objectwriter.Option, ) *Service { c := &objectwriter.Config{ - RemotePool: util.NewPseudoWorkerPool(), - LocalPool: util.NewPseudoWorkerPool(), Logger: logger.NewLoggerWrapper(zap.L()), KeyStorage: ks, ClientConstructor: cc, diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go index 2c1e053ac..b10826226 100644 --- a/pkg/services/object/util/log.go +++ b/pkg/services/object/util/log.go @@ -17,11 +17,3 @@ func LogServiceError(ctx context.Context, l *logger.Logger, req string, node net zap.Error(err), ) } - -// LogWorkerPoolError writes debug error message of object worker pool to provided logger. 
-func LogWorkerPoolError(ctx context.Context, l *logger.Logger, req string, err error) { - l.Error(ctx, logs.UtilCouldNotPushTaskToWorkerPool, - zap.String("request", req), - zap.Error(err), - ) -} From f0b2017057841693612ef0e8239443afd230ce47 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Thu, 20 Feb 2025 11:18:02 +0300 Subject: [PATCH 381/591] [#1639] go.mod: Update sdk-go and qos Signed-off-by: Aleksey Savchuk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 23ddad276..2bfc3abfe 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index ecd2ab525..4a7dfd4dc 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf h1:ik2aMBpTJJpoZe2ffcGShXRkrvny65NEPLVt67KmH/A= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= From 55908865996def33858ca6e7274d0e0e892a9bf1 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 7 Feb 2025 17:23:10 +0300 Subject: [PATCH 382/591] [#1639] qos: Add interceptors for limiting active RPCs Signed-off-by: Aleksey Savchuk --- internal/qos/grpc.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go index c253f1e9d..534a1f74b 100644 --- a/internal/qos/grpc.go +++ b/internal/qos/grpc.go @@ -3,7 +3,9 @@ package qos import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" "google.golang.org/grpc" ) @@ -49,3 +51,36 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto return streamer(ctx, desc, cc, method, 
opts...) } } + +func NewMaxActiveRPCLimiterUnaryServerInterceptor(getLimiter func() limiting.Limiter) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == IOTagCritical.String() { + return handler(ctx, req) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return nil, new(apistatus.ResourceExhausted) + } + defer release() + + return handler(ctx, req) + } +} + +//nolint:contextcheck (grpc.ServerStream manages the context itself) +func NewMaxActiveRPCLimiterStreamServerInterceptor(getLimiter func() limiting.Limiter) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if tag, ok := tagging.IOTagFromContext(ss.Context()); ok && tag == IOTagCritical.String() { + return handler(srv, ss) + } + + release, ok := getLimiter().Acquire(info.FullMethod) + if !ok { + return new(apistatus.ResourceExhausted) + } + defer release() + + return handler(srv, ss) + } +} From dae0949f6eb8af3677f6552f52a00c2fdaa6bf79 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 7 Feb 2025 15:17:37 +0300 Subject: [PATCH 383/591] [#1639] node: Support active RPC limiting - Allow configuration of active RPC limits for method groups - Apply RPC limiting for all services except the control service Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/config.go | 40 ++++++++++++-- cmd/frostfs-node/config/rpc/config.go | 43 +++++++++++++++ cmd/frostfs-node/config/rpc/config_test.go | 53 +++++++++++++++++++ cmd/frostfs-node/config/rpc/testdata/node.env | 3 ++ .../config/rpc/testdata/node.json | 18 +++++++ .../config/rpc/testdata/node.yaml | 8 +++ cmd/frostfs-node/grpc.go | 4 ++ config/example/node.env | 5 ++ config/example/node.json | 17 ++++++ config/example/node.yaml | 10 ++++ docs/storage-node-configuration.md | 21 ++++++++ 11 files changed, 217 insertions(+), 5 deletions(-) create mode 100644 cmd/frostfs-node/config/rpc/config.go create mode 100644 cmd/frostfs-node/config/rpc/config_test.go create mode 100644 cmd/frostfs-node/config/rpc/testdata/node.env create mode 100644 cmd/frostfs-node/config/rpc/testdata/node.json create mode 100644 cmd/frostfs-node/config/rpc/testdata/node.yaml diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 43ec40d1d..a7aeedc21 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -29,6 +29,7 @@ import ( nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" + rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" @@ -69,6 +70,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/state" "git.frostfs.info/TrueCloudLab/frostfs-observability/logging/lokicore" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" netmapV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" objectSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -528,6 +530,8 @@ type cfgGRPC struct { maxChunkSize uint64 maxAddrAmount uint64 reconnectTimeout time.Duration + + limiter atomic.Pointer[limiting.SemaphoreLimiter] } func (c *cfgGRPC) append(e string, l net.Listener, s *grpc.Server) { @@ -717,7 +721,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) - c.cfgGRPC = initCfgGRPC() + c.cfgGRPC = initCfgGRPC(appCfg) c.cfgMorph = cfgMorph{ proxyScriptHash: contractsconfig.Proxy(appCfg), @@ -848,14 +852,23 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } } -func initCfgGRPC() cfgGRPC { +func initCfgGRPC(appCfg *config.Config) (cfg cfgGRPC) { maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - return cfgGRPC{ - maxChunkSize: maxChunkSize, - maxAddrAmount: maxAddrAmount, + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + fatalOnErr(err) + + cfg.maxChunkSize = maxChunkSize + cfg.maxAddrAmount = maxAddrAmount + cfg.limiter.Store(limiter) + + return } func initCfgObject(appCfg *config.Config) cfgObject { @@ -1392,9 +1405,26 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) } + components = append(components, dCmp{"rpc_limiter", c.reloadLimits}) + return components } +func (c *cfg) reloadLimits() error { + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(c.appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) + } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + if err != nil { + return err + } + + c.cfgGRPC.limiter.Store(limiter) + return nil +} + func (c *cfg) reloadPools() error { newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go new file mode 100644 index 000000000..197990d07 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config.go @@ -0,0 +1,43 @@ +package rpcconfig + +import ( + "strconv" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" +) + +const ( + subsection = "rpc" + limitsSubsection = "limits" +) + +type LimitConfig struct { + Methods []string + MaxOps int64 +} + +// Limits returns the "limits" config from "rpc" section. 
+func Limits(c *config.Config) []LimitConfig { + c = c.Sub(subsection).Sub(limitsSubsection) + + var limits []LimitConfig + + for i := uint64(0); ; i++ { + si := strconv.FormatUint(i, 10) + sc := c.Sub(si) + + methods := config.StringSliceSafe(sc, "methods") + if len(methods) == 0 { + break + } + + maxOps := config.IntSafe(sc, "max_ops") + if maxOps == 0 { + panic("no max operations for method group") + } + + limits = append(limits, LimitConfig{methods, maxOps}) + } + + return limits +} diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go new file mode 100644 index 000000000..31a837cee --- /dev/null +++ b/cmd/frostfs-node/config/rpc/config_test.go @@ -0,0 +1,53 @@ +package rpcconfig + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "github.com/stretchr/testify/require" +) + +func TestRPCSection(t *testing.T) { + t.Run("defaults", func(t *testing.T) { + require.Empty(t, Limits(configtest.EmptyConfig())) + }) + + t.Run("correct config", func(t *testing.T) { + const path = "../../../../config/example/node" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(1000)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) + + t.Run("no max operations", func(t *testing.T) { + const path = "testdata/node" + + fileConfigTest := func(c *config.Config) { + require.Panics(t, func() { _ = Limits(c) }) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) +} diff --git a/cmd/frostfs-node/config/rpc/testdata/node.env b/cmd/frostfs-node/config/rpc/testdata/node.env new file mode 100644 index 000000000..2fed4c5bc --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/node.env @@ -0,0 +1,3 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/node.json b/cmd/frostfs-node/config/rpc/testdata/node.json new file mode 100644 index 000000000..6156aa71d --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/node.json @@ -0,0 +1,18 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ] + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/node.yaml b/cmd/frostfs-node/config/rpc/testdata/node.yaml new file mode 100644 index 000000000..e50b7ae93 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/node.yaml @@ -0,0 +1,8 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 diff --git 
a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 4d679e4cc..e1a273ce4 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -9,9 +9,11 @@ import ( grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" qos "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "go.uber.org/zap" "google.golang.org/grpc" @@ -134,11 +136,13 @@ func getGrpcServerOpts(ctx context.Context, c *cfg, sc *grpcconfig.Config) ([]gr qos.NewUnaryServerInterceptor(), metrics.NewUnaryServerInterceptor(), tracing.NewUnaryServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), grpc.ChainStreamInterceptor( qos.NewStreamServerInterceptor(), metrics.NewStreamServerInterceptor(), tracing.NewStreamServerInterceptor(), + qosInternal.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return c.cfgGRPC.limiter.Load() }), ), } diff --git a/config/example/node.env b/config/example/node.env index aa3c72a91..2ebef181a 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -91,6 +91,11 @@ FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10 FROSTFS_OBJECT_GET_PRIORITY="$attribute:ClusterName $attribute:UN-LOCODE" +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=1000 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 + # Storage engine section FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 diff --git a/config/example/node.json b/config/example/node.json index afa815bc3..0ed72effc 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -140,6 +140,23 @@ "priority": ["$attribute:ClusterName", "$attribute:UN-LOCODE"] } }, + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 1000 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + }, "storage": { "shard_pool_size": 15, "shard_ro_error_threshold": 100, diff --git a/config/example/node.yaml b/config/example/node.yaml index f63cc514b..6b810653e 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -123,6 +123,16 @@ object: - $attribute:ClusterName - $attribute:UN-LOCODE +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 + storage: # note: shard configuration can be omitted for relay node (see `node.relay`) shard_pool_size: 15 # size of per-shard worker pools used for PUT operations diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index b2ab75b7e..1eb5437ba 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -416,6 +416,27 @@ object: | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed 
objects in epochs. | | `get.priority` | `[]string` | | List of metrics of nodes for prioritization. Used for computing response on GET requests. | + +# `rpc` section +Contains limits on the number of active RPC for specified method(s). + +```yaml +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 1000 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 +``` + +| Parameter | Type | Default value | Description | +|------------------|------------|---------------|--------------------------------------------------------------| +| `limits.max_ops` | `int` | | Maximum number of active RPC allowed for the given method(s) | +| `limits.methods` | `[]string` | | List of RPC methods sharing the given limit | + # `runtime` section Contains runtime parameters. From 92ab58984b6bbe7722ef8306ca9718e9543d5d9c Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 28 Feb 2025 15:07:24 +0300 Subject: [PATCH 384/591] [#1658] node: Simplify RPC limiter initialization - Move all initialization logic to one place - Initialize the limiter after all RPC services are registered to be able to validate that configured limits match the methods registered earlier Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/config.go | 31 +++---------------------------- cmd/frostfs-node/grpc.go | 17 +++++++++++++++++ cmd/frostfs-node/main.go | 2 ++ 3 files changed, 22 insertions(+), 28 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index a7aeedc21..3e1bfb4f2 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -29,7 +29,6 @@ import ( nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" - rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" @@ -721,7 +720,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) - c.cfgGRPC = initCfgGRPC(appCfg) + c.cfgGRPC = initCfgGRPC() c.cfgMorph = cfgMorph{ proxyScriptHash: contractsconfig.Proxy(appCfg), @@ -852,21 +851,12 @@ func initFrostfsID(appCfg *config.Config) cfgFrostfsID { } } -func initCfgGRPC(appCfg *config.Config) (cfg cfgGRPC) { +func initCfgGRPC() (cfg cfgGRPC) { maxChunkSize := uint64(maxMsgSize) * 3 / 4 // 25% to meta, 75% to payload maxAddrAmount := maxChunkSize / addressSize // each address is about 72 bytes - var limits []limiting.KeyLimit - for _, l := range rpcconfig.Limits(appCfg) { - limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) - } - - limiter, err := limiting.NewSemaphoreLimiter(limits) - fatalOnErr(err) - cfg.maxChunkSize = maxChunkSize cfg.maxAddrAmount = maxAddrAmount - cfg.limiter.Store(limiter) return } @@ -1405,26 +1395,11 @@ func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }}) } - components = append(components, dCmp{"rpc_limiter", c.reloadLimits}) + components = append(components, dCmp{"rpc_limiter", func() error { return initRPCLimiter(c) }}) return components } -func (c 
*cfg) reloadLimits() error { - var limits []limiting.KeyLimit - for _, l := range rpcconfig.Limits(c.appCfg) { - limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) - } - - limiter, err := limiting.NewSemaphoreLimiter(limits) - if err != nil { - return err - } - - c.cfgGRPC.limiter.Store(limiter) - return nil -} - func (c *cfg) reloadPools() error { newSize := replicatorconfig.PoolSize(c.appCfg) c.reloadPool(c.cfgObject.pool.replication, newSize, "replicator.pool_size") diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index e1a273ce4..9ea3dd8a2 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -4,10 +4,12 @@ import ( "context" "crypto/tls" "errors" + "fmt" "net" "time" grpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/grpc" + rpcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/rpc" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" qosInternal "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" @@ -231,3 +233,18 @@ func stopGRPC(ctx context.Context, name string, s *grpc.Server, l *logger.Logger l.Info(ctx, logs.FrostFSNodeGRPCServerStoppedSuccessfully) } + +func initRPCLimiter(c *cfg) error { + var limits []limiting.KeyLimit + for _, l := range rpcconfig.Limits(c.appCfg) { + limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) + } + + limiter, err := limiting.NewSemaphoreLimiter(limits) + if err != nil { + return fmt.Errorf("create RPC limiter: %w", err) + } + + c.cfgGRPC.limiter.Store(limiter) + return nil +} diff --git a/cmd/frostfs-node/main.go b/cmd/frostfs-node/main.go index b53bd8823..0228d2a10 100644 --- a/cmd/frostfs-node/main.go +++ b/cmd/frostfs-node/main.go @@ -117,6 +117,8 @@ func initApp(ctx context.Context, c *cfg) { initAndLog(ctx, c, "apemanager", initAPEManagerService) initAndLog(ctx, c, "control", func(c *cfg) { initControlService(ctx, c) }) + initAndLog(ctx, c, "RPC limiter", func(c *cfg) { fatalOnErr(initRPCLimiter(c)) }) + initAndLog(ctx, c, "morph notifications", func(c *cfg) { listenMorphNotifications(ctx, c) }) } From c66027103910ab413a51dede50f64f767ef582ae Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 28 Feb 2025 15:14:15 +0300 Subject: [PATCH 385/591] [#1658] node: Validate RPC limiter configuration Validate that configured limits match the methods registered earlier. 
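For illustration only (this snippet is not part of the patch): the full method names that may appear under `rpc.limits.methods` are exactly those exposed by the registered gRPC servers, and they can be listed with the standard `GetServiceInfo` API, e.g.:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// printRegisteredMethods prints every full method name ("/<service>/<method>")
// exposed by the given server; these are the only valid keys for RPC limits.
func printRegisteredMethods(srv *grpc.Server) {
	for service, info := range srv.GetServiceInfo() {
		for _, m := range info.Methods {
			fmt.Printf("/%s/%s\n", service, m.Name)
		}
	}
}

func main() {
	srv := grpc.NewServer()
	// Real services (object, tree, control, ...) would be registered here;
	// an empty server simply prints nothing.
	printRegisteredMethods(srv)
}
```

With this validation in place, a typo such as `/neo.fs.v2.object.ObjectService/Putt` in the `rpc.limits` section fails node startup instead of silently creating a limit that is never applied.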
Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/grpc.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/cmd/frostfs-node/grpc.go b/cmd/frostfs-node/grpc.go index 9ea3dd8a2..6b6d44750 100644 --- a/cmd/frostfs-node/grpc.go +++ b/cmd/frostfs-node/grpc.go @@ -240,6 +240,10 @@ func initRPCLimiter(c *cfg) error { limits = append(limits, limiting.KeyLimit{Keys: l.Methods, Limit: l.MaxOps}) } + if err := validateRPCLimits(c, limits); err != nil { + return fmt.Errorf("validate RPC limits: %w", err) + } + limiter, err := limiting.NewSemaphoreLimiter(limits) if err != nil { return fmt.Errorf("create RPC limiter: %w", err) @@ -248,3 +252,35 @@ func initRPCLimiter(c *cfg) error { c.cfgGRPC.limiter.Store(limiter) return nil } + +func validateRPCLimits(c *cfg, limits []limiting.KeyLimit) error { + availableMethods := getAvailableMethods(c.cfgGRPC.servers) + for _, limit := range limits { + for _, method := range limit.Keys { + if _, ok := availableMethods[method]; !ok { + return fmt.Errorf("set limit on an unknown method %q", method) + } + } + } + return nil +} + +func getAvailableMethods(servers []grpcServer) map[string]struct{} { + res := make(map[string]struct{}) + for _, server := range servers { + for _, method := range getMethodsForServer(server.Server) { + res[method] = struct{}{} + } + } + return res +} + +func getMethodsForServer(server *grpc.Server) []string { + var res []string + for service, info := range server.GetServiceInfo() { + for _, method := range info.Methods { + res = append(res, fmt.Sprintf("/%s/%s", service, method.Name)) + } + } + return res +} From 0991077cb364c94e40f6838035acab4754b4d8af Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 28 Feb 2025 14:51:35 +0300 Subject: [PATCH 386/591] [#1657] engine: Fix data race in evacuation tests Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/evacuate_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index b9d7888e7..45c4b696b 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -475,7 +475,7 @@ func TestEvacuateObjectsAsync(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { require.NoError(t, e.Evacuate(egCtx, prm), "first evacuation failed") - st = testWaitForEvacuationCompleted(t, e) + st := testWaitForEvacuationCompleted(t, e) require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count") return nil }) From 98d61250292bf3304043da4fe4007dc7bff9c99f Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 4 Feb 2025 18:20:48 +0300 Subject: [PATCH 387/591] [#1636] config: Add `shard.limits` config Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 12 ++ cmd/frostfs-node/config/engine/config_test.go | 86 ++++++++++++ .../config/engine/shard/config.go | 9 ++ .../config/engine/shard/limits/config.go | 130 ++++++++++++++++++ config/example/node.env | 41 ++++++ config/example/node.json | 70 ++++++++++ config/example/node.yaml | 46 +++++++ docs/storage-node-configuration.md | 59 ++++++++ internal/qos/validate.go | 92 +++++++++++++ 9 files changed, 545 insertions(+) create mode 100644 cmd/frostfs-node/config/engine/shard/limits/config.go create mode 100644 internal/qos/validate.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 3e1bfb4f2..004c8f128 100644 --- a/cmd/frostfs-node/config.go 
+++ b/cmd/frostfs-node/config.go @@ -33,6 +33,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid" @@ -277,6 +278,9 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig a.setMetabaseConfig(&newConfig, oldConfig) a.setGCConfig(&newConfig, oldConfig) + if err := a.setLimits(&newConfig, oldConfig); err != nil { + return err + } a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig) @@ -370,6 +374,14 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() } +func (a *applicationConfiguration) setLimits(newConfig *shardCfg, oldConfig *shardconfig.Config) error { + limitsConfig := oldConfig.Limits() + if err := qos.ValidateConfig(limitsConfig); err != nil { + return err + } + return nil +} + // internals contains application-specific internals that are created // on application startup and are shared b/w the components during // the application life cycle. diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index ef6380a62..b912b5d7d 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -11,6 +11,7 @@ import ( blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" @@ -76,6 +77,7 @@ func TestEngineSection(t *testing.T) { ss := blob.Storages() pl := sc.Pilorama() gc := sc.GC() + limits := sc.Limits() switch num { case 0: @@ -134,6 +136,75 @@ func TestEngineSection(t *testing.T) { require.Equal(t, false, sc.RefillMetabase()) require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.Read() + writeLimits := limits.Write() + require.Equal(t, 30*time.Second, readLimits.IdleTimeout) + require.Equal(t, int64(10_000), readLimits.MaxRunningOps) + require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) + require.Equal(t, 45*time.Second, writeLimits.IdleTimeout) + require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) + require.Equal(t, int64(100), writeLimits.MaxWaitingOps) + require.ElementsMatch(t, readLimits.Tags, + []limitsconfig.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(20), + ReservedOps: toPtr(1000), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(70), + ReservedOps: toPtr(10000), + }, + { + Tag: "background", + Weight: toPtr(5), + LimitOps: toPtr(10000), + ReservedOps: toPtr(0), 
+ }, + { + Tag: "writecache", + Weight: toPtr(5), + LimitOps: toPtr(25000), + }, + { + Tag: "policer", + Weight: toPtr(5), + LimitOps: toPtr(25000), + }, + }) + require.ElementsMatch(t, writeLimits.Tags, + []limitsconfig.IOTagConfig{ + { + Tag: "internal", + Weight: toPtr(200), + ReservedOps: toPtr(100), + LimitOps: toPtr(0), + }, + { + Tag: "client", + Weight: toPtr(700), + ReservedOps: toPtr(1000), + }, + { + Tag: "background", + Weight: toPtr(50), + LimitOps: toPtr(1000), + ReservedOps: toPtr(0), + }, + { + Tag: "writecache", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + { + Tag: "policer", + Weight: toPtr(50), + LimitOps: toPtr(2500), + }, + }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) require.Equal(t, fs.FileMode(0o644), pl.Perm()) @@ -188,6 +259,17 @@ func TestEngineSection(t *testing.T) { require.Equal(t, true, sc.RefillMetabase()) require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) + + readLimits := limits.Read() + writeLimits := limits.Write() + require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps) + require.Equal(t, 0, len(readLimits.Tags)) + require.Equal(t, 0, len(writeLimits.Tags)) } return nil }) @@ -201,3 +283,7 @@ func TestEngineSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) } + +func toPtr(v float64) *float64 { + return &v +} diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index 0620c9f63..e50d56b95 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -4,6 +4,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" blobstorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" + limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" @@ -125,6 +126,14 @@ func (x *Config) GC() *gcconfig.Config { ) } +// Limits returns "limits" subsection as a limitsconfig.Config. +func (x *Config) Limits() *limitsconfig.Config { + return limitsconfig.From( + (*config.Config)(x). + Sub("limits"), + ) +} + // RefillMetabase returns the value of "resync_metabase" config parameter. // // Returns false if the value is not a valid bool. 
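For orientation, a rough usage sketch of the new accessor (not part of the patch; it assumes the `shardconfig` import alias used elsewhere in this change and the `OpConfig`/`IOTagConfig` types introduced in the new `limits` package below):

```go
// dumpShardLimits prints the limits parsed for a single shard section.
func dumpShardLimits(sc *shardconfig.Config) {
	read, write := sc.Limits().Read(), sc.Limits().Write()
	fmt.Println("read: ", read.MaxRunningOps, read.MaxWaitingOps, read.IdleTimeout)
	fmt.Println("write:", write.MaxRunningOps, write.MaxWaitingOps, write.IdleTimeout)
	for _, t := range read.Tags {
		fmt.Printf("  read tag %q: weight=%v limit=%v reserved=%v\n",
			t.Tag, deref(t.Weight), deref(t.LimitOps), deref(t.ReservedOps))
	}
}

// deref returns 0 for unset optional values (Weight, LimitOps and ReservedOps
// are pointers because they may be omitted in the config).
func deref(p *float64) float64 {
	if p == nil {
		return 0
	}
	return *p
}
```

Unset numeric fields fall back to `NoLimit` (for the running/waiting counters) and `DefaultIdleTimeout` (5 minutes), so a shard without a `limits` subsection is effectively unthrottled.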
diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go new file mode 100644 index 000000000..b9b5c4382 --- /dev/null +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -0,0 +1,130 @@ +package limits + +import ( + "math" + "strconv" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "github.com/spf13/cast" +) + +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + +// From wraps config section into Config. +func From(c *config.Config) *Config { + return (*Config)(c) +} + +// Config is a wrapper over the config section +// which provides access to Shard's limits configurations. +type Config config.Config + +// Read returns the value of "read" limits config section. +func (x *Config) Read() OpConfig { + return x.parse("read") +} + +// Write returns the value of "write" limits config section. +func (x *Config) Write() OpConfig { + return x.parse("write") +} + +func (x *Config) parse(sub string) OpConfig { + c := (*config.Config)(x).Sub(sub) + var result OpConfig + + if s := config.Int(c, "max_waiting_ops"); s > 0 { + result.MaxWaitingOps = s + } else { + result.MaxWaitingOps = NoLimit + } + + if s := config.Int(c, "max_running_ops"); s > 0 { + result.MaxRunningOps = s + } else { + result.MaxRunningOps = NoLimit + } + + if s := config.DurationSafe(c, "idle_timeout"); s > 0 { + result.IdleTimeout = s + } else { + result.IdleTimeout = DefaultIdleTimeout + } + + result.Tags = tags(c) + + return result +} + +type OpConfig struct { + // MaxWaitingOps returns the value of "max_waiting_ops" config parameter. + // + // Equals NoLimit if the value is not a positive number. + MaxWaitingOps int64 + // MaxRunningOps returns the value of "max_running_ops" config parameter. + // + // Equals NoLimit if the value is not a positive number. + MaxRunningOps int64 + // IdleTimeout returns the value of "idle_timeout" config parameter. + // + // Equals DefaultIdleTimeout if the value is not a valid duration. + IdleTimeout time.Duration + // Tags returns the value of "tags" config parameter. + // + // Equals nil if the value is not a valid tags config slice. 
+ Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 +} + +func tags(c *config.Config) []IOTagConfig { + c = c.Sub("tags") + var result []IOTagConfig + for i := 0; ; i++ { + tag := config.String(c, strconv.Itoa(i)+".tag") + if tag == "" { + return result + } + + var tagConfig IOTagConfig + tagConfig.Tag = tag + + v := c.Value(strconv.Itoa(i) + ".weight") + if v != nil { + w, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.Weight = &w + } + + v = c.Value(strconv.Itoa(i) + ".limit_ops") + if v != nil { + l, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.LimitOps = &l + } + + v = c.Value(strconv.Itoa(i) + ".reserved_ops") + if v != nil { + r, err := cast.ToFloat64E(v) + panicOnErr(err) + tagConfig.ReservedOps = &r + } + + result = append(result, tagConfig) + } +} + +func panicOnErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/config/example/node.env b/config/example/node.env index 2ebef181a..9bd645344 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -157,6 +157,47 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500 #### Limit of concurrent workers collecting expired objects by the garbage collector FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15 +#### Limits config +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_RUNNING_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_MAX_WAITING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_RUNNING_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_MAX_WAITING_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_IDLE_TIMEOUT=45s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_IDLE_TIMEOUT=30s +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_WEIGHT=20 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_0_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_WEIGHT=70 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_1_RESERVED_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_LIMIT_OPS=10000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_RESERVED_OPS=100 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_TAG=client +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_WEIGHT=700 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_1_RESERVED_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_TAG=background +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_LIMIT_OPS=1000 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_2_RESERVED_OPS=0 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_TAG=writecache +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 
+FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 ## 1 shard ### Flag to refill Metabase from BlobStor diff --git a/config/example/node.json b/config/example/node.json index 0ed72effc..6b799b318 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -221,6 +221,76 @@ "remover_sleep_interval": "2m", "expired_collector_batch_size": 1500, "expired_collector_worker_count": 15 + }, + "limits": { + "read": { + "max_running_ops": 10000, + "max_waiting_ops": 1000, + "idle_timeout": "30s", + "tags": [ + { + "tag": "internal", + "weight": 20, + "limit_ops": 0, + "reserved_ops": 1000 + }, + { + "tag": "client", + "weight": 70, + "reserved_ops": 10000 + }, + { + "tag": "background", + "weight": 5, + "limit_ops": 10000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 5, + "limit_ops": 25000 + }, + { + "tag": "policer", + "weight": 5, + "limit_ops": 25000 + } + ] + }, + "write": { + "max_running_ops": 1000, + "max_waiting_ops": 100, + "idle_timeout": "45s", + "tags": [ + { + "tag": "internal", + "weight": 200, + "limit_ops": 0, + "reserved_ops": 100 + }, + { + "tag": "client", + "weight": 700, + "reserved_ops": 1000 + }, + { + "tag": "background", + "weight": 50, + "limit_ops": 1000, + "reserved_ops": 0 + }, + { + "tag": "writecache", + "weight": 50, + "limit_ops": 2500 + }, + { + "tag": "policer", + "weight": 50, + "limit_ops": 2500 + } + ] + } } }, "1": { diff --git a/config/example/node.yaml b/config/example/node.yaml index 6b810653e..2552a419c 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -227,6 +227,52 @@ storage: expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector + limits: + read: + max_running_ops: 10000 + max_waiting_ops: 1000 + idle_timeout: 30s + tags: + - tag: internal + weight: 20 + limit_ops: 0 + reserved_ops: 1000 + - tag: client + weight: 70 + reserved_ops: 10000 + - tag: background + weight: 5 + limit_ops: 10000 + reserved_ops: 0 + - tag: writecache + weight: 5 + limit_ops: 25000 + - tag: policer + weight: 5 + limit_ops: 25000 + write: + max_running_ops: 1000 + max_waiting_ops: 100 + idle_timeout: 45s + tags: + - tag: internal + weight: 200 + limit_ops: 0 + reserved_ops: 100 + - tag: client + weight: 700 + reserved_ops: 1000 + - tag: background + weight: 50 + limit_ops: 1000 + reserved_ops: 0 + - tag: writecache + weight: 50 + limit_ops: 2500 + - tag: policer + weight: 50 + limit_ops: 2500 + 1: writecache: path: tmp/1/cache # write-cache root directory diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 1eb5437ba..271cc6532 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -195,6 +195,7 @@ The following table describes configuration for each shard. | `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | | `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | | `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | ### `blobstor` subsection @@ -301,6 +302,64 @@ writecache: | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. 
| | `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+### `limits` subsection
+
+```yaml
+limits:
+  read:
+    max_running_ops: 10000
+    max_waiting_ops: 1000
+    idle_timeout: 30s
+    tags:
+      - tag: internal
+        weight: 20
+        limit_ops: 0
+        reserved_ops: 1000
+      - tag: client
+        weight: 70
+        reserved_ops: 10000
+      - tag: background
+        weight: 5
+        limit_ops: 10000
+        reserved_ops: 0
+      - tag: writecache
+        weight: 5
+        limit_ops: 25000
+      - tag: policer
+        weight: 5
+        limit_ops: 25000
+  write:
+    max_running_ops: 1000
+    max_waiting_ops: 100
+    idle_timeout: 45s
+    tags:
+      - tag: internal
+        weight: 200
+        limit_ops: 0
+        reserved_ops: 100
+      - tag: client
+        weight: 700
+        reserved_ops: 1000
+      - tag: background
+        weight: 50
+        limit_ops: 1000
+        reserved_ops: 0
+      - tag: writecache
+        weight: 50
+        limit_ops: 2500
+      - tag: policer
+        weight: 50
+        limit_ops: 2500
+```
+
+| Parameter | Type | Default value | Description |
+| ------------------------- | ---------- | -------------- | ---------------------------------------------------------------------------------------------------------- |
+| `read.max_running_ops` | `int` | 0 (no limit) | The maximum number of running read operations. |
+| `read.max_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting read operations. |
+| `read.idle_timeout` | `duration` | `5m` | Idle timeout of the read scheduler. |
+| `write.max_running_ops` | `int` | 0 (no limit) | The maximum number of running write operations. |
+| `write.max_waiting_ops` | `int` | 0 (no limit) | The maximum number of waiting write operations. |
+| `write.idle_timeout` | `duration` | `5m` | Idle timeout of the write scheduler. |
+| `read.tags`, `write.tags` | `[]tag` | empty | Arrays of per-tag settings for read and write operations respectively. |
+| `tag.tag` | `string` | empty | Tag name. Allowed values: `client`, `internal`, `background`, `writecache`, `policer`. |
+| `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any. |
+| `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. |
+| `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag.
| # `node` section diff --git a/internal/qos/validate.go b/internal/qos/validate.go new file mode 100644 index 000000000..afced345b --- /dev/null +++ b/internal/qos/validate.go @@ -0,0 +1,92 @@ +package qos + +import ( + "errors" + "fmt" + "math" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" +) + +var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") + +type tagConfig struct { + Shares, Limit, Reserved *float64 +} + +func ValidateConfig(c *limits.Config) error { + if err := validateOpConfig(c.Read()); err != nil { + return fmt.Errorf("limits 'read' section validation error: %w", err) + } + if err := validateOpConfig(c.Write()); err != nil { + return fmt.Errorf("limits 'write' section validation error: %w", err) + } + return nil +} + +func validateOpConfig(c limits.OpConfig) error { + if c.MaxRunningOps <= 0 { + return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) + } + if c.MaxWaitingOps <= 0 { + return fmt.Errorf("invalid 'max_waiting_ops = %d': must be greater than zero", c.MaxWaitingOps) + } + if c.IdleTimeout <= 0 { + return fmt.Errorf("invalid 'idle_timeout = %s': must be greater than zero", c.IdleTimeout.String()) + } + if err := validateTags(c.Tags); err != nil { + return fmt.Errorf("'tags' config section validation error: %w", err) + } + return nil +} + +func validateTags(configTags []limits.IOTagConfig) error { + tags := map[IOTag]tagConfig{ + IOTagClient: {}, + IOTagInternal: {}, + IOTagBackground: {}, + IOTagWritecache: {}, + IOTagPolicer: {}, + } + for _, t := range configTags { + tag, err := FromRawString(t.Tag) + if err != nil { + return fmt.Errorf("invalid tag %s: %w", t.Tag, err) + } + if _, ok := tags[tag]; !ok { + return fmt.Errorf("tag %s is not configurable", t.Tag) + } + tags[tag] = tagConfig{ + Shares: t.Weight, + Limit: t.LimitOps, + Reserved: t.ReservedOps, + } + } + idx := 0 + var shares float64 + for t, v := range tags { + if idx == 0 { + idx++ + shares = float64Value(v.Shares) + } else if (shares != 0 && float64Value(v.Shares) == 0) || (shares == 0 && float64Value(v.Shares) != 0) { + return errWeightsMustBeSpecified + } + if float64Value(v.Shares) < 0 || math.IsNaN(float64Value(v.Shares)) { + return fmt.Errorf("invalid weight for tag %s: must be positive value", t.String()) + } + if float64Value(v.Limit) < 0 || math.IsNaN(float64Value(v.Limit)) { + return fmt.Errorf("invalid limit_ops for tag %s: must be positive value", t.String()) + } + if float64Value(v.Reserved) < 0 || math.IsNaN(float64Value(v.Reserved)) { + return fmt.Errorf("invalid reserved_ops for tag %s: must be positive value", t.String()) + } + } + return nil +} + +func float64Value(f *float64) float64 { + if f == nil { + return 0.0 + } + return *f +} From 92a67a6716723f81dabb4f809ff040ef7014903a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Feb 2025 12:31:01 +0300 Subject: [PATCH 388/591] [#1636] qos: Add Limiter Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 12 +++- internal/assert/cond.go | 9 +++ internal/qos/limiter.go | 132 +++++++++++++++++++++++++++++++++++++ internal/qos/validate.go | 11 +++- 4 files changed, 160 insertions(+), 4 deletions(-) create mode 100644 internal/assert/cond.go create mode 100644 internal/qos/limiter.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 004c8f128..c625a041f 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ 
-136,6 +136,7 @@ type shardCfg struct { refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode + limiter qos.Limiter metaCfg struct { path string @@ -278,7 +279,7 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig a.setMetabaseConfig(&newConfig, oldConfig) a.setGCConfig(&newConfig, oldConfig) - if err := a.setLimits(&newConfig, oldConfig); err != nil { + if err := a.setLimiter(&newConfig, oldConfig); err != nil { return err } @@ -374,11 +375,16 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() } -func (a *applicationConfiguration) setLimits(newConfig *shardCfg, oldConfig *shardconfig.Config) error { +func (a *applicationConfiguration) setLimiter(newConfig *shardCfg, oldConfig *shardconfig.Config) error { limitsConfig := oldConfig.Limits() - if err := qos.ValidateConfig(limitsConfig); err != nil { + limiter, err := qos.NewLimiter(limitsConfig) + if err != nil { return err } + if newConfig.limiter != nil { + newConfig.limiter.Close() + } + newConfig.limiter = limiter return nil } diff --git a/internal/assert/cond.go b/internal/assert/cond.go new file mode 100644 index 000000000..701036fa8 --- /dev/null +++ b/internal/assert/cond.go @@ -0,0 +1,9 @@ +package assert + +import "strings" + +func True(cond bool, details ...string) { + if !cond { + panic(strings.Join(details, " ")) + } +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go new file mode 100644 index 000000000..d2a1919f1 --- /dev/null +++ b/internal/qos/limiter.go @@ -0,0 +1,132 @@ +package qos + +import ( + "context" + "fmt" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" +) + +const ( + defaultIdleTimeout time.Duration = 0 + defaultShare float64 = 1.0 +) + +type ReleaseFunc scheduling.ReleaseFunc + +type Limiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) + Close() +} + +func NewLimiter(c *limits.Config) (Limiter, error) { + if err := validateConfig(c); err != nil { + return nil, err + } + read, write := c.Read(), c.Write() + if isNoop(read, write) { + return noopLimiterInstance, nil + } + readScheduler, err := scheduling.NewMClock( + uint64(read.MaxRunningOps), uint64(read.MaxWaitingOps), + converToSchedulingTags(read.Tags), read.IdleTimeout) + if err != nil { + return nil, fmt.Errorf("create read scheduler: %w", err) + } + writeScheduler, err := scheduling.NewMClock( + uint64(write.MaxRunningOps), uint64(write.MaxWaitingOps), + converToSchedulingTags(write.Tags), write.IdleTimeout) + if err != nil { + return nil, fmt.Errorf("create write scheduler: %w", err) + } + return &mClockLimiter{ + readScheduler: readScheduler, + writeScheduler: writeScheduler, + }, nil +} + +func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { + result := make(map[string]scheduling.TagInfo) + for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} { + result[tag.String()] = scheduling.TagInfo{ + Share: defaultShare, + } + } + for _, l := range limits { + v := result[l.Tag] + if l.Weight != nil && *l.Weight != 0 { + v.Share = *l.Weight + } + if l.LimitOps != nil && *l.LimitOps != 0 { + v.LimitIOPS = 
l.LimitOps + } + if l.ReservedOps != nil && *l.ReservedOps != 0 { + v.ReservedIOPS = l.ReservedOps + } + result[l.Tag] = v + } + return result +} + +var ( + _ Limiter = (*noopLimiter)(nil) + releaseStub ReleaseFunc = func() {} + noopLimiterInstance = &noopLimiter{} +) + +type noopLimiter struct{} + +func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { + return releaseStub, nil +} + +func (n *noopLimiter) Close() {} + +var _ Limiter = (*mClockLimiter)(nil) + +type mClockLimiter struct { + readScheduler *scheduling.MClock + writeScheduler *scheduling.MClock +} + +func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { + tag, ok := tagging.IOTagFromContext(ctx) + assert.True(ok, "request has no tag") + if tag == IOTagCritical.String() { + return releaseStub, nil + } + rel, err := n.readScheduler.RequestArrival(ctx, tag) + if err != nil { + return nil, err + } + return ReleaseFunc(rel), nil +} + +func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = IOTagClient.String() + } + if tag == IOTagCritical.String() { + return releaseStub, nil + } + rel, err := n.writeScheduler.RequestArrival(ctx, tag) + if err != nil { + return nil, err + } + return ReleaseFunc(rel), nil +} + +func (n *mClockLimiter) Close() { + n.readScheduler.Close() + n.writeScheduler.Close() +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go index afced345b..43aa74942 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -14,7 +14,7 @@ type tagConfig struct { Shares, Limit, Reserved *float64 } -func ValidateConfig(c *limits.Config) error { +func validateConfig(c *limits.Config) error { if err := validateOpConfig(c.Read()); err != nil { return fmt.Errorf("limits 'read' section validation error: %w", err) } @@ -90,3 +90,12 @@ func float64Value(f *float64) float64 { } return *f } + +func isNoop(read, write limits.OpConfig) bool { + return read.MaxRunningOps == limits.NoLimit && + read.MaxWaitingOps == limits.NoLimit && + write.MaxRunningOps == limits.NoLimit && + write.MaxWaitingOps == limits.NoLimit && + len(read.Tags) == 0 && + len(write.Tags) == 0 +} From e0dc3c3d0c4b948152cd054bda5545c64f413153 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Feb 2025 15:57:27 +0300 Subject: [PATCH 389/591] [#1636] shard: Add limiter usage Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 + internal/qos/limiter.go | 4 + pkg/local_object_storage/engine/container.go | 2 +- pkg/local_object_storage/engine/inhume.go | 2 +- pkg/local_object_storage/shard/container.go | 26 ++- pkg/local_object_storage/shard/control.go | 4 + pkg/local_object_storage/shard/count.go | 6 + pkg/local_object_storage/shard/delete.go | 6 + pkg/local_object_storage/shard/exists.go | 17 +- pkg/local_object_storage/shard/gc.go | 177 +++++++++++++------ pkg/local_object_storage/shard/get.go | 6 + pkg/local_object_storage/shard/head.go | 6 + pkg/local_object_storage/shard/inhume.go | 6 + pkg/local_object_storage/shard/list.go | 40 ++++- pkg/local_object_storage/shard/lock.go | 21 ++- pkg/local_object_storage/shard/put.go | 6 + pkg/local_object_storage/shard/range.go | 6 + pkg/local_object_storage/shard/rebuild.go | 1 + pkg/local_object_storage/shard/select.go | 6 + pkg/local_object_storage/shard/shard.go | 10 ++ pkg/local_object_storage/shard/tree.go | 85 +++++++++ 
pkg/local_object_storage/shard/writecache.go | 19 ++ 22 files changed, 390 insertions(+), 67 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index c625a041f..b42275538 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1072,6 +1072,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return pool }), + shard.WithLimiter(shCfg.limiter), } return sh } diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index d2a1919f1..996cebea1 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -79,6 +79,10 @@ var ( noopLimiterInstance = &noopLimiter{} ) +func NewNoopLimiter() Limiter { + return &noopLimiter{} +} + type noopLimiter struct{} func (n *noopLimiter) ReadRequest(context.Context) (ReleaseFunc, error) { diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index b2d7a1037..3160d7f83 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) - csRes, err := sh.Shard.ContainerSize(csPrm) + csRes, err := sh.Shard.ContainerSize(ctx, csPrm) if err != nil { e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index fb802ef2a..c8ee33b53 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -339,7 +339,7 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid var drop []cid.ID for id := range idMap { prm.SetContainerID(id) - s, err := sh.ContainerSize(prm) + s, err := sh.ContainerSize(ctx, prm) if err != nil { e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err)) failed = true diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 0309f0c81..b4015ae8d 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -26,7 +26,7 @@ func (r ContainerSizeRes) Size() uint64 { return r.size } -func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { s.m.RLock() defer s.m.RUnlock() @@ -34,6 +34,12 @@ func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { return ContainerSizeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerSizeRes{}, err + } + defer release() + size, err := s.metaBase.ContainerSize(prm.cnr) if err != nil { return ContainerSizeRes{}, fmt.Errorf("get container size: %w", err) @@ -69,6 +75,12 @@ func (s *Shard) ContainerCount(ctx context.Context, prm ContainerCountPrm) (Cont return ContainerCountRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ContainerCountRes{}, err + } + defer release() + counters, err := s.metaBase.ContainerCount(ctx, prm.ContainerID) if err != nil { return ContainerCountRes{}, fmt.Errorf("get container counters: %w", err) @@ -100,6 +112,12 @@ func (s *Shard) DeleteContainerSize(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := 
s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerSize(ctx, id) } @@ -122,5 +140,11 @@ func (s *Shard) DeleteContainerCount(ctx context.Context, id cid.ID) error { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.metaBase.DeleteContainerCount(ctx, id) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index fedde2206..3520277c0 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -395,6 +395,10 @@ func (s *Shard) Close(ctx context.Context) error { s.gc.stop(ctx) } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + return lastErr } diff --git a/pkg/local_object_storage/shard/count.go b/pkg/local_object_storage/shard/count.go index b3bc6a30b..8dc1f0522 100644 --- a/pkg/local_object_storage/shard/count.go +++ b/pkg/local_object_storage/shard/count.go @@ -23,6 +23,12 @@ func (s *Shard) LogicalObjectsCount(ctx context.Context) (uint64, error) { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + cc, err := s.metaBase.ObjectCounters() if err != nil { return 0, err diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 55231b032..0101817a8 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -54,6 +54,12 @@ func (s *Shard) delete(ctx context.Context, prm DeletePrm, skipFailed bool) (Del return DeleteRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return DeleteRes{}, err + } + defer release() + result := DeleteRes{} for _, addr := range prm.addr { select { diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 82ce48dde..2c11b6b01 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ -53,10 +53,6 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { )) defer span.End() - var exists bool - var locked bool - var err error - s.m.RLock() defer s.m.RUnlock() @@ -64,7 +60,18 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) { return ExistsRes{}, ErrShardDisabled } else if s.info.EvacuationInProgress { return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound)) - } else if s.info.Mode.NoMetabase() { + } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ExistsRes{}, err + } + defer release() + + var exists bool + var locked bool + + if s.info.Mode.NoMetabase() { var p common.ExistsPrm p.Address = prm.Address diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 4a5ec7a71..32a377cd5 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -291,28 +291,7 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { s.log.Debug(ctx, logs.ShardGCRemoveGarbageStarted) defer s.log.Debug(ctx, logs.ShardGCRemoveGarbageCompleted) - buf := make([]oid.Address, 0, s.rmBatchSize) - - var iterPrm meta.GarbageIterationPrm - iterPrm.SetHandler(func(g meta.GarbageObject) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - buf = append(buf, g.Address()) - - if len(buf) == s.rmBatchSize { - return meta.ErrInterruptIterator - } - - 
return nil - }) - - // iterate over metabase's objects with GC mark - // (no more than s.rmBatchSize objects) - err := s.metaBase.IterateOverGarbage(ctx, iterPrm) + buf, err := s.getGarbage(ctx) if err != nil { s.log.Warn(ctx, logs.ShardIteratorOverMetabaseGraveyardFailed, zap.Error(err), @@ -344,6 +323,39 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) { return } +func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + + buf := make([]oid.Address, 0, s.rmBatchSize) + + var iterPrm meta.GarbageIterationPrm + iterPrm.SetHandler(func(g meta.GarbageObject) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + buf = append(buf, g.Address()) + + if len(buf) == s.rmBatchSize { + return meta.ErrInterruptIterator + } + + return nil + }) + + if err := s.metaBase.IterateOverGarbage(ctx, iterPrm); err != nil { + return nil, err + } + + return buf, nil +} + func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) @@ -422,18 +434,9 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } - var inhumePrm meta.InhumePrm - - inhumePrm.SetAddresses(expired...) - inhumePrm.SetGCMark() - - // inhume the collected objects - res, err := s.metaBase.Inhume(ctx, inhumePrm) + res, err := s.inhumeGC(ctx, expired) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardCouldNotInhumeTheObjects, zap.Error(err)) return } @@ -451,6 +454,12 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) } func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) ([]oid.Address, error) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + result := make([]oid.Address, 0, len(source)) parentToChildren, err := s.metaBase.GetChildren(ctx, source) if err != nil { @@ -464,6 +473,19 @@ func (s *Shard) getExpiredWithLinked(ctx context.Context, source []oid.Address) return result, nil } +func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeRes, error) { + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return meta.InhumeRes{}, err + } + defer release() + + var inhumePrm meta.InhumePrm + inhumePrm.SetAddresses(addrs...) 
+ inhumePrm.SetGCMark() + return s.metaBase.Inhume(ctx, inhumePrm) +} + func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { var err error startedAt := time.Now() @@ -505,11 +527,17 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { return } - err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) + s.m.RUnlock() + return + } + err = s.metaBase.IterateOverGraveyard(ctx, iterPrm) + release() if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() - return } @@ -598,7 +626,13 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo return ErrDegradedMode } - err := s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + + err = s.metaBase.IterateExpired(ctx, epoch, func(expiredObject *meta.ExpiredObject) error { select { case <-ctx.Done(): return meta.ErrInterruptIterator @@ -621,6 +655,12 @@ func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + return s.metaBase.FilterExpired(ctx, epoch, addresses) } @@ -636,12 +676,15 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston return } - res, err := s.metaBase.InhumeTombstones(ctx, tss) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.InhumeTombstones(ctx, tss) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardCouldNotMarkTombstonesAsGarbage, zap.Error(err)) return } @@ -664,11 +707,16 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] if s.GetMode().NoMetabase() { return } - unlocked, err := s.metaBase.FreeLockedBy(lockers) + + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.Error(err), - ) + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + return + } + unlocked, err := s.metaBase.FreeLockedBy(lockers) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } @@ -676,13 +724,15 @@ func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers [] var pInhume meta.InhumePrm pInhume.SetAddresses(lockers...) 
pInhume.SetForceGCMark() - - res, err := s.metaBase.Inhume(ctx, pInhume) + release, err = s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) + return + } + res, err := s.metaBase.Inhume(ctx, pInhume) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToMarkLockersAsGarbage, zap.Error(err)) return } @@ -721,12 +771,15 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { return } - _, err := s.metaBase.FreeLockedBy(lockers) + release, err := s.opsLimiter.WriteRequest(ctx) if err != nil { - s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, - zap.Error(err), - ) - + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) + return + } + _, err = s.metaBase.FreeLockedBy(lockers) + release() + if err != nil { + s.log.Warn(ctx, logs.ShardFailureToUnlockObjects, zap.Error(err)) return } } @@ -750,7 +803,13 @@ func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { } func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroSizeContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroSizeContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return @@ -762,7 +821,13 @@ func (s *Shard) collectExpiredContainerSizeMetrics(ctx context.Context, epoch ui } func (s *Shard) collectExpiredContainerCountMetrics(ctx context.Context, epoch uint64) { + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) + return + } ids, err := s.metaBase.ZeroCountContainers(ctx) + release() if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToCollectZeroCountContainers, zap.Uint64("epoch", epoch), zap.Error(err)) return diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 05823c62b..28f8912be 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -111,6 +111,12 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) { return c.Get(ctx, prm.addr) } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return GetRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index ff57e3bf9..34b8290d6 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -81,6 +81,12 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) { headParams.SetAddress(prm.addr) headParams.SetRaw(prm.raw) + release, limitErr := s.opsLimiter.ReadRequest(ctx) + if limitErr != nil { + return HeadRes{}, limitErr + } + defer release() + var res meta.GetRes res, err = s.metaBase.Get(ctx, headParams) obj = res.Header() diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 9d5f66063..c0fd65f4b 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -81,6 +81,12 @@ func (s *Shard) Inhume(ctx 
context.Context, prm InhumePrm) (InhumeRes, error) { return InhumeRes{}, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return InhumeRes{}, err + } + defer release() + if s.hasWriteCache() { for i := range prm.target { _ = s.writeCache.Delete(ctx, prm.target[i]) diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index 7bc5ead1d..af87981ca 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -106,6 +106,12 @@ func (s *Shard) List(ctx context.Context) (res SelectRes, err error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, err + } + defer release() + lst, err := s.metaBase.Containers(ctx) if err != nil { return res, fmt.Errorf("list stored containers: %w", err) @@ -145,6 +151,12 @@ func (s *Shard) ListContainers(ctx context.Context, _ ListContainersPrm) (ListCo return ListContainersRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListContainersRes{}, err + } + defer release() + containers, err := s.metaBase.Containers(ctx) if err != nil { return ListContainersRes{}, fmt.Errorf("get list of containers: %w", err) @@ -173,6 +185,12 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List return ListWithCursorRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return ListWithCursorRes{}, err + } + defer release() + var metaPrm meta.ListPrm metaPrm.SetCount(prm.count) metaPrm.SetCursor(prm.cursor) @@ -202,9 +220,15 @@ func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContai return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverContainersPrm metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverContainers(ctx, metaPrm) + err = s.metaBase.IterateOverContainers(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over containers: %w", err) } @@ -227,11 +251,17 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv return ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return err + } + defer release() + var metaPrm meta.IterateOverObjectsInContainerPrm metaPrm.ContainerID = prm.ContainerID metaPrm.ObjectType = prm.ObjectType metaPrm.Handler = prm.Handler - err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) + err = s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm) if err != nil { return fmt.Errorf("iterate over objects: %w", err) } @@ -251,6 +281,12 @@ func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAlive return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() + var metaPrm meta.CountAliveObjectsInContainerPrm metaPrm.ObjectType = prm.ObjectType metaPrm.ContainerID = prm.ContainerID diff --git a/pkg/local_object_storage/shard/lock.go b/pkg/local_object_storage/shard/lock.go index 31ca16aa1..9c392fdac 100644 --- a/pkg/local_object_storage/shard/lock.go +++ b/pkg/local_object_storage/shard/lock.go @@ -38,7 +38,13 @@ func (s *Shard) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked [] return ErrDegradedMode } - err := s.metaBase.Lock(ctx, idCnr, locker, locked) + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer 
release() + + err = s.metaBase.Lock(ctx, idCnr, locker, locked) if err != nil { return fmt.Errorf("metabase lock: %w", err) } @@ -61,6 +67,12 @@ func (s *Shard) IsLocked(ctx context.Context, addr oid.Address) (bool, error) { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() + var prm meta.IsLockedPrm prm.SetAddress(addr) @@ -86,5 +98,12 @@ func (s *Shard) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error if m.NoMetabase() { return nil, ErrDegradedMode } + + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() + return s.metaBase.GetLocks(ctx, addr) } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index 3f23111af..f8cb00a31 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -67,6 +67,12 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) { var res common.PutRes + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return PutRes{}, err + } + defer release() + // exist check are not performed there, these checks should be executed // ahead of `Put` by storage engine tryCache := s.hasWriteCache() && !m.NoMetabase() diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index 701268820..443689104 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -131,6 +131,12 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) { return obj, nil } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return RngRes{}, err + } + defer release() + skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() obj, hasMeta, err := s.fetchObjectData(ctx, prm.addr, skipMeta, cb, wc) diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 0593f5894..9fe1bbe8c 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -106,6 +106,7 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo } log.Info(ctx, logs.BlobstoreRebuildStarted) ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + // TODO use shard limiter if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index c7c7e11c2..fbc751e26 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -60,6 +60,12 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) { return SelectRes{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return SelectRes{}, nil + } + defer release() + var selectPrm meta.SelectPrm selectPrm.SetFilters(prm.filters) selectPrm.SetContainerID(prm.cnr) diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 1eb7f14d0..b9ec05f01 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -7,6 +7,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -98,6 +99,8 @@ type cfg struct { reportErrorFunc func(ctx context.Context, selfID string, message string, err error) containerInfo container.InfoProvider + + opsLimiter qos.Limiter } func defaultCfg() *cfg { @@ -109,6 +112,7 @@ func defaultCfg() *cfg { zeroSizeContainersCallback: func(context.Context, []cid.ID) {}, zeroCountContainersCallback: func(context.Context, []cid.ID) {}, metricsWriter: noopMetrics{}, + opsLimiter: qos.NewNoopLimiter(), } } @@ -368,6 +372,12 @@ func WithContainerInfoProvider(containerInfo container.InfoProvider) Option { } } +func WithLimiter(l qos.Limiter) Option { + return func(c *cfg) { + c.opsLimiter = l + } +} + func (s *Shard) fillInfo() { s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index 01a014cec..e9cd5f8c1 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -43,6 +43,11 @@ func (s *Shard) TreeMove(ctx context.Context, d pilorama.CIDDescriptor, treeID s if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeMove(ctx, d, treeID, m) } @@ -75,6 +80,11 @@ func (s *Shard) TreeAddByPath(ctx context.Context, d pilorama.CIDDescriptor, tre if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeAddByPath(ctx, d, treeID, attr, path, meta) } @@ -103,6 +113,11 @@ func (s *Shard) TreeApply(ctx context.Context, cnr cidSDK.ID, treeID string, m * if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApply(ctx, cnr, treeID, m, backgroundSync) } @@ -130,6 +145,11 @@ func (s *Shard) TreeApplyBatch(ctx context.Context, cnr cidSDK.ID, treeID string if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyBatch(ctx, cnr, treeID, m) } @@ -157,6 +177,11 @@ func (s *Shard) TreeGetByPath(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetByPath(ctx, cid, treeID, attr, path, latest) } @@ -182,6 +207,11 @@ func (s *Shard) TreeGetMeta(ctx context.Context, cid cidSDK.ID, treeID string, n if s.info.Mode.NoMetabase() { return pilorama.Meta{}, 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Meta{}, 0, err + } + defer release() return s.pilorama.TreeGetMeta(ctx, cid, treeID, nodeID) } @@ -207,6 +237,11 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeGetChildren(ctx, cid, treeID, nodeID) } @@ -231,6 +266,11 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, 
cid cidSDK.ID, treeID if s.info.Mode.NoMetabase() { return nil, last, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, last, err + } + defer release() return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) } @@ -256,6 +296,11 @@ func (s *Shard) TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, if s.info.Mode.NoMetabase() { return pilorama.Move{}, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return pilorama.Move{}, err + } + defer release() return s.pilorama.TreeGetOpLog(ctx, cid, treeID, height) } @@ -280,6 +325,11 @@ func (s *Shard) TreeDrop(ctx context.Context, cid cidSDK.ID, treeID string) erro if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeDrop(ctx, cid, treeID) } @@ -303,6 +353,11 @@ func (s *Shard) TreeList(ctx context.Context, cid cidSDK.ID) ([]string, error) { if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeList(ctx, cid) } @@ -326,6 +381,11 @@ func (s *Shard) TreeHeight(ctx context.Context, cid cidSDK.ID, treeID string) (u if s.pilorama == nil { return 0, ErrPiloramaDisabled } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeHeight(ctx, cid, treeID) } @@ -350,6 +410,11 @@ func (s *Shard) TreeExists(ctx context.Context, cid cidSDK.ID, treeID string) (b if s.info.Mode.NoMetabase() { return false, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } + defer release() return s.pilorama.TreeExists(ctx, cid, treeID) } @@ -378,6 +443,11 @@ func (s *Shard) TreeUpdateLastSyncHeight(ctx context.Context, cid cidSDK.ID, tre if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeUpdateLastSyncHeight(ctx, cid, treeID, height) } @@ -402,6 +472,11 @@ func (s *Shard) TreeLastSyncHeight(ctx context.Context, cid cidSDK.ID, treeID st if s.info.Mode.NoMetabase() { return 0, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return 0, err + } + defer release() return s.pilorama.TreeLastSyncHeight(ctx, cid, treeID) } @@ -423,6 +498,11 @@ func (s *Shard) TreeListTrees(ctx context.Context, prm pilorama.TreeListTreesPrm if s.info.Mode.NoMetabase() { return nil, ErrDegradedMode } + release, err := s.opsLimiter.ReadRequest(ctx) + if err != nil { + return nil, err + } + defer release() return s.pilorama.TreeListTrees(ctx, prm) } @@ -452,5 +532,10 @@ func (s *Shard) TreeApplyStream(ctx context.Context, cnr cidSDK.ID, treeID strin if s.info.Mode.NoMetabase() { return ErrDegradedMode } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() return s.pilorama.TreeApplyStream(ctx, cnr, treeID, source) } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index f655e477a..9edb89df8 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -67,6 +67,12 @@ func (s *Shard) FlushWriteCache(ctx context.Context, p FlushWriteCachePrm) error return ErrDegradedMode } + release, err := 
s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Flush(ctx, p.ignoreErrors, p.seal) } @@ -124,6 +130,13 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { close(started) defer cleanup() + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) + return + } + defer release() + s.log.Info(ctx, logs.StartedWritecacheSealAsync) if err := s.writeCache.Seal(ctx, prm); err != nil { s.log.Warn(ctx, logs.FailedToSealWritecacheAsync, zap.Error(err)) @@ -138,5 +151,11 @@ func (s *Shard) SealWriteCache(ctx context.Context, p SealWriteCachePrm) error { return nil } } + release, err := s.opsLimiter.WriteRequest(ctx) + if err != nil { + return err + } + defer release() + return s.writeCache.Seal(ctx, prm) } From ceff5e1f6a08c12d84a04fcfa2e23f097bfa97fa Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 6 Feb 2025 17:24:23 +0300 Subject: [PATCH 390/591] [#1636] storage: Refactor shard rebuild Drop redundant interfaces. Rename fields. Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobovniczatree/rebuild.go | 13 ++--- .../blobovniczatree/rebuild_failover_test.go | 6 +-- .../blobstor/blobovniczatree/rebuild_test.go | 33 ++++++------- .../blobstor/common/rebuild.go | 17 ++++--- pkg/local_object_storage/blobstor/rebuild.go | 13 ++--- pkg/local_object_storage/engine/rebuild.go | 22 ++++++++- pkg/local_object_storage/shard/rebuild.go | 47 ++++--------------- 7 files changed, 72 insertions(+), 79 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index 16ef2b180..cbd45c3b4 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -79,7 +79,7 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common. 
var completedDBCount uint32 for _, db := range dbs { b.log.Debug(ctx, logs.BlobovniczaTreeRebuildingBlobovnicza, zap.String("path", db)) - movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.WorkerLimiter) + movedObjects, err := b.rebuildDB(ctx, db, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += movedObjects if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeRebuildingBlobovniczaFailed, zap.String("path", db), zap.Uint64("moved_objects_count", movedObjects), zap.Error(err)) @@ -195,7 +195,7 @@ func (b *Blobovniczas) rebuildBySize(ctx context.Context, path string, targetFil return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil } -func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { +func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { shDB := b.getBlobovnicza(ctx, path) blz, err := shDB.Open(ctx) if err != nil { @@ -212,7 +212,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M if err != nil { return 0, err } - migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter) + migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, concLimiter) if err != nil { return migratedObjects, err } @@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun }, nil } -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) { +func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { var result atomic.Uint64 batch := make(map[oid.Address][]byte) @@ -265,12 +265,13 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn eg, egCtx := errgroup.WithContext(ctx) for addr, data := range batch { - if err := limiter.AcquireWorkSlot(egCtx); err != nil { + release, err := concLimiter.AcquireWorkSlot(egCtx) + if err != nil { _ = eg.Wait() return result.Load(), err } eg.Go(func() error { - defer limiter.ReleaseWorkSlot() + defer release() err := b.moveObject(egCtx, blz, blzPath, addr, data, meta) if err == nil { result.Add(1) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index 2f58624aa..91578d5e8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -162,9 +162,9 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object guard: &sync.Mutex{}, } rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, - FillPercent: 1, + MetaStorage: metaStub, + Limiter: &rebuildLimiterStub{}, + FillPercent: 1, }) require.NoError(t, err) require.Equal(t, uint64(1), rRes.ObjectsMoved) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index aae72b5ff..e26c485ba 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ 
-77,9 +77,9 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { guard: &sync.Mutex{}, } rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, - FillPercent: 60, + MetaStorage: metaStub, + Limiter: &rebuildLimiterStub{}, + FillPercent: 60, }) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -129,9 +129,9 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { guard: &sync.Mutex{}, } rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, - FillPercent: 90, // 64KB / 100KB = 64% + MetaStorage: metaStub, + Limiter: &rebuildLimiterStub{}, + FillPercent: 90, // 64KB / 100KB = 64% }) require.NoError(t, err) dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0 @@ -194,9 +194,9 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { guard: &sync.Mutex{}, } rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, - FillPercent: 80, + MetaStorage: metaStub, + Limiter: &rebuildLimiterStub{}, + FillPercent: 80, }) require.NoError(t, err) require.Equal(t, uint64(49), rRes.FilesRemoved) @@ -267,9 +267,9 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Init()) rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ - MetaStorage: metaStub, - WorkerLimiter: &rebuildLimiterStub{}, - FillPercent: 80, + MetaStorage: metaStub, + Limiter: &rebuildLimiterStub{}, + FillPercent: 80, }) require.NoError(t, err) require.Equal(t, uint64(49), rRes.FilesRemoved) @@ -340,7 +340,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { } var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.WorkerLimiter = &rebuildLimiterStub{} + rPrm.Limiter = &rebuildLimiterStub{} rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -429,7 +429,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.WorkerLimiter = &rebuildLimiterStub{} + rPrm.Limiter = &rebuildLimiterStub{} rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -464,5 +464,6 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr type rebuildLimiterStub struct{} -func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) error { return nil } -func (s *rebuildLimiterStub) ReleaseWorkSlot() {} +func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { + return func() {}, nil +} diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go index 19e181ee7..4615190f7 100644 --- a/pkg/local_object_storage/blobstor/common/rebuild.go +++ b/pkg/local_object_storage/blobstor/common/rebuild.go @@ -12,16 +12,21 @@ type RebuildRes struct { } type RebuildPrm struct { - MetaStorage MetaStorage - WorkerLimiter ConcurrentWorkersLimiter - FillPercent int + MetaStorage MetaStorage + Limiter RebuildLimiter + FillPercent int } type MetaStorage interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -type ConcurrentWorkersLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() +type ReleaseFunc func() + +type 
ConcurrencyLimiter interface { + AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) +} + +type RebuildLimiter interface { + ConcurrencyLimiter } diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go index 2a6b94789..f28816555 100644 --- a/pkg/local_object_storage/blobstor/rebuild.go +++ b/pkg/local_object_storage/blobstor/rebuild.go @@ -13,19 +13,14 @@ type StorageIDUpdate interface { UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error } -type ConcurrentWorkersLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error { +func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, concLimiter common.RebuildLimiter, fillPercent int) error { var summary common.RebuildRes var rErr error for _, storage := range b.storage { res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{ - MetaStorage: upd, - WorkerLimiter: limiter, - FillPercent: fillPercent, + MetaStorage: upd, + Limiter: concLimiter, + FillPercent: fillPercent, }) summary.FilesRemoved += res.FilesRemoved summary.ObjectsMoved += res.ObjectsMoved diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go index 83c6a54ed..a29dd7ed9 100644 --- a/pkg/local_object_storage/engine/rebuild.go +++ b/pkg/local_object_storage/engine/rebuild.go @@ -4,6 +4,7 @@ import ( "context" "sync" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "go.opentelemetry.io/otel/attribute" @@ -41,7 +42,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } resGuard := &sync.Mutex{} - limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit) + concLimiter := &concurrencyLimiter{semaphore: make(chan struct{}, prm.ConcurrencyLimit)} eg, egCtx := errgroup.WithContext(ctx) for _, shardID := range prm.ShardIDs { @@ -61,7 +62,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{ - ConcurrencyLimiter: limiter, + ConcurrencyLimiter: concLimiter, TargetFillPercent: prm.TargetFillPercent, }) @@ -88,3 +89,20 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes } return res, nil } + +type concurrencyLimiter struct { + semaphore chan struct{} +} + +func (l *concurrencyLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + select { + case l.semaphore <- struct{}{}: + return l.releaseWorkSlot, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (l *concurrencyLimiter) releaseWorkSlot() { + <-l.semaphore +} diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 9fe1bbe8c..3aa94d5a3 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -8,6 +8,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -20,37 +21,9 @@ import ( var ErrRebuildInProgress = errors.New("shard rebuild in progress") -type RebuildWorkerLimiter interface { - AcquireWorkSlot(ctx context.Context) error - ReleaseWorkSlot() -} - -type rebuildLimiter struct { - semaphore chan struct{} -} - -func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter { - return &rebuildLimiter{ - semaphore: make(chan struct{}, workersCount), - } -} - -func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error { - select { - case l.semaphore <- struct{}{}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (l *rebuildLimiter) ReleaseWorkSlot() { - <-l.semaphore -} - type rebuildTask struct { - limiter RebuildWorkerLimiter - fillPercent int + concurrencyLimiter common.RebuildLimiter + fillPercent int } type rebuilder struct { @@ -90,14 +63,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D if !ok { continue } - runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter) + runRebuild(ctx, bs, mb, log, t.fillPercent, t.concurrencyLimiter) } } }() } func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger, - fillPercent int, limiter RebuildWorkerLimiter, + fillPercent int, concLimiter common.RebuildLimiter, ) { select { case <-ctx.Done(): @@ -107,21 +80,21 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo log.Info(ctx, logs.BlobstoreRebuildStarted) ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) // TODO use shard limiter - if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil { + if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { log.Info(ctx, logs.BlobstoreRebuildCompletedSuccessfully) } } -func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int, +func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter common.RebuildLimiter, fillPercent int, ) error { select { case <-ctx.Done(): return ctx.Err() case r.tasks <- rebuildTask{ - limiter: limiter, - fillPercent: fillPercent, + concurrencyLimiter: limiter, + fillPercent: fillPercent, }: return nil default: @@ -170,7 +143,7 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres } type RebuildPrm struct { - ConcurrencyLimiter RebuildWorkerLimiter + ConcurrencyLimiter common.ConcurrencyLimiter TargetFillPercent uint32 } From b9360be1dcd8b08ab20e2a73a150ceadeeeea966 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 7 Feb 2025 17:25:47 +0300 Subject: [PATCH 391/591] [#1636] blobovniczatree: Use RebuildLimiter Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobovniczatree/rebuild.go | 40 +++++++++++++++---- .../blobstor/blobovniczatree/rebuild_test.go | 8 ++++ .../blobstor/common/rebuild.go | 6 +++ pkg/local_object_storage/shard/rebuild.go | 28 ++++++++++++- 4 files changed, 73 insertions(+), 9 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index cbd45c3b4..7ef3317fd 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -50,7 +50,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm 
common.RebuildPrm) (comm var res common.RebuildRes b.log.Debug(ctx, logs.BlobovniczaTreeCompletingPreviousRebuild) - completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage) + completedPreviosMoves, err := b.completeIncompletedMove(ctx, prm.MetaStorage, prm.Limiter) res.ObjectsMoved += completedPreviosMoves if err != nil { b.log.Warn(ctx, logs.BlobovniczaTreeCompletedPreviousRebuildFailed, zap.Error(err)) @@ -238,7 +238,7 @@ func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (fun }, nil } -func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, concLimiter common.RebuildLimiter) (uint64, error) { +func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.RebuildLimiter) (uint64, error) { var result atomic.Uint64 batch := make(map[oid.Address][]byte) @@ -253,7 +253,12 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn }) for { - _, err := blz.Iterate(ctx, prm) + release, err := limiter.ReadRequest(ctx) + if err != nil { + return result.Load(), err + } + _, err = blz.Iterate(ctx, prm) + release() if err != nil && !errors.Is(err, errBatchFull) { return result.Load(), err } @@ -265,14 +270,19 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn eg, egCtx := errgroup.WithContext(ctx) for addr, data := range batch { - release, err := concLimiter.AcquireWorkSlot(egCtx) + release, err := limiter.AcquireWorkSlot(egCtx) if err != nil { _ = eg.Wait() return result.Load(), err } eg.Go(func() error { defer release() - err := b.moveObject(egCtx, blz, blzPath, addr, data, meta) + moveRelease, err := limiter.WriteRequest(ctx) + if err != nil { + return err + } + err = b.moveObject(egCtx, blz, blzPath, addr, data, meta) + moveRelease() if err == nil { result.Add(1) } @@ -360,7 +370,7 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error { return b.dropDirectoryIfEmpty(filepath.Dir(path)) } -func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) { +func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage, rateLimiter common.RateLimiter) (uint64, error) { var count uint64 var rebuildTempFilesToRemove []string err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) { @@ -373,13 +383,24 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co } defer shDB.Close(ctx) + release, err := rateLimiter.ReadRequest(ctx) + if err != nil { + return false, err + } incompletedMoves, err := blz.ListMoveInfo(ctx) + release() if err != nil { return true, err } for _, move := range incompletedMoves { - if err := b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore); err != nil { + release, err := rateLimiter.WriteRequest(ctx) + if err != nil { + return false, err + } + err = b.performMove(ctx, blz, shDB.SystemPath(), move, metaStore) + release() + if err != nil { return true, err } count++ @@ -389,9 +410,14 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co return false, nil }) for _, tmp := range rebuildTempFilesToRemove { + release, err := rateLimiter.WriteRequest(ctx) + if err != nil { + return count, err + } if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil { b.log.Warn(ctx, logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err)) } + release() 
} return count, err } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index e26c485ba..865d04a8f 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -467,3 +467,11 @@ type rebuildLimiterStub struct{} func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { return func() {}, nil } + +func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) { + return func() {}, nil +} + +func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) { + return func() {}, nil +} diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go index 4615190f7..788fe66f2 100644 --- a/pkg/local_object_storage/blobstor/common/rebuild.go +++ b/pkg/local_object_storage/blobstor/common/rebuild.go @@ -27,6 +27,12 @@ type ConcurrencyLimiter interface { AcquireWorkSlot(ctx context.Context) (ReleaseFunc, error) } +type RateLimiter interface { + ReadRequest(context.Context) (ReleaseFunc, error) + WriteRequest(context.Context) (ReleaseFunc, error) +} + type RebuildLimiter interface { ConcurrencyLimiter + RateLimiter } diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go index 3aa94d5a3..20f1f2b6f 100644 --- a/pkg/local_object_storage/shard/rebuild.go +++ b/pkg/local_object_storage/shard/rebuild.go @@ -79,7 +79,6 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo } log.Info(ctx, logs.BlobstoreRebuildStarted) ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) - // TODO use shard limiter if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, concLimiter, fillPercent); err != nil { log.Warn(ctx, logs.FailedToRebuildBlobstore, zap.Error(err)) } else { @@ -165,5 +164,30 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error { return ErrDegradedMode } - return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent)) + limiter := &rebuildLimiter{ + concurrencyLimiter: p.ConcurrencyLimiter, + rateLimiter: s.opsLimiter, + } + return s.rb.ScheduleRebuild(ctx, limiter, int(p.TargetFillPercent)) +} + +var _ common.RebuildLimiter = (*rebuildLimiter)(nil) + +type rebuildLimiter struct { + concurrencyLimiter common.ConcurrencyLimiter + rateLimiter qos.Limiter +} + +func (r *rebuildLimiter) AcquireWorkSlot(ctx context.Context) (common.ReleaseFunc, error) { + return r.concurrencyLimiter.AcquireWorkSlot(ctx) +} + +func (r *rebuildLimiter) ReadRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.ReadRequest(ctx) + return common.ReleaseFunc(release), err +} + +func (r *rebuildLimiter) WriteRequest(ctx context.Context) (common.ReleaseFunc, error) { + release, err := r.rateLimiter.WriteRequest(ctx) + return common.ReleaseFunc(release), err } From 2162f8e189a318550f55825755862a4e2a6a3e7b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 10 Feb 2025 17:55:34 +0300 Subject: [PATCH 392/591] [#1636] object: Fix IO tag adjustment for Put/Patch There was no tag adjustment for CloseAndRecv. 
Signed-off-by: Dmitrii Stepanov --- pkg/services/object/qos.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/pkg/services/object/qos.go b/pkg/services/object/qos.go index 145a316e2..01eb1ea8d 100644 --- a/pkg/services/object/qos.go +++ b/pkg/services/object/qos.go @@ -3,6 +3,8 @@ package object import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" ) @@ -120,13 +122,24 @@ type qosSendRecv[TReq qosVerificationHeader, TResp any] interface { type qosWriteStream[TReq qosVerificationHeader, TResp any] struct { s qosSendRecv[TReq, TResp] adj AdjustIOTag + + ioTag string + ioTagDefined bool } func (q *qosWriteStream[TReq, TResp]) CloseAndRecv(ctx context.Context) (TResp, error) { + if q.ioTagDefined { + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) + } return q.s.CloseAndRecv(ctx) } func (q *qosWriteStream[TReq, TResp]) Send(ctx context.Context, req TReq) error { - ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + if !q.ioTagDefined { + ctx = q.adj.AdjustIncomingTag(ctx, req.GetVerificationHeader().GetBodySignature().GetKey()) + q.ioTag, q.ioTagDefined = tagging.IOTagFromContext(ctx) + } + assert.True(q.ioTagDefined, "io tag undefined after incoming tag adjustment") + ctx = tagging.ContextWithIOTag(ctx, q.ioTag) return q.s.Send(ctx, req) } From c2d855aedd11258c37126c5920e7e0ffde42425d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 12 Feb 2025 16:45:39 +0300 Subject: [PATCH 393/591] [#1636] qos: Return Resource Exhausted error Signed-off-by: Dmitrii Stepanov --- internal/qos/limiter.go | 8 ++++++++ pkg/local_object_storage/engine/engine.go | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 996cebea1..3b6c6547c 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -2,6 +2,7 @@ package qos import ( "context" + "errors" "fmt" "time" @@ -9,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) const ( @@ -110,6 +112,9 @@ func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { } rel, err := n.readScheduler.RequestArrival(ctx, tag) if err != nil { + if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) { + return nil, &apistatus.ResourceExhausted{} + } return nil, err } return ReleaseFunc(rel), nil @@ -125,6 +130,9 @@ func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { } rel, err := n.writeScheduler.RequestArrival(ctx, tag) if err != nil { + if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) { + return nil, &apistatus.ResourceExhausted{} + } return nil, err } return ReleaseFunc(rel), nil diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 85652b3ae..e13252b82 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + 
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" ) @@ -176,7 +177,10 @@ func (e *StorageEngine) reportShardError( } func isLogical(err error) bool { - return errors.As(err, &logicerr.Logical{}) || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) + return errors.As(err, &logicerr.Logical{}) || + errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) || + errors.As(err, new(*apistatus.ResourceExhausted)) } // Option represents StorageEngine's constructor option. From 8ed71a969e256ee2410a1f089c8e41103ac63143 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 14 Feb 2025 10:05:06 +0300 Subject: [PATCH 394/591] [#1636] qos: Add semaphore limiter If no tags specified, then limiter could be optimized to use atomic semaphore. Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- internal/qos/limiter.go | 50 ++++++++++++++++++++------------------- internal/qos/semaphore.go | 39 ++++++++++++++++++++++++++++++ 4 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 internal/qos/semaphore.go diff --git a/go.mod b/go.mod index 2bfc3abfe..bbb817957 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 4a7dfd4dc..0c66f4555 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf h1:ik2aMBpTJJpoZe2ffcGShXRkrvny65NEPLVt67KmH/A= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250213125059-356851eed3bf/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 h1:QnAt5b2R6+hQthMOIn5ECfLAlVD8IAE5JRm1NCCOmuE= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 3b6c6547c..b1406a7f3 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -7,7 +7,6 @@ import ( "time" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -26,6 +25,11 @@ type Limiter interface { Close() } +type scheduler interface { + RequestArrival(ctx context.Context, tag string) (scheduling.ReleaseFunc, error) + Close() +} + func NewLimiter(c *limits.Config) (Limiter, error) { if err := validateConfig(c); err != nil { return nil, err @@ -34,15 +38,11 @@ func NewLimiter(c *limits.Config) (Limiter, error) { if isNoop(read, write) { return noopLimiterInstance, nil } - readScheduler, err := scheduling.NewMClock( - uint64(read.MaxRunningOps), uint64(read.MaxWaitingOps), - converToSchedulingTags(read.Tags), read.IdleTimeout) + readScheduler, err := createScheduler(c.Read()) if err != nil { return nil, fmt.Errorf("create read scheduler: %w", err) } - writeScheduler, err := scheduling.NewMClock( - uint64(write.MaxRunningOps), uint64(write.MaxWaitingOps), - converToSchedulingTags(write.Tags), write.IdleTimeout) + writeScheduler, err := createScheduler(c.Write()) if err != nil { return nil, fmt.Errorf("create write scheduler: %w", err) } @@ -52,6 +52,15 @@ func NewLimiter(c *limits.Config) (Limiter, error) { }, nil } +func createScheduler(config limits.OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit { + return newSemaphoreScheduler(config.MaxRunningOps), nil + } + return scheduling.NewMClock( + uint64(config.MaxRunningOps), uint64(config.MaxWaitingOps), + converToSchedulingTags(config.Tags), config.IdleTimeout) +} + func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { result := make(map[string]scheduling.TagInfo) for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} { @@ -100,27 +109,19 @@ func (n *noopLimiter) Close() {} var _ Limiter = (*mClockLimiter)(nil) type mClockLimiter struct { - readScheduler *scheduling.MClock - writeScheduler *scheduling.MClock + readScheduler scheduler + writeScheduler scheduler } func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { - tag, ok := tagging.IOTagFromContext(ctx) - assert.True(ok, "request has no tag") - if tag == IOTagCritical.String() { - return releaseStub, nil - } - rel, err := n.readScheduler.RequestArrival(ctx, tag) - if err != nil { - if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) { - return nil, &apistatus.ResourceExhausted{} - } - return nil, err - } - return ReleaseFunc(rel), nil + return requestArrival(ctx, n.readScheduler) } func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { + return requestArrival(ctx, n.writeScheduler) +} + +func requestArrival(ctx context.Context, s scheduler) (ReleaseFunc, error) { tag, ok := tagging.IOTagFromContext(ctx) if !ok { tag = IOTagClient.String() @@ -128,9 +129,10 @@ func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { if tag == IOTagCritical.String() { return releaseStub, nil } - rel, err := n.writeScheduler.RequestArrival(ctx, tag) + rel, err := s.RequestArrival(ctx, tag) if err != nil { - if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) { + if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) { return nil, 
&apistatus.ResourceExhausted{} } return nil, err diff --git a/internal/qos/semaphore.go b/internal/qos/semaphore.go new file mode 100644 index 000000000..74e6928f3 --- /dev/null +++ b/internal/qos/semaphore.go @@ -0,0 +1,39 @@ +package qos + +import ( + "context" + "errors" + + qosSemaphore "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting/semaphore" + "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" +) + +var ( + _ scheduler = (*semaphore)(nil) + errSemaphoreLimitExceeded = errors.New("semaphore limit exceeded") +) + +type semaphore struct { + s *qosSemaphore.Semaphore +} + +func newSemaphoreScheduler(size int64) *semaphore { + return &semaphore{ + s: qosSemaphore.NewSemaphore(size), + } +} + +func (s *semaphore) Close() {} + +func (s *semaphore) RequestArrival(ctx context.Context, _ string) (scheduling.ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if s.s.Acquire() { + return s.s.Release, nil + } + return nil, errSemaphoreLimitExceeded +} From 401d96a89e179026b210729d261399b7d5874905 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 21 Feb 2025 16:37:00 +0300 Subject: [PATCH 395/591] [#1636] config: Refactor newConfig and oldConfig `newConfig` is actually target config to set config values from source (which is called `oldConfig`). Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 80 +++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index b42275538..afde0bbc0 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -256,42 +256,42 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) } -func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error { - var newConfig shardCfg +func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *shardconfig.Config) error { + var target shardCfg - newConfig.refillMetabase = oldConfig.RefillMetabase() - newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount() - newConfig.mode = oldConfig.Mode() - newConfig.compress = oldConfig.Compress() - newConfig.estimateCompressibility = oldConfig.EstimateCompressibility() - newConfig.estimateCompressibilityThreshold = oldConfig.EstimateCompressibilityThreshold() - newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes() - newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit() + target.refillMetabase = source.RefillMetabase() + target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() + target.mode = source.Mode() + target.compress = source.Compress() + target.estimateCompressibility = source.EstimateCompressibility() + target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold() + target.uncompressableContentType = source.UncompressableContentTypes() + target.smallSizeObjectLimit = source.SmallSizeLimit() - a.setShardWriteCacheConfig(&newConfig, oldConfig) + a.setShardWriteCacheConfig(&target, source) - a.setShardPiloramaConfig(c, &newConfig, oldConfig) + a.setShardPiloramaConfig(c, &target, source) - if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil { + if err := a.setShardStorageConfig(&target, source); err != nil { return err } - a.setMetabaseConfig(&newConfig, oldConfig) + a.setMetabaseConfig(&target, 
source) - a.setGCConfig(&newConfig, oldConfig) - if err := a.setLimiter(&newConfig, oldConfig); err != nil { + a.setGCConfig(&target, source) + if err := a.setLimiter(&target, source); err != nil { return err } - a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig) + a.EngineCfg.shards = append(a.EngineCfg.shards, target) return nil } -func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - writeCacheCfg := oldConfig.WriteCache() +func (a *applicationConfiguration) setShardWriteCacheConfig(target *shardCfg, source *shardconfig.Config) { + writeCacheCfg := source.WriteCache() if writeCacheCfg.Enabled() { - wc := &newConfig.writecacheCfg + wc := &target.writecacheCfg wc.enabled = true wc.path = writeCacheCfg.Path() @@ -304,10 +304,10 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, } } -func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) { +func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, target *shardCfg, source *shardconfig.Config) { if config.BoolSafe(c.Sub("tree"), "enabled") { - piloramaCfg := oldConfig.Pilorama() - pr := &newConfig.piloramaCfg + piloramaCfg := source.Pilorama() + pr := &target.piloramaCfg pr.enabled = true pr.path = piloramaCfg.Path() @@ -318,8 +318,8 @@ func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newC } } -func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error { - blobStorCfg := oldConfig.BlobStor() +func (a *applicationConfiguration) setShardStorageConfig(target *shardCfg, source *shardconfig.Config) error { + blobStorCfg := source.BlobStor() storagesCfg := blobStorCfg.Storages() ss := make([]subStorageCfg, 0, len(storagesCfg)) @@ -353,13 +353,13 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol ss = append(ss, sCfg) } - newConfig.subStorages = ss + target.subStorages = ss return nil } -func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - metabaseCfg := oldConfig.Metabase() - m := &newConfig.metaCfg +func (a *applicationConfiguration) setMetabaseConfig(target *shardCfg, source *shardconfig.Config) { + metabaseCfg := source.Metabase() + m := &target.metaCfg m.path = metabaseCfg.Path() m.perm = metabaseCfg.BoltDB().Perm() @@ -367,24 +367,24 @@ func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldCon m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize() } -func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) { - gcCfg := oldConfig.GC() - newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() - newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() - newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() - newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() +func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardconfig.Config) { + gcCfg := source.GC() + target.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize() + target.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval() + target.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize() + target.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount() } -func (a *applicationConfiguration) setLimiter(newConfig *shardCfg, oldConfig *shardconfig.Config) 
error { - limitsConfig := oldConfig.Limits() +func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { + limitsConfig := source.Limits() limiter, err := qos.NewLimiter(limitsConfig) if err != nil { return err } - if newConfig.limiter != nil { - newConfig.limiter.Close() + if target.limiter != nil { + target.limiter.Close() } - newConfig.limiter = limiter + target.limiter = limiter return nil } From 6c6e463b73d0d7afbdd7f7142d0c9f20a59f871c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 24 Feb 2025 16:06:07 +0300 Subject: [PATCH 396/591] [#1636] shard: Change ops limiter on shard reload Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/control.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 3520277c0..6dee2da3f 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -449,6 +449,10 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error { return err } } + if c.opsLimiter != nil { + s.opsLimiter.Close() + s.opsLimiter = c.opsLimiter + } return s.setMode(ctx, c.info.Mode) } From eb8b9b2b3bd933e4fa7bfc7ffc8ea80beb49d522 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 28 Feb 2025 10:04:24 +0300 Subject: [PATCH 397/591] [#1636] blobovniczatree: Validate limiter release in rebuild unit tests Signed-off-by: Dmitrii Stepanov --- .../blobovniczatree/rebuild_failover_test.go | 4 +- .../blobstor/blobovniczatree/rebuild_test.go | 54 +++++++++++++++---- 2 files changed, 47 insertions(+), 11 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index 91578d5e8..8832603c4 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -161,9 +161,10 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object storageIDs: make(map[oid.Address][]byte), guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ MetaStorage: metaStub, - Limiter: &rebuildLimiterStub{}, + Limiter: limiter, FillPercent: 1, }) require.NoError(t, err) @@ -171,6 +172,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object require.Equal(t, uint64(0), rRes.FilesRemoved) require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) blz := blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "1.db"))) require.NoError(t, blz.Open(context.Background())) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index 865d04a8f..9c971bfb6 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -2,7 +2,9 @@ package blobovniczatree import ( "context" + "fmt" "sync" + "sync/atomic" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" @@ -76,9 +78,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ MetaStorage: metaStub, - Limiter: &rebuildLimiterStub{}, 
+ Limiter: limiter, FillPercent: 60, }) require.NoError(t, err) @@ -94,6 +97,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) }) t.Run("no rebuild single db", func(t *testing.T) { @@ -128,9 +132,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ MetaStorage: metaStub, - Limiter: &rebuildLimiterStub{}, + Limiter: limiter, FillPercent: 90, // 64KB / 100KB = 64% }) require.NoError(t, err) @@ -146,6 +151,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) }) t.Run("rebuild by fill percent", func(t *testing.T) { @@ -193,9 +199,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ MetaStorage: metaStub, - Limiter: &rebuildLimiterStub{}, + Limiter: limiter, FillPercent: 80, }) require.NoError(t, err) @@ -215,6 +222,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) }) t.Run("rebuild by overflow", func(t *testing.T) { @@ -266,9 +274,10 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Open(mode.ComponentReadWrite)) require.NoError(t, b.Init()) + limiter := &rebuildLimiterStub{} rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{ MetaStorage: metaStub, - Limiter: &rebuildLimiterStub{}, + Limiter: limiter, FillPercent: 80, }) require.NoError(t, err) @@ -285,6 +294,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) }) } @@ -338,9 +348,10 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.Limiter = &rebuildLimiterStub{} + rPrm.Limiter = limiter rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -356,6 +367,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) } func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) { @@ -427,9 +439,10 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta storageIDs: storageIDs, guard: &sync.Mutex{}, } + limiter := &rebuildLimiterStub{} var rPrm common.RebuildPrm rPrm.MetaStorage = metaStub - rPrm.Limiter = &rebuildLimiterStub{} + rPrm.Limiter = limiter rPrm.FillPercent = 1 rRes, err := b.Rebuild(context.Background(), rPrm) require.NoError(t, err) @@ -445,6 +458,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta } require.NoError(t, b.Close(context.Background())) + require.NoError(t, limiter.ValidateReleased()) } type storageIDUpdateStub struct { @@ -462,16 +476,36 @@ func (s *storageIDUpdateStub) UpdateStorageID(ctx context.Context, addr oid.Addr return nil } -type rebuildLimiterStub struct{} +type 
rebuildLimiterStub struct { + slots atomic.Int64 + readRequests atomic.Int64 + writeRequests atomic.Int64 +} func (s *rebuildLimiterStub) AcquireWorkSlot(context.Context) (common.ReleaseFunc, error) { - return func() {}, nil + s.slots.Add(1) + return func() { s.slots.Add(-1) }, nil } func (s *rebuildLimiterStub) ReadRequest(context.Context) (common.ReleaseFunc, error) { - return func() {}, nil + s.readRequests.Add(1) + return func() { s.readRequests.Add(-1) }, nil } func (s *rebuildLimiterStub) WriteRequest(context.Context) (common.ReleaseFunc, error) { - return func() {}, nil + s.writeRequests.Add(1) + return func() { s.writeRequests.Add(-1) }, nil +} + +func (s *rebuildLimiterStub) ValidateReleased() error { + if v := s.slots.Load(); v != 0 { + return fmt.Errorf("invalid slots value %d", v) + } + if v := s.readRequests.Load(); v != 0 { + return fmt.Errorf("invalid read requests value %d", v) + } + if v := s.writeRequests.Load(); v != 0 { + return fmt.Errorf("invalid write requests value %d", v) + } + return nil } From 4685afb1dc5737a70833231031582fda29476835 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 28 Feb 2025 10:21:30 +0300 Subject: [PATCH 398/591] [#1636] engine: Validate limiter release in unit tests Signed-off-by: Dmitrii Stepanov --- .../engine/engine_test.go | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 926ff43f3..7ddde1f02 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -3,8 +3,10 @@ package engine import ( "context" "path/filepath" + "sync/atomic" "testing" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" @@ -90,6 +92,7 @@ func testGetDefaultShardOptions(t testing.TB) []shard.Option { ), shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))), shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...), + shard.WithLimiter(&testQoSLimiter{t: t}), } } @@ -151,3 +154,26 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes }, }, smallFileStorage, largeFileStorage } + +var _ qos.Limiter = (*testQoSLimiter)(nil) + +type testQoSLimiter struct { + t testing.TB + read atomic.Int64 + write atomic.Int64 +} + +func (t *testQoSLimiter) Close() { + require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0") + require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0") +} + +func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { + t.read.Add(1) + return func() { t.read.Add(-1) }, nil +} + +func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { + t.write.Add(1) + return func() { t.write.Add(-1) }, nil +} From 5ba0e2918e60146fe64d9e5c159629f279fe079d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 4 Mar 2025 10:16:20 +0300 Subject: [PATCH 399/591] [#1661] cli: Clarify `--rpc-endpoint` values `multiaddr` is not something an average user knows. Personally, I have never used it in CLI. On the other hand, we need to connect with TLS quite often, so it needs to be mentioned in help. 
Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/internal/commonflags/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/internal/commonflags/flags.go b/cmd/frostfs-cli/internal/commonflags/flags.go index cd46d63eb..fad1f6183 100644 --- a/cmd/frostfs-cli/internal/commonflags/flags.go +++ b/cmd/frostfs-cli/internal/commonflags/flags.go @@ -28,7 +28,7 @@ const ( RPC = "rpc-endpoint" RPCShorthand = "r" RPCDefault = "" - RPCUsage = "Remote node address (as 'multiaddr' or ':')" + RPCUsage = "Remote node address (':' or 'grpcs://:')" Timeout = "timeout" TimeoutShorthand = "t" From bf8914fedc401fc7a8d17e7a8dc23f6776102a66 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 11 Nov 2024 12:33:31 +0300 Subject: [PATCH 400/591] [#1416] lens/explorer: Support metabase schema v3 Signed-off-by: Aleksey Savchuk --- cmd/frostfs-lens/internal/meta/tui.go | 49 ++++++++++++++++++- .../schema/metabase/buckets/parsers.go | 17 ++++++- .../schema/metabase/buckets/prefix.go | 40 ++++++++------- .../schema/metabase/buckets/string.go | 6 +-- .../internal/schema/metabase/buckets/types.go | 19 +++++-- .../internal/schema/metabase/parser.go | 27 +++++++++- .../schema/metabase/records/detailed.go | 8 +++ .../schema/metabase/records/filter.go | 23 +++++++++ .../schema/metabase/records/parsers.go | 42 ++++++++++++++++ .../schema/metabase/records/string.go | 20 ++++++++ .../internal/schema/metabase/records/types.go | 11 +++++ 11 files changed, 233 insertions(+), 29 deletions(-) diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go index 5a41f945c..7b0e25f3d 100644 --- a/cmd/frostfs-lens/internal/meta/tui.go +++ b/cmd/frostfs-lens/internal/meta/tui.go @@ -2,13 +2,17 @@ package meta import ( "context" + "encoding/binary" + "errors" "fmt" common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal" + schemaCommon "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui" "github.com/rivo/tview" "github.com/spf13/cobra" + "go.etcd.io/bbolt" ) var tuiCMD = &cobra.Command{ @@ -27,6 +31,11 @@ Available search filters: var initialPrompt string +var parserPerSchemaVersion = map[uint64]schemaCommon.Parser{ + 2: schema.MetabaseParserV2, + 3: schema.MetabaseParserV3, +} + func init() { common.AddComponentPathFlag(tuiCMD, &vPath) @@ -49,12 +58,22 @@ func runTUI(cmd *cobra.Command) error { } defer db.Close() + schemaVersion, hasVersion := lookupSchemaVersion(cmd, db) + if !hasVersion { + return errors.New("couldn't detect schema version") + } + + metabaseParser, ok := parserPerSchemaVersion[schemaVersion] + if !ok { + return fmt.Errorf("unknown schema version %d", schemaVersion) + } + // Need if app was stopped with Ctrl-C. 
ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() app := tview.NewApplication() - ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil) + ui := tui.NewUI(ctx, app, db, metabaseParser, nil) _ = ui.AddFilter("cid", tui.CIDParser, "CID") _ = ui.AddFilter("oid", tui.OIDParser, "OID") @@ -69,3 +88,31 @@ func runTUI(cmd *cobra.Command) error { app.SetRoot(ui, true).SetFocus(ui) return app.Run() } + +var ( + shardInfoBucket = []byte{5} + versionRecord = []byte("version") +) + +func lookupSchemaVersion(cmd *cobra.Command, db *bbolt.DB) (version uint64, ok bool) { + err := db.View(func(tx *bbolt.Tx) error { + bkt := tx.Bucket(shardInfoBucket) + if bkt == nil { + return nil + } + rec := bkt.Get(versionRecord) + if rec == nil { + return nil + } + + version = binary.LittleEndian.Uint64(rec) + ok = true + + return nil + }) + if err != nil { + common.ExitOnErr(cmd, fmt.Errorf("couldn't lookup version: %w", err)) + } + + return +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go index 24cc0e52d..4e6bbf08a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go @@ -80,10 +80,15 @@ var ( }, ) - UserAttributeParser = NewUserAttributeKeyBucketParser( + UserAttributeParserV2 = NewUserAttributeKeyBucketParser( NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), ) + UserAttributeParserV3 = NewUserAttributeKeyBucketParserWithSpecificKeys( + NewUserAttributeValueBucketParser(records.UserAttributeRecordParser), + []string{"FilePath", "S3-Access-Box-CRDT-Name"}, + ) + PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{ cidResolver: StrictResolver, oidResolver: StrictResolver, @@ -108,4 +113,14 @@ var ( cidResolver: StrictResolver, oidResolver: LenientResolver, }) + + ExpirationEpochToObjectParser = NewPrefixBucketParser(ExpirationEpochToObject, records.ExpirationEpochToObjectRecordParser, Resolvers{ + cidResolver: LenientResolver, + oidResolver: LenientResolver, + }) + + ObjectToExpirationEpochParser = NewPrefixContainerBucketParser(ObjectToExpirationEpoch, records.ObjectToExpirationEpochRecordParser, Resolvers{ + cidResolver: StrictResolver, + oidResolver: LenientResolver, + }) ) diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go index 2fb122940..42a24c594 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go @@ -22,27 +22,31 @@ const ( Split ContainerCounters ECInfo + ExpirationEpochToObject + ObjectToExpirationEpoch ) var x = map[Prefix]string{ - Graveyard: "Graveyard", - Garbage: "Garbage", - ToMoveIt: "To Move It", - ContainerVolume: "Container Volume", - Locked: "Locked", - ShardInfo: "Shard Info", - Primary: "Primary", - Lockers: "Lockers", - Tombstone: "Tombstone", - Small: "Small", - Root: "Root", - Owner: "Owner", - UserAttribute: "User Attribute", - PayloadHash: "Payload Hash", - Parent: "Parent", - Split: "Split", - ContainerCounters: "Container Counters", - ECInfo: "EC Info", + Graveyard: "Graveyard", + Garbage: "Garbage", + ToMoveIt: "To Move It", + ContainerVolume: "Container Volume", + Locked: "Locked", + ShardInfo: "Shard Info", + Primary: "Primary", + Lockers: "Lockers", + Tombstone: "Tombstone", + Small: "Small", + Root: "Root", + Owner: "Owner", + UserAttribute: "User 
Attribute", + PayloadHash: "Payload Hash", + Parent: "Parent", + Split: "Split", + ContainerCounters: "Container Counters", + ECInfo: "EC Info", + ExpirationEpochToObject: "Exp. Epoch to Object", + ObjectToExpirationEpoch: "Object to Exp. Epoch", } func (p Prefix) String() string { diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go index db90bddbd..62d126f88 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go @@ -9,7 +9,7 @@ import ( func (b *PrefixBucket) String() string { return common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ) } @@ -17,7 +17,7 @@ func (b *PrefixContainerBucket) String() string { return fmt.Sprintf( "%s CID %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple(b.id.String(), tcell.ColorAqua), ) @@ -34,7 +34,7 @@ func (b *ContainerBucket) String() string { func (b *UserAttributeKeyBucket) String() string { return fmt.Sprintf("%s CID %s ATTR-KEY %s", common.FormatSimple( - fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime, + fmt.Sprintf("(%2d %-20s)", b.prefix, b.prefix), tcell.ColorLime, ), common.FormatSimple( fmt.Sprintf("%-44s", b.id), tcell.ColorAqua, diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go index 82b47dd85..7355c3d9e 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go @@ -2,6 +2,7 @@ package buckets import ( "errors" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -57,10 +58,11 @@ var ( ) var ( - ErrNotBucket = errors.New("not a bucket") - ErrInvalidKeyLength = errors.New("invalid key length") - ErrInvalidValueLength = errors.New("invalid value length") - ErrInvalidPrefix = errors.New("invalid prefix") + ErrNotBucket = errors.New("not a bucket") + ErrInvalidKeyLength = errors.New("invalid key length") + ErrInvalidValueLength = errors.New("invalid value length") + ErrInvalidPrefix = errors.New("invalid prefix") + ErrUnexpectedAttributeKey = errors.New("unexpected attribute key") ) func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser { @@ -132,6 +134,10 @@ func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Pa } func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { + return NewUserAttributeKeyBucketParserWithSpecificKeys(next, nil) +} + +func NewUserAttributeKeyBucketParserWithSpecificKeys(next common.Parser, keys []string) common.Parser { return func(key, value []byte) (common.SchemaEntry, common.Parser, error) { if value != nil { return nil, nil, ErrNotBucket @@ -147,6 +153,11 @@ func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser { return nil, nil, err } b.key = string(key[33:]) + + if len(keys) != 0 && !slices.Contains(keys, b.key) { + return nil, nil, ErrUnexpectedAttributeKey + } + return &b, next, nil } } diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go index ea095e207..4cc9e8765 100644 
--- a/cmd/frostfs-lens/internal/schema/metabase/parser.go +++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go @@ -5,7 +5,30 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets" ) -var MetabaseParser = common.WithFallback( +var MetabaseParserV3 = common.WithFallback( + common.Any( + buckets.GraveyardParser, + buckets.GarbageParser, + buckets.ContainerVolumeParser, + buckets.LockedParser, + buckets.ShardInfoParser, + buckets.PrimaryParser, + buckets.LockersParser, + buckets.TombstoneParser, + buckets.SmallParser, + buckets.RootParser, + buckets.UserAttributeParserV3, + buckets.ParentParser, + buckets.SplitParser, + buckets.ContainerCountersParser, + buckets.ECInfoParser, + buckets.ExpirationEpochToObjectParser, + buckets.ObjectToExpirationEpochParser, + ), + common.RawParser.ToFallbackParser(), +) + +var MetabaseParserV2 = common.WithFallback( common.Any( buckets.GraveyardParser, buckets.GarbageParser, @@ -18,7 +41,7 @@ var MetabaseParser = common.WithFallback( buckets.SmallParser, buckets.RootParser, buckets.OwnerParser, - buckets.UserAttributeParser, + buckets.UserAttributeParserV2, buckets.PayloadHashParser, buckets.ParentParser, buckets.SplitParser, diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go index 2dda15b4f..477c4fc9d 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go @@ -63,3 +63,11 @@ func (r *ContainerCountersRecord) DetailedString() string { func (r *ECInfoRecord) DetailedString() string { return spew.Sdump(*r) } + +func (r *ExpirationEpochToObjectRecord) DetailedString() string { + return spew.Sdump(*r) +} + +func (r *ObjectToExpirationEpochRecord) DetailedString() string { + return spew.Sdump(*r) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go index 880a7a8ff..e038911d7 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go @@ -143,3 +143,26 @@ func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult { return common.No } } + +func (r *ExpirationEpochToObjectRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "cid": + id := val.(cid.ID) + return common.IfThenElse(r.cnt.Equals(id), common.Yes, common.No) + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} + +func (r *ObjectToExpirationEpochRecord) Filter(typ string, val any) common.FilterResult { + switch typ { + case "oid": + id := val.(oid.ID) + return common.IfThenElse(r.obj.Equals(id), common.Yes, common.No) + default: + return common.No + } +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go index 1b070e2a0..5d846cb75 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go @@ -249,3 +249,45 @@ func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, e } return &r, nil, nil } + +func ExpirationEpochToObjectRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 72 { + return nil, nil, ErrInvalidKeyLength + } + + var ( + r ExpirationEpochToObjectRecord + err error 
+ ) + + r.epoch = binary.BigEndian.Uint64(key[:8]) + if err = r.cnt.Decode(key[8:40]); err != nil { + return nil, nil, err + } + if err = r.obj.Decode(key[40:]); err != nil { + return nil, nil, err + } + + return &r, nil, nil +} + +func ObjectToExpirationEpochRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) { + if len(key) != 32 { + return nil, nil, ErrInvalidKeyLength + } + if len(value) != 8 { + return nil, nil, ErrInvalidValueLength + } + + var ( + r ObjectToExpirationEpochRecord + err error + ) + + if err = r.obj.Decode(key); err != nil { + return nil, nil, err + } + r.epoch = binary.LittleEndian.Uint64(value) + + return &r, nil, nil +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go index ec0ab8e1a..f71244625 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go @@ -2,6 +2,7 @@ package records import ( "fmt" + "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" "github.com/gdamore/tcell/v2" @@ -133,3 +134,22 @@ func (r *ECInfoRecord) String() string { len(r.ids), ) } + +func (r *ExpirationEpochToObjectRecord) String() string { + return fmt.Sprintf( + "exp. epoch %s %c CID %s OID %s", + common.FormatSimple(fmt.Sprintf("%-20d", r.epoch), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(fmt.Sprintf("%-44s", r.cnt), tcell.ColorAqua), + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + ) +} + +func (r *ObjectToExpirationEpochRecord) String() string { + return fmt.Sprintf( + "OID %s %c exp. epoch %s", + common.FormatSimple(fmt.Sprintf("%-44s", r.obj), tcell.ColorAqua), + tview.Borders.Vertical, + common.FormatSimple(strconv.FormatUint(r.epoch, 10), tcell.ColorAqua), + ) +} diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go index 34c1c29fd..0809cad1a 100644 --- a/cmd/frostfs-lens/internal/schema/metabase/records/types.go +++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go @@ -79,4 +79,15 @@ type ( id oid.ID ids []oid.ID } + + ExpirationEpochToObjectRecord struct { + epoch uint64 + cnt cid.ID + obj oid.ID + } + + ObjectToExpirationEpochRecord struct { + obj oid.ID + epoch uint64 + } ) From 4c8f9580a16eb5d30320d670849585fe25c1d04d Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 5 Mar 2025 17:09:02 +0300 Subject: [PATCH 401/591] [#1662] object: Fix `CloseAndRecv` for patch streamer * A client may open stream to server, not send anything and close the open stream immediatly. This shouldn't cause a panic; * Return the error if `s.patcher` is uninitialized. Uninitialized patcher cannot be closed, this causes a panic. 
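A minimal, self-contained sketch of the failure mode and the guard (hypothetical streamer/patcher types standing in for the service's own): the patcher is only created by the first Send, so a stream that is opened and closed without any message must be rejected with an error rather than dereferenced.

package main

import (
	"errors"
	"fmt"
)

// patcher is created lazily by the first Send; it stays nil when the
// client opens the stream and closes it without sending anything.
type patcher struct{}

func (p *patcher) Close() (string, error) { return "patched", nil }

type streamer struct{ patcher *patcher }

func (s *streamer) Send(string) { s.patcher = &patcher{} }

// CloseAndRecv must not dereference a nil patcher: returning an error,
// as the change below does, turns the panic into a clean failure.
func (s *streamer) CloseAndRecv() (string, error) {
	if s.patcher == nil {
		return "", errors.New("uninitialized patch streamer")
	}
	return s.patcher.Close()
}

func main() {
	s := &streamer{} // stream opened, nothing sent
	if _, err := s.CloseAndRecv(); err != nil {
		fmt.Println("error instead of panic:", err)
	}
}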
Signed-off-by: Airat Arifullin --- pkg/services/object/patch/streamer.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 5aba13f66..642b9f9fa 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -214,6 +214,9 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { } func (s *Streamer) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { + if s.patcher == nil { + return nil, errors.New("uninitialized patch streamer") + } patcherResp, err := s.patcher.Close(ctx) if err != nil { return nil, err From 471aeeaff3bdb8df231416bd69e659ca38e1e0c2 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 3 Mar 2025 04:21:53 +0300 Subject: [PATCH 402/591] [#1659] audit: Fix duplicated request logs When we do `object put` with audit enabled we get several entries in logs: with and without object id. `object put` request is logged in 2 places: 1. `(*auditPutStream) CloseAndRecv()` - when the client closes the request stream or when stream gets aborted. 2. `(*auditPutStream) Send()` - when stream was NOT aborted. `Send()` does error check for `ErrAbortStream` because if there is any other error - CloseAndRecv will not be called and there won't be any audit log about failed request. It led to logging on every object chunck put, even if `err == nil`. Added check for `err != nil` in `Send()` to fix it. Signed-off-by: Ekaterina Lebedeva --- pkg/services/object/audit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index dde9f8fc0..367be0c0c 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -163,7 +163,7 @@ func (a *auditPutStream) Send(ctx context.Context, req *object.PutRequest) error if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Put_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) From a17c3356fa762c3d558ed9d6fcd76f44db2f1e40 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 6 Mar 2025 15:07:37 +0300 Subject: [PATCH 403/591] [#1665] go.mod: Update sdk-go Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bbb817957..18378466e 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 diff --git a/go.sum b/go.sum index 0c66f4555..5205dddef 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e13 git.frostfs.info/TrueCloudLab/frostfs-observability 
v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 h1:QnAt5b2R6+hQthMOIn5ECfLAlVD8IAE5JRm1NCCOmuE= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4 h1:dOZHuOywvH1ms8U38lDCWpysgkCCeJ02RLI7zDhPcyw= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250217152255-c3f7378887a4/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 6260d703ce02a12a812d5071b601a3c925eb8860 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 6 Mar 2025 15:13:38 +0300 Subject: [PATCH 404/591] [#1665] treesvc: Disable service config query By default, gRPC fetches TXT report while resolving a domain. https://github.com/grpc/grpc-go/blob/0914bba6c5c5a545d34bd11e5dee0bbb8eaadd3f/internal/resolver/dns/dns_resolver.go#L336 This leads to a hanging dial if DNS is unavailable, even though the host may be specified in `/etc/hosts` (hello, localhost!). SDK client for the main API uses these options by default. Refs TrueCloudLab/frostfs-sdk-go#342 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/tree/client.go | 1 + pkg/services/tree/cache.go | 1 + pkg/services/tree/sync.go | 1 + 3 files changed, 3 insertions(+) diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index c6953f126..933378df6 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -39,6 +39,7 @@ func _client() (tree.TreeServiceClient, error) { tracing.NewStreamClientInterceptor(), ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), } if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 125871fc4..70f4a843b 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -110,6 +110,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* ), grpc.WithContextDialer(c.ds.GrpcContextDialer()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), } if !netAddr.IsTLSEnabled() { diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 3e0a45385..89450b739 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -355,6 +355,7 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { ), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), + grpc.WithDisableServiceConfig(), ) } From 9e31cb249f10c06cd0908db1269f600f4bd51cda Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 4 Feb 2025 21:21:31 +0300 Subject: [PATCH 405/591] [#1635] control: Add method to search shards by object Added method 
`ListShardsForObject` to ControlService and to StorageEngine. It returns information about shards storing object on the node. Signed-off-by: Ekaterina Lebedeva --- internal/logs/logs.go | 1 + pkg/local_object_storage/engine/shards.go | 46 ++ pkg/services/control/rpc.go | 20 + .../control/server/list_shards_for_object.go | 66 ++ pkg/services/control/service.proto | 23 + pkg/services/control/service_frostfs.pb.go | 724 ++++++++++++++++++ pkg/services/control/service_grpc.pb.go | 39 + 7 files changed, 919 insertions(+) create mode 100644 pkg/services/control/server/list_shards_for_object.go diff --git a/internal/logs/logs.go b/internal/logs/logs.go index d48a4da9b..d07f47fbf 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -252,6 +252,7 @@ const ( ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage" ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects" ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase" + ShardCouldNotFindObject = "could not find object" WritecacheWaitingForChannelsToFlush = "waiting for channels to flush" WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache" BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level" diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 8e191f72c..28f0287bc 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -11,6 +11,9 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" @@ -442,3 +445,46 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha func (s hashedShard) Hash() uint64 { return s.hash } + +func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address) ([]shard.Info, error) { + var err error + var info []shard.Info + prm := shard.ExistsPrm{ + Address: obj, + } + var siErr *objectSDK.SplitInfoError + var ecErr *objectSDK.ECInfoError + + e.iterateOverUnsortedShards(func(hs hashedShard) (stop bool) { + res, exErr := hs.Exists(ctx, prm) + if exErr != nil { + if client.IsErrObjectAlreadyRemoved(exErr) { + err = new(apistatus.ObjectAlreadyRemoved) + return true + } + + // Check if error is either SplitInfoError or ECInfoError. + // True means the object is virtual. 
+ if errors.As(exErr, &siErr) || errors.As(exErr, &ecErr) { + info = append(info, hs.DumpInfo()) + return false + } + + if shard.IsErrObjectExpired(exErr) { + err = exErr + return true + } + + if !client.IsErrObjectNotFound(exErr) { + e.reportShardError(ctx, hs, "could not check existence of object in shard", exErr, zap.Stringer("address", prm.Address)) + } + + return false + } + if res.Exists() { + info = append(info, hs.DumpInfo()) + } + return false + }) + return info, err +} diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go index bbf2cf0cc..0c4236d0e 100644 --- a/pkg/services/control/rpc.go +++ b/pkg/services/control/rpc.go @@ -32,6 +32,7 @@ const ( rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides" rpcDetachShards = "DetachShards" rpcStartShardRebuild = "StartShardRebuild" + rpcListShardsForObject = "ListShardsForObject" ) // HealthCheck executes ControlService.HealthCheck RPC. @@ -364,3 +365,22 @@ func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts . return wResp.message, nil } + +// ListShardsForObject executes ControlService.ListShardsForObject RPC. +func ListShardsForObject( + cli *client.Client, + req *ListShardsForObjectRequest, + opts ...client.CallOption, +) (*ListShardsForObjectResponse, error) { + wResp := newResponseWrapper[ListShardsForObjectResponse]() + + wReq := &requestWrapper{ + m: req, + } + err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcListShardsForObject), wReq, wResp, opts...) + if err != nil { + return nil, err + } + + return wResp.message, nil +} diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go new file mode 100644 index 000000000..84469772f --- /dev/null +++ b/pkg/services/control/server/list_shards_for_object.go @@ -0,0 +1,66 @@ +package control + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" + + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (s *Server) ListShardsForObject(ctx context.Context, req *control.ListShardsForObjectRequest) (*control.ListShardsForObjectResponse, error) { + err := s.isValidRequest(req) + if err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + var obj oid.ID + err = obj.DecodeString(req.GetBody().GetObjectId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + var cnr cid.ID + err = cnr.DecodeString(req.GetBody().GetContainerId()) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + resp := new(control.ListShardsForObjectResponse) + body := new(control.ListShardsForObjectResponse_Body) + resp.SetBody(body) + + var objAddr oid.Address + objAddr.SetContainer(cnr) + objAddr.SetObject(obj) + info, err := s.s.ListShardsForObject(ctx, objAddr) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if len(info) == 0 { + return nil, status.Error(codes.NotFound, logs.ShardCouldNotFindObject) + } + + body.SetShard_ID(shardInfoToProto(info)) + + // Sign the response + if err := ctrlmessage.Sign(s.key, resp); err != nil { + return nil, 
status.Error(codes.Internal, err.Error()) + } + return resp, nil +} + +func shardInfoToProto(infos []shard.Info) [][]byte { + shardInfos := make([][]byte, 0, len(infos)) + for _, info := range infos { + shardInfos = append(shardInfos, *info.ID) + } + + return shardInfos +} diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto index 97ecf9a8c..4c539acfc 100644 --- a/pkg/services/control/service.proto +++ b/pkg/services/control/service.proto @@ -89,6 +89,9 @@ service ControlService { // StartShardRebuild starts shard rebuild process. rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse); + + // ListShardsForObject returns shard info where object is stored. + rpc ListShardsForObject(ListShardsForObjectRequest) returns (ListShardsForObjectResponse); } // Health check request. @@ -729,3 +732,23 @@ message StartShardRebuildResponse { Signature signature = 2; } + +message ListShardsForObjectRequest { + message Body { + string object_id = 1; + string container_id = 2; + } + + Body body = 1; + Signature signature = 2; +} + +message ListShardsForObjectResponse { + message Body { + // List of the node's shards storing object. + repeated bytes shard_ID = 1; + } + + Body body = 1; + Signature signature = 2; +} diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go index 0b4e3cf32..44849d591 100644 --- a/pkg/services/control/service_frostfs.pb.go +++ b/pkg/services/control/service_frostfs.pb.go @@ -17303,3 +17303,727 @@ func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { in.Consumed() } } + +type ListShardsForObjectRequest_Body struct { + ObjectId string `json:"objectId"` + ContainerId string `json:"containerId"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.StringSize(1, x.ObjectId) + size += proto.StringSize(2, x.ContainerId) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if len(x.ObjectId) != 0 { + mm.AppendString(1, x.ObjectId) + } + if len(x.ContainerId) != 0 { + mm.AppendString(2, x.ContainerId) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. 
+func (x *ListShardsForObjectRequest_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest_Body") + } + switch fc.FieldNum { + case 1: // ObjectId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ObjectId") + } + x.ObjectId = data + case 2: // ContainerId + data, ok := fc.String() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "ContainerId") + } + x.ContainerId = data + } + } + return nil +} +func (x *ListShardsForObjectRequest_Body) GetObjectId() string { + if x != nil { + return x.ObjectId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetObjectId(v string) { + x.ObjectId = v +} +func (x *ListShardsForObjectRequest_Body) GetContainerId() string { + if x != nil { + return x.ContainerId + } + return "" +} +func (x *ListShardsForObjectRequest_Body) SetContainerId(v string) { + x.ContainerId = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"objectId\":" + out.RawString(prefix) + out.String(x.ObjectId) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"containerId\":" + out.RawString(prefix) + out.String(x.ContainerId) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectRequest_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "objectId": + { + var f string + f = in.String() + x.ObjectId = f + } + case "containerId": + { + var f string + f = in.String() + x.ContainerId = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectRequest struct { + Body *ListShardsForObjectRequest_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectRequest)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectRequest)(nil) + _ json.Marshaler = (*ListShardsForObjectRequest)(nil) + _ json.Unmarshaler = (*ListShardsForObjectRequest)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectRequest) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. 
+// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectRequest) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectRequest) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectRequest) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectRequest) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectRequest") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectRequest_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectRequest) GetBody() *ListShardsForObjectRequest_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectRequest) SetBody(v *ListShardsForObjectRequest_Body) { + x.Body = v +} +func (x *ListShardsForObjectRequest) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectRequest) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectRequest) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectRequest) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectRequest) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectRequest) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectRequest_Body + f = new(ListShardsForObjectRequest_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse_Body struct { + Shard_ID [][]byte `json:"shardID"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse_Body)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse_Body)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse_Body) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.RepeatedBytesSize(1, x.Shard_ID) + return size +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. +func (x *ListShardsForObjectResponse_Body) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + for j := range x.Shard_ID { + mm.AppendBytes(1, x.Shard_ID[j]) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse_Body") + } + switch fc.FieldNum { + case 1: // Shard_ID + data, ok := fc.Bytes() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Shard_ID") + } + x.Shard_ID = append(x.Shard_ID, data) + } + } + return nil +} +func (x *ListShardsForObjectResponse_Body) GetShard_ID() [][]byte { + if x != nil { + return x.Shard_ID + } + return nil +} +func (x *ListShardsForObjectResponse_Body) SetShard_ID(v [][]byte) { + x.Shard_ID = v +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (x *ListShardsForObjectResponse_Body) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse_Body) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"shardID\":" + out.RawString(prefix) + out.RawByte('[') + for i := range x.Shard_ID { + if i != 0 { + out.RawByte(',') + } + if x.Shard_ID[i] != nil { + out.Base64Bytes(x.Shard_ID[i]) + } else { + out.String("") + } + } + out.RawByte(']') + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (x *ListShardsForObjectResponse_Body) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "shardID": + { + var f []byte + var list [][]byte + in.Delim('[') + for !in.IsDelim(']') { + { + tmp := in.Bytes() + if len(tmp) == 0 { + tmp = nil + } + f = tmp + } + list = append(list, f) + in.WantComma() + } + x.Shard_ID = list + in.Delim(']') + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} + +type ListShardsForObjectResponse struct { + Body *ListShardsForObjectResponse_Body `json:"body"` + Signature *Signature `json:"signature"` +} + +var ( + _ encoding.ProtoMarshaler = (*ListShardsForObjectResponse)(nil) + _ encoding.ProtoUnmarshaler = (*ListShardsForObjectResponse)(nil) + _ json.Marshaler = (*ListShardsForObjectResponse)(nil) + _ json.Unmarshaler = (*ListShardsForObjectResponse)(nil) +) + +// StableSize returns the size of x in protobuf format. +// +// Structures with the same field values have the same binary size. +func (x *ListShardsForObjectResponse) StableSize() (size int) { + if x == nil { + return 0 + } + size += proto.NestedStructureSize(1, x.Body) + size += proto.NestedStructureSize(2, x.Signature) + return size +} + +// ReadSignedData fills buf with signed data of x. +// If buffer length is less than x.SignedDataSize(), new buffer is allocated. +// +// Returns any error encountered which did not allow writing the data completely. +// Otherwise, returns the buffer in which the data is written. +// +// Structures with the same field values have the same signed data. +func (x *ListShardsForObjectResponse) SignedDataSize() int { + return x.GetBody().StableSize() +} + +// SignedDataSize returns size of the request signed data in bytes. +// +// Structures with the same field values have the same signed data size. +func (x *ListShardsForObjectResponse) ReadSignedData(buf []byte) ([]byte, error) { + return x.GetBody().MarshalProtobuf(buf), nil +} + +// MarshalProtobuf implements the encoding.ProtoMarshaler interface. 
+func (x *ListShardsForObjectResponse) MarshalProtobuf(dst []byte) []byte { + m := pool.MarshalerPool.Get() + defer pool.MarshalerPool.Put(m) + x.EmitProtobuf(m.MessageMarshaler()) + dst = m.Marshal(dst) + return dst +} + +func (x *ListShardsForObjectResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) { + if x == nil { + return + } + if x.Body != nil { + x.Body.EmitProtobuf(mm.AppendMessage(1)) + } + if x.Signature != nil { + x.Signature.EmitProtobuf(mm.AppendMessage(2)) + } +} + +// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface. +func (x *ListShardsForObjectResponse) UnmarshalProtobuf(src []byte) (err error) { + var fc easyproto.FieldContext + for len(src) > 0 { + src, err = fc.NextField(src) + if err != nil { + return fmt.Errorf("cannot read next field in %s", "ListShardsForObjectResponse") + } + switch fc.FieldNum { + case 1: // Body + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Body") + } + x.Body = new(ListShardsForObjectResponse_Body) + if err := x.Body.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + case 2: // Signature + data, ok := fc.MessageData() + if !ok { + return fmt.Errorf("cannot unmarshal field %s", "Signature") + } + x.Signature = new(Signature) + if err := x.Signature.UnmarshalProtobuf(data); err != nil { + return fmt.Errorf("unmarshal: %w", err) + } + } + } + return nil +} +func (x *ListShardsForObjectResponse) GetBody() *ListShardsForObjectResponse_Body { + if x != nil { + return x.Body + } + return nil +} +func (x *ListShardsForObjectResponse) SetBody(v *ListShardsForObjectResponse_Body) { + x.Body = v +} +func (x *ListShardsForObjectResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} +func (x *ListShardsForObjectResponse) SetSignature(v *Signature) { + x.Signature = v +} + +// MarshalJSON implements the json.Marshaler interface. +func (x *ListShardsForObjectResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + x.MarshalEasyJSON(&w) + return w.Buffer.BuildBytes(), w.Error +} +func (x *ListShardsForObjectResponse) MarshalEasyJSON(out *jwriter.Writer) { + if x == nil { + out.RawString("null") + return + } + first := true + out.RawByte('{') + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"body\":" + out.RawString(prefix) + x.Body.MarshalEasyJSON(out) + } + { + if !first { + out.RawByte(',') + } else { + first = false + } + const prefix string = "\"signature\":" + out.RawString(prefix) + x.Signature.MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (x *ListShardsForObjectResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + x.UnmarshalEasyJSON(&r) + return r.Error() +} +func (x *ListShardsForObjectResponse) UnmarshalEasyJSON(in *jlexer.Lexer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "body": + { + var f *ListShardsForObjectResponse_Body + f = new(ListShardsForObjectResponse_Body) + f.UnmarshalEasyJSON(in) + x.Body = f + } + case "signature": + { + var f *Signature + f = new(Signature) + f.UnmarshalEasyJSON(in) + x.Signature = f + } + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go index 987e08c59..045662ccf 100644 --- a/pkg/services/control/service_grpc.pb.go +++ b/pkg/services/control/service_grpc.pb.go @@ -41,6 +41,7 @@ const ( ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache" ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards" ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild" + ControlService_ListShardsForObject_FullMethodName = "/control.ControlService/ListShardsForObject" ) // ControlServiceClient is the client API for ControlService service. @@ -95,6 +96,8 @@ type ControlServiceClient interface { DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) } type controlServiceClient struct { @@ -303,6 +306,15 @@ func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartS return out, nil } +func (c *controlServiceClient) ListShardsForObject(ctx context.Context, in *ListShardsForObjectRequest, opts ...grpc.CallOption) (*ListShardsForObjectResponse, error) { + out := new(ListShardsForObjectResponse) + err := c.cc.Invoke(ctx, ControlService_ListShardsForObject_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ControlServiceServer is the server API for ControlService service. // All implementations should embed UnimplementedControlServiceServer // for forward compatibility @@ -355,6 +367,8 @@ type ControlServiceServer interface { DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) // StartShardRebuild starts shard rebuild process. StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) + // ListShardsForObject returns shard info where object is stored. + ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) } // UnimplementedControlServiceServer should be embedded to have forward compatible implementations. 
@@ -427,6 +441,9 @@ func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachSh func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented") } +func (UnimplementedControlServiceServer) ListShardsForObject(context.Context, *ListShardsForObjectRequest) (*ListShardsForObjectResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListShardsForObject not implemented") +} // UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to ControlServiceServer will @@ -835,6 +852,24 @@ func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _ControlService_ListShardsForObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListShardsForObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServiceServer).ListShardsForObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ControlService_ListShardsForObject_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServiceServer).ListShardsForObject(ctx, req.(*ListShardsForObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + // ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -930,6 +965,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{ MethodName: "StartShardRebuild", Handler: _ControlService_StartShardRebuild_Handler, }, + { + MethodName: "ListShardsForObject", + Handler: _ControlService_ListShardsForObject_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkg/services/control/service.proto", From aab8addae006ea7667d0851170b278c65f2643e8 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 18 Feb 2025 13:04:48 +0300 Subject: [PATCH 406/591] [#1635] cli: Make `object.readObjectAddress()` public This method will be useful for upcoming control command. 
Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/modules/object/delete.go | 2 +- cmd/frostfs-cli/modules/object/get.go | 2 +- cmd/frostfs-cli/modules/object/hash.go | 2 +- cmd/frostfs-cli/modules/object/head.go | 2 +- cmd/frostfs-cli/modules/object/nodes.go | 2 +- cmd/frostfs-cli/modules/object/patch.go | 2 +- cmd/frostfs-cli/modules/object/range.go | 2 +- cmd/frostfs-cli/modules/object/util.go | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/delete.go b/cmd/frostfs-cli/modules/object/delete.go index e4e9cddb8..08a9ac4c8 100644 --- a/cmd/frostfs-cli/modules/object/delete.go +++ b/cmd/frostfs-cli/modules/object/delete.go @@ -55,7 +55,7 @@ func deleteObject(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "", fmt.Errorf("required flag \"%s\" not set", commonflags.OIDFlag)) } - objAddr = readObjectAddress(cmd, &cnr, &obj) + objAddr = ReadObjectAddress(cmd, &cnr, &obj) } pk := key.GetOrGenerate(cmd) diff --git a/cmd/frostfs-cli/modules/object/get.go b/cmd/frostfs-cli/modules/object/get.go index f1edccba2..7312f5384 100644 --- a/cmd/frostfs-cli/modules/object/get.go +++ b/cmd/frostfs-cli/modules/object/get.go @@ -46,7 +46,7 @@ func getObject(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) filename := cmd.Flag(fileFlag).Value.String() out, closer := createOutWriter(cmd, filename) diff --git a/cmd/frostfs-cli/modules/object/hash.go b/cmd/frostfs-cli/modules/object/hash.go index d8ea449eb..25df375d4 100644 --- a/cmd/frostfs-cli/modules/object/hash.go +++ b/cmd/frostfs-cli/modules/object/hash.go @@ -52,7 +52,7 @@ func getObjectHash(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go index 70c273443..97e996cad 100644 --- a/cmd/frostfs-cli/modules/object/head.go +++ b/cmd/frostfs-cli/modules/object/head.go @@ -47,7 +47,7 @@ func getObjectHeader(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 1500830a2..bc34b370d 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -101,7 +101,7 @@ func initObjectNodesCmd() { func objectNodes(cmd *cobra.Command, _ []string) { var cnrID cid.ID var objID oid.ID - readObjectAddress(cmd, &cnrID, &objID) + ReadObjectAddress(cmd, &cnrID, &objID) pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index ebc415b2f..d98182679 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -56,7 +56,7 @@ func patch(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeSlice(cmd) commonCmd.ExitOnErr(cmd, "", err) diff --git a/cmd/frostfs-cli/modules/object/range.go 
b/cmd/frostfs-cli/modules/object/range.go index 8f59906ca..be4fee4cf 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -47,7 +47,7 @@ func getObjectRange(cmd *cobra.Command, _ []string) { var cnr cid.ID var obj oid.ID - objAddr := readObjectAddress(cmd, &cnr, &obj) + objAddr := ReadObjectAddress(cmd, &cnr, &obj) ranges, err := getRangeList(cmd) commonCmd.ExitOnErr(cmd, "", err) diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index b090c9f8c..3955f8ee1 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -74,7 +74,7 @@ func parseXHeaders(cmd *cobra.Command) []string { return xs } -func readObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { +func ReadObjectAddress(cmd *cobra.Command, cnr *cid.ID, obj *oid.ID) oid.Address { readCID(cmd, cnr) readOID(cmd, obj) From df6d9da82aa965dbbf45114149b8279655276127 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 4 Feb 2025 21:23:00 +0300 Subject: [PATCH 407/591] [#1635] cli: Add command to get object's shard info Added `frostfs-cli object locate` subcommand. It lists info about shards storing an object. Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/modules/control/locate.go | 118 ++++++++++++++++++++++ cmd/frostfs-cli/modules/control/root.go | 2 + go.mod | 4 +- go.sum | 8 +- 4 files changed, 126 insertions(+), 6 deletions(-) create mode 100644 cmd/frostfs-cli/modules/control/locate.go diff --git a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go new file mode 100644 index 000000000..d10e2a896 --- /dev/null +++ b/cmd/frostfs-cli/modules/control/locate.go @@ -0,0 +1,118 @@ +package control + +import ( + "bytes" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" + object "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/modules/object" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" + rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + + "github.com/mr-tron/base58" + "github.com/spf13/cobra" +) + +const ( + FullInfoFlag = "full" + FullInfoFlagUsage = "Print full ShardInfo." +) + +var locateObjectCmd = &cobra.Command{ + Use: "locate-object", + Short: "List shards storing the object", + Long: "List shards storing the object", + Run: locateObject, +} + +func initControlLocateObjectCmd() { + initControlFlags(locateObjectCmd) + + flags := locateObjectCmd.Flags() + + flags.String(commonflags.CIDFlag, "", commonflags.CIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.CIDFlag) + + flags.String(commonflags.OIDFlag, "", commonflags.OIDFlagUsage) + _ = locateObjectCmd.MarkFlagRequired(commonflags.OIDFlag) + + flags.Bool(commonflags.JSON, false, "Print shard info as a JSON array. 
Requires --full flag.") + flags.Bool(FullInfoFlag, false, FullInfoFlagUsage) +} + +func locateObject(cmd *cobra.Command, _ []string) { + var cnr cid.ID + var obj oid.ID + + _ = object.ReadObjectAddress(cmd, &cnr, &obj) + + pk := key.Get(cmd) + + body := new(control.ListShardsForObjectRequest_Body) + body.SetContainerId(cnr.EncodeToString()) + body.SetObjectId(obj.EncodeToString()) + req := new(control.ListShardsForObjectRequest) + req.SetBody(body) + signRequest(cmd, pk, req) + + cli := getClient(cmd, pk) + + var err error + var resp *control.ListShardsForObjectResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + resp, err = control.ListShardsForObject(client, req) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, resp.GetSignature(), resp.GetBody()) + + shardIDs := resp.GetBody().GetShard_ID() + + isFull, _ := cmd.Flags().GetBool(FullInfoFlag) + if !isFull { + for _, id := range shardIDs { + cmd.Println(base58.Encode(id)) + } + return + } + + // get full shard info + listShardsReq := new(control.ListShardsRequest) + listShardsReq.SetBody(new(control.ListShardsRequest_Body)) + signRequest(cmd, pk, listShardsReq) + var listShardsResp *control.ListShardsResponse + err = cli.ExecRaw(func(client *rawclient.Client) error { + listShardsResp, err = control.ListShards(client, listShardsReq) + return err + }) + commonCmd.ExitOnErr(cmd, "rpc error: %w", err) + + verifyResponse(cmd, listShardsResp.GetSignature(), listShardsResp.GetBody()) + + shards := listShardsResp.GetBody().GetShards() + sortShardsByID(shards) + shards = filterShards(shards, shardIDs) + + isJSON, _ := cmd.Flags().GetBool(commonflags.JSON) + if isJSON { + prettyPrintShardsJSON(cmd, shards) + } else { + prettyPrintShards(cmd, shards) + } +} + +func filterShards(info []control.ShardInfo, ids [][]byte) []control.ShardInfo { + var res []control.ShardInfo + for _, id := range ids { + for _, inf := range info { + if bytes.Equal(inf.Shard_ID, id) { + res = append(res, inf) + } + } + } + return res +} diff --git a/cmd/frostfs-cli/modules/control/root.go b/cmd/frostfs-cli/modules/control/root.go index b20d3618e..3abfe80cb 100644 --- a/cmd/frostfs-cli/modules/control/root.go +++ b/cmd/frostfs-cli/modules/control/root.go @@ -39,6 +39,7 @@ func init() { listRulesCmd, getRuleCmd, listTargetsCmd, + locateObjectCmd, ) initControlHealthCheckCmd() @@ -52,4 +53,5 @@ func init() { initControlListRulesCmd() initControGetRuleCmd() initControlListTargetsCmd() + initControlLocateObjectCmd() } diff --git a/go.mod b/go.mod index 18378466e..9d0988bcd 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.22 require ( code.gitea.io/sdk/gitea v0.17.1 - git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 + git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 @@ -12,7 +12,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald 
v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 diff --git a/go.sum b/go.sum index 5205dddef..3ec679ee7 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8= code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08 h1:tl1TT+zNk1lF/J5EaD3syDrTaYbQwvJKVOVENM4oQ+k= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1-0.20241205083807-762d7f9f9f08/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9eXEbUMyDLW8jEMzY+gxMc= +git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= @@ -18,8 +18,8 @@ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/96 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= From bd61f7bf0a8e5a102175da73d0b79e9218de69f8 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 7 Mar 2025 12:29:28 +0300 Subject: [PATCH 408/591] [#1666] audit: Fix duplicated log in `Patch` method When we do `object patch` with audit enabled we get several duplicated entries in logs. `object patch` request is logged in 2 places: 1. `(*auditPatchStream) CloseAndRecv()` - when the client closes the request stream or when stream gets aborted. 2. `(*auditPatchStream) Send()` - when stream was NOT aborted. `Send()` doesn't check if `err != nil` before logging. It led to to logging on every `Send()` call. 
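Condensed, the change to the guard in `(*auditPatchStream) Send()` (the full diff follows) is:

    // before: err == nil also passed the check, so every chunk was logged
    if !errors.Is(err, util.ErrAbortStream) { ... }

    // after: log only when the stream fails with a non-abort error,
    // i.e. when CloseAndRecv will not be called
    if err != nil && !errors.Is(err, util.ErrAbortStream) { ... }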
Signed-off-by: Ekaterina Lebedeva --- pkg/services/object/audit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go index 367be0c0c..f8ee089fe 100644 --- a/pkg/services/object/audit.go +++ b/pkg/services/object/audit.go @@ -224,7 +224,7 @@ func (a *auditPatchStream) Send(ctx context.Context, req *object.PatchRequest) e if err != nil { a.failed = true } - if !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here + if err != nil && !errors.Is(err, util.ErrAbortStream) { // CloseAndRecv will not be called, so log here audit.LogRequestWithKey(ctx, a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key, audit.TargetFromContainerIDObjectID(a.containerID, a.objectID), !a.failed) From 8643e0abc569f9f540abac5b9d0fba9e05ae5640 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 10 Mar 2025 17:51:06 +0300 Subject: [PATCH 409/591] [#1668] writecache: Use object size to check free space Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/writecache/put.go | 11 ++++++++++- pkg/local_object_storage/writecache/state.go | 4 ---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go index 7da5c4d3a..2fbf50913 100644 --- a/pkg/local_object_storage/writecache/put.go +++ b/pkg/local_object_storage/writecache/put.go @@ -2,6 +2,7 @@ package writecache import ( "context" + "fmt" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -59,7 +60,15 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro // putBig writes object to FSTree and pushes it to the flush workers queue. func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error { - if !c.hasEnoughSpaceFS() { + if prm.RawData == nil { // foolproof: RawData should be marshalled by shard. 
+ data, err := prm.Object.Marshal() + if err != nil { + return fmt.Errorf("cannot marshal object: %w", err) + } + prm.RawData = data + } + size := uint64(len(prm.RawData)) + if !c.hasEnoughSpace(size) { return ErrOutOfSpace } diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go index 44caa2603..7a52d3672 100644 --- a/pkg/local_object_storage/writecache/state.go +++ b/pkg/local_object_storage/writecache/state.go @@ -7,10 +7,6 @@ func (c *cache) estimateCacheSize() (uint64, uint64) { return count, size } -func (c *cache) hasEnoughSpaceFS() bool { - return c.hasEnoughSpace(c.maxObjectSize) -} - func (c *cache) hasEnoughSpace(objectSize uint64) bool { count, size := c.estimateCacheSize() if c.maxCacheCount > 0 && count+1 > c.maxCacheCount { From d36afa31c7b4d0a878ecbe8e41657f6f955fbdc9 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 18 Feb 2025 10:57:17 +0300 Subject: [PATCH 410/591] [#1653] qos: Fix logging Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/qos.go | 6 ++++-- internal/logs/logs.go | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go index bfc278333..9663fc6ae 100644 --- a/cmd/frostfs-node/qos.go +++ b/cmd/frostfs-node/qos.go @@ -47,7 +47,7 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic } ioTag, err := qos.FromRawString(rawTag) if err != nil { - s.logger.Warn(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) + s.logger.Debug(ctx, logs.FailedToParseIncomingIOTag, zap.Error(err)) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } @@ -70,6 +70,7 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic return ctx } } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) case qos.IOTagInternal: for _, pk := range s.allowedInternalPubs { @@ -87,9 +88,10 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic return ctx } } + s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) default: - s.logger.Warn(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) + s.logger.Debug(ctx, logs.NotSupportedIncomingIOTagReplacedWithClient, zap.Stringer("io_tag", ioTag)) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } } diff --git a/internal/logs/logs.go b/internal/logs/logs.go index d07f47fbf..6115cdf90 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -513,4 +513,5 @@ const ( FailedToParseIncomingIOTag = "failed to parse incoming IO tag" NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`" + FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" ) From 3727d60331d3d410087a146da1df2dfbef7fa7cd Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Mar 2025 13:53:32 +0300 Subject: [PATCH 411/591] [#1653] qos: Add metrics Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 + internal/metrics/consts.go | 2 + internal/metrics/node.go | 6 ++ internal/metrics/qos.go | 52 +++++++++ internal/qos/limiter.go | 101 +++++++++++++++--- internal/qos/metrics.go | 31 ++++++ internal/qos/stats.go | 28 +++++ internal/qos/validate.go | 9 -- .../engine/engine_test.go | 4 + 
pkg/local_object_storage/shard/id.go | 1 + 10 files changed, 214 insertions(+), 21 deletions(-) create mode 100644 internal/metrics/qos.go create mode 100644 internal/qos/metrics.go create mode 100644 internal/qos/stats.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index afde0bbc0..92aa827f2 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -1048,6 +1048,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID } if c.metricsCollector != nil { mbOptions = append(mbOptions, meta.WithMetrics(lsmetrics.NewMetabaseMetrics(shCfg.metaCfg.path, c.metricsCollector.MetabaseMetrics()))) + shCfg.limiter.SetMetrics(c.metricsCollector.QoSMetrics()) } var sh shardOptsWithID diff --git a/internal/metrics/consts.go b/internal/metrics/consts.go index cb165de69..9123541ff 100644 --- a/internal/metrics/consts.go +++ b/internal/metrics/consts.go @@ -23,6 +23,7 @@ const ( policerSubsystem = "policer" commonCacheSubsystem = "common_cache" multinetSubsystem = "multinet" + qosSubsystem = "qos" successLabel = "success" shardIDLabel = "shard_id" @@ -43,6 +44,7 @@ const ( hitLabel = "hit" cacheLabel = "cache" sourceIPLabel = "source_ip" + ioTagLabel = "io_tag" readWriteMode = "READ_WRITE" readOnlyMode = "READ_ONLY" diff --git a/internal/metrics/node.go b/internal/metrics/node.go index 4ea3c7c24..8ade19eb2 100644 --- a/internal/metrics/node.go +++ b/internal/metrics/node.go @@ -26,6 +26,7 @@ type NodeMetrics struct { morphCache *morphCacheMetrics log logger.LogMetrics multinet *multinetMetrics + qos *QoSMetrics // nolint: unused appInfo *ApplicationInfo } @@ -55,6 +56,7 @@ func NewNodeMetrics() *NodeMetrics { log: logger.NewLogMetrics(namespace), appInfo: NewApplicationInfo(misc.Version), multinet: newMultinetMetrics(namespace), + qos: newQoSMetrics(), } } @@ -126,3 +128,7 @@ func (m *NodeMetrics) LogMetrics() logger.LogMetrics { func (m *NodeMetrics) MultinetMetrics() MultinetMetrics { return m.multinet } + +func (m *NodeMetrics) QoSMetrics() *QoSMetrics { + return m.qos +} diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go new file mode 100644 index 000000000..17fb67a27 --- /dev/null +++ b/internal/metrics/qos.go @@ -0,0 +1,52 @@ +package metrics + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics" + "github.com/prometheus/client_golang/prometheus" +) + +type QoSMetrics struct { + opsCounter *prometheus.GaugeVec +} + +func newQoSMetrics() *QoSMetrics { + return &QoSMetrics{ + opsCounter: metrics.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: qosSubsystem, + Name: "operations_total", + Help: "Count of pending, in progree, completed and failed due of resource exhausted error operations for each shard", + }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), + } +} + +func (m *QoSMetrics) SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) { + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "pending", + }).Set(float64(pending)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "in_progress", + }).Set(float64(inProgress)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + operationLabel: operation, + ioTagLabel: tag, + typeLabel: "completed", + }).Set(float64(completed)) + m.opsCounter.With(prometheus.Labels{ + shardIDLabel: shardID, + 
operationLabel: operation, + ioTagLabel: tag, + typeLabel: "resource_exhausted", + }).Set(float64(resourceExhausted)) +} + +func (m *QoSMetrics) Close(shardID string) { + m.opsCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID}) +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index b1406a7f3..8f00791c5 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "sync" + "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" @@ -15,6 +17,9 @@ import ( const ( defaultIdleTimeout time.Duration = 0 defaultShare float64 = 1.0 + minusOne = ^uint64(0) + + defaultMetricsCollectTimeout = 5 * time.Second ) type ReleaseFunc scheduling.ReleaseFunc @@ -22,6 +27,8 @@ type ReleaseFunc scheduling.ReleaseFunc type Limiter interface { ReadRequest(context.Context) (ReleaseFunc, error) WriteRequest(context.Context) (ReleaseFunc, error) + SetParentID(string) + SetMetrics(Metrics) Close() } @@ -34,10 +41,6 @@ func NewLimiter(c *limits.Config) (Limiter, error) { if err := validateConfig(c); err != nil { return nil, err } - read, write := c.Read(), c.Write() - if isNoop(read, write) { - return noopLimiterInstance, nil - } readScheduler, err := createScheduler(c.Read()) if err != nil { return nil, fmt.Errorf("create read scheduler: %w", err) @@ -46,10 +49,18 @@ func NewLimiter(c *limits.Config) (Limiter, error) { if err != nil { return nil, fmt.Errorf("create write scheduler: %w", err) } - return &mClockLimiter{ + l := &mClockLimiter{ readScheduler: readScheduler, writeScheduler: writeScheduler, - }, nil + closeCh: make(chan struct{}), + wg: &sync.WaitGroup{}, + readStats: createStats(), + writeStats: createStats(), + } + l.shardID.Store(&shardID{}) + l.metrics.Store(&metricsHolder{metrics: &noopMetrics{}}) + l.startMetricsCollect() + return l, nil } func createScheduler(config limits.OpConfig) (scheduler, error) { @@ -91,7 +102,7 @@ var ( ) func NewNoopLimiter() Limiter { - return &noopLimiter{} + return noopLimiterInstance } type noopLimiter struct{} @@ -104,43 +115,109 @@ func (n *noopLimiter) WriteRequest(context.Context) (ReleaseFunc, error) { return releaseStub, nil } +func (n *noopLimiter) SetParentID(string) {} + func (n *noopLimiter) Close() {} +func (n *noopLimiter) SetMetrics(Metrics) {} + var _ Limiter = (*mClockLimiter)(nil) +type shardID struct { + id string +} + type mClockLimiter struct { readScheduler scheduler writeScheduler scheduler + + readStats map[string]*stat + writeStats map[string]*stat + + shardID atomic.Pointer[shardID] + metrics atomic.Pointer[metricsHolder] + closeCh chan struct{} + wg *sync.WaitGroup } func (n *mClockLimiter) ReadRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.readScheduler) + return requestArrival(ctx, n.readScheduler, n.readStats) } func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { - return requestArrival(ctx, n.writeScheduler) + return requestArrival(ctx, n.writeScheduler, n.writeStats) } -func requestArrival(ctx context.Context, s scheduler) (ReleaseFunc, error) { +func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { tag, ok := tagging.IOTagFromContext(ctx) if !ok { tag = IOTagClient.String() } + stat := getStat(tag, stats) + stat.pending.Add(1) if tag == IOTagCritical.String() { - return releaseStub, nil + stat.inProgress.Add(1) + return func() { + stat.completed.Add(1) + }, nil } rel, err := 
s.RequestArrival(ctx, tag) + stat.inProgress.Add(1) if err != nil { if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || errors.Is(err, errSemaphoreLimitExceeded) { + stat.resourceExhausted.Add(1) return nil, &apistatus.ResourceExhausted{} } + stat.completed.Add(1) return nil, err } - return ReleaseFunc(rel), nil + return func() { + rel() + stat.completed.Add(1) + }, nil } func (n *mClockLimiter) Close() { n.readScheduler.Close() n.writeScheduler.Close() + close(n.closeCh) + n.wg.Wait() + n.metrics.Load().metrics.Close(n.shardID.Load().id) +} + +func (n *mClockLimiter) SetParentID(parentID string) { + n.shardID.Store(&shardID{id: parentID}) +} + +func (n *mClockLimiter) SetMetrics(m Metrics) { + n.metrics.Store(&metricsHolder{metrics: m}) +} + +func (n *mClockLimiter) startMetricsCollect() { + n.wg.Add(1) + go func() { + defer n.wg.Done() + + ticker := time.NewTicker(defaultMetricsCollectTimeout) + defer ticker.Stop() + for { + select { + case <-n.closeCh: + return + case <-ticker.C: + shardID := n.shardID.Load().id + if shardID == "" { + continue + } + metrics := n.metrics.Load().metrics + for tag, s := range n.readStats { + metrics.SetOperationTagCounters(shardID, "read", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load()) + } + for tag, s := range n.writeStats { + metrics.SetOperationTagCounters(shardID, "write", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load()) + } + } + } + }() } diff --git a/internal/qos/metrics.go b/internal/qos/metrics.go new file mode 100644 index 000000000..c00da51b7 --- /dev/null +++ b/internal/qos/metrics.go @@ -0,0 +1,31 @@ +package qos + +import "sync/atomic" + +type Metrics interface { + SetOperationTagCounters(shardID, operation, tag string, pending, inProgress, completed, resourceExhausted uint64) + Close(shardID string) +} + +var _ Metrics = (*noopMetrics)(nil) + +type noopMetrics struct{} + +func (n *noopMetrics) SetOperationTagCounters(string, string, string, uint64, uint64, uint64, uint64) { +} + +func (n *noopMetrics) Close(string) {} + +// stat presents limiter statistics cumulative counters. +// +// Each operation changes its status as follows: `pending` -> `in_progress` -> `completed` or `resource_exhausted`. 
+type stat struct { + completed atomic.Uint64 + pending atomic.Uint64 + resourceExhausted atomic.Uint64 + inProgress atomic.Uint64 +} + +type metricsHolder struct { + metrics Metrics +} diff --git a/internal/qos/stats.go b/internal/qos/stats.go new file mode 100644 index 000000000..f077f552b --- /dev/null +++ b/internal/qos/stats.go @@ -0,0 +1,28 @@ +package qos + +const unknownStatsTag = "unknown" + +var statTags = map[string]struct{}{ + IOTagClient.String(): {}, + IOTagBackground.String(): {}, + IOTagInternal.String(): {}, + IOTagPolicer.String(): {}, + IOTagWritecache.String(): {}, + IOTagCritical.String(): {}, + unknownStatsTag: {}, +} + +func createStats() map[string]*stat { + result := make(map[string]*stat) + for tag := range statTags { + result[tag] = &stat{} + } + return result +} + +func getStat(tag string, stats map[string]*stat) *stat { + if v, ok := stats[tag]; ok { + return v + } + return stats[unknownStatsTag] +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go index 43aa74942..3fa4ebbd1 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -90,12 +90,3 @@ func float64Value(f *float64) float64 { } return *f } - -func isNoop(read, write limits.OpConfig) bool { - return read.MaxRunningOps == limits.NoLimit && - read.MaxWaitingOps == limits.NoLimit && - write.MaxRunningOps == limits.NoLimit && - write.MaxWaitingOps == limits.NoLimit && - len(read.Tags) == 0 && - len(write.Tags) == 0 -} diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 7ddde1f02..3f9196128 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -163,6 +163,8 @@ type testQoSLimiter struct { write atomic.Int64 } +func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} + func (t *testQoSLimiter) Close() { require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0") require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0") @@ -177,3 +179,5 @@ func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) t.write.Add(1) return func() { t.write.Add(-1) }, nil } + +func (t *testQoSLimiter) SetParentID(string) {} diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index 26492cf01..b233b705c 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -61,6 +61,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { if s.pilorama != nil { s.pilorama.SetParentID(s.info.ID.String()) } + s.opsLimiter.SetParentID(s.info.ID.String()) if len(idFromMetabase) == 0 && !modeDegraded { if setErr := s.metaBase.SetShardID(ctx, *s.info.ID, s.GetMode()); setErr != nil { From 4ed2bbdb0f72589e59b5127ce237b65151f13b57 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Mar 2025 15:25:35 +0300 Subject: [PATCH 412/591] [#1653] objectSvc: Add operations by IO tag metric Signed-off-by: Dmitrii Stepanov --- internal/metrics/object.go | 19 +++++++++++++++---- internal/qos/tags.go | 15 ++++++++++++++- pkg/services/object/metrics.go | 21 +++++++++++---------- 3 files changed, 40 insertions(+), 15 deletions(-) diff --git a/internal/metrics/object.go b/internal/metrics/object.go index 0ba994ed3..e4f6dfde1 100644 --- a/internal/metrics/object.go +++ b/internal/metrics/object.go @@ -9,13 +9,14 @@ import ( ) type ObjectServiceMetrics interface { - AddRequestDuration(method string, d time.Duration, success bool) + 
AddRequestDuration(method string, d time.Duration, success bool, ioTag string) AddPayloadSize(method string, size int) } type objectServiceMetrics struct { - methodDuration *prometheus.HistogramVec - payloadCounter *prometheus.CounterVec + methodDuration *prometheus.HistogramVec + payloadCounter *prometheus.CounterVec + ioTagOpsCounter *prometheus.CounterVec } func newObjectServiceMetrics() *objectServiceMetrics { @@ -32,14 +33,24 @@ func newObjectServiceMetrics() *objectServiceMetrics { Name: "request_payload_bytes", Help: "Object Service request payload", }, []string{methodLabel}), + ioTagOpsCounter: metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: objectSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } -func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool) { +func (m *objectServiceMetrics) AddRequestDuration(method string, d time.Duration, success bool, ioTag string) { m.methodDuration.With(prometheus.Labels{ methodLabel: method, successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: method, + }).Inc() } func (m *objectServiceMetrics) AddPayloadSize(method string, size int) { diff --git a/internal/qos/tags.go b/internal/qos/tags.go index 6a9a7f7a4..9db45f190 100644 --- a/internal/qos/tags.go +++ b/internal/qos/tags.go @@ -1,6 +1,11 @@ package qos -import "fmt" +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" +) type IOTag string @@ -37,3 +42,11 @@ func FromRawString(s string) (IOTag, error) { func (t IOTag) String() string { return string(t) } + +func IOTagFromContext(ctx context.Context) string { + tag, ok := tagging.IOTagFromContext(ctx) + if !ok { + tag = "undefined" + } + return tag +} diff --git a/pkg/services/object/metrics.go b/pkg/services/object/metrics.go index 19748e938..6a6ee0f0f 100644 --- a/pkg/services/object/metrics.go +++ b/pkg/services/object/metrics.go @@ -4,6 +4,7 @@ import ( "context" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" ) @@ -34,7 +35,7 @@ type ( } MetricRegister interface { - AddRequestDuration(string, time.Duration, bool) + AddRequestDuration(string, time.Duration, bool, string) AddPayloadSize(string, int) } ) @@ -51,7 +52,7 @@ func (m MetricCollector) Get(req *object.GetRequest, stream GetObjectStream) (er if m.enabled { t := time.Now() defer func() { - m.metrics.AddRequestDuration("Get", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Get", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) }() err = m.next.Get(req, &getStreamMetric{ ServerStream: stream, @@ -106,7 +107,7 @@ func (m MetricCollector) PutSingle(ctx context.Context, request *object.PutSingl res, err := m.next.PutSingle(ctx, request) - m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil) + m.metrics.AddRequestDuration("PutSingle", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) if err == nil { m.metrics.AddPayloadSize("PutSingle", len(request.GetBody().GetObject().GetPayload())) } @@ -122,7 +123,7 @@ func (m MetricCollector) Head(ctx context.Context, request *object.HeadRequest) res, err := m.next.Head(ctx, request) - m.metrics.AddRequestDuration("Head", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Head", 
time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -135,7 +136,7 @@ func (m MetricCollector) Search(req *object.SearchRequest, stream SearchStream) err := m.next.Search(req, stream) - m.metrics.AddRequestDuration("Search", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Search", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) return err } @@ -148,7 +149,7 @@ func (m MetricCollector) Delete(ctx context.Context, request *object.DeleteReque res, err := m.next.Delete(ctx, request) - m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil) + m.metrics.AddRequestDuration("Delete", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } return m.next.Delete(ctx, request) @@ -160,7 +161,7 @@ func (m MetricCollector) GetRange(req *object.GetRangeRequest, stream GetObjectR err := m.next.GetRange(req, stream) - m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRange", time.Since(t), err == nil, qos.IOTagFromContext(stream.Context())) return err } @@ -173,7 +174,7 @@ func (m MetricCollector) GetRangeHash(ctx context.Context, request *object.GetRa res, err := m.next.GetRangeHash(ctx, request) - m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil) + m.metrics.AddRequestDuration("GetRangeHash", time.Since(t), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -209,7 +210,7 @@ func (s putStreamMetric) Send(ctx context.Context, req *object.PutRequest) error func (s putStreamMetric) CloseAndRecv(ctx context.Context) (*object.PutResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil) + s.metrics.AddRequestDuration("Put", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) return res, err } @@ -223,7 +224,7 @@ func (s patchStreamMetric) Send(ctx context.Context, req *object.PatchRequest) e func (s patchStreamMetric) CloseAndRecv(ctx context.Context) (*object.PatchResponse, error) { res, err := s.stream.CloseAndRecv(ctx) - s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil) + s.metrics.AddRequestDuration("Patch", time.Since(s.start), err == nil, qos.IOTagFromContext(ctx)) return res, err } From 597bce7a879f2c5258b819014ca80b5815f6a168 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 5 Mar 2025 15:44:15 +0300 Subject: [PATCH 413/591] [#1653] treeSvc: Add operations by IO tag metric Signed-off-by: Dmitrii Stepanov --- internal/metrics/treeservice.go | 15 +++++++++++++++ pkg/services/tree/metrics.go | 2 ++ pkg/services/tree/service.go | 9 +++++++++ 3 files changed, 26 insertions(+) diff --git a/internal/metrics/treeservice.go b/internal/metrics/treeservice.go index 6702aa83c..e192c4398 100644 --- a/internal/metrics/treeservice.go +++ b/internal/metrics/treeservice.go @@ -12,12 +12,14 @@ type TreeMetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type treeServiceMetrics struct { replicateTaskDuration *prometheus.HistogramVec replicateWaitDuration *prometheus.HistogramVec syncOpDuration *prometheus.HistogramVec + ioTagOpsCounter *prometheus.CounterVec } var _ TreeMetricsRegister = (*treeServiceMetrics)(nil) @@ -42,6 +44,12 @@ func newTreeServiceMetrics() *treeServiceMetrics { Name: "sync_duration_seconds", Help: "Duration of synchronization operations", }, []string{successLabel}), + ioTagOpsCounter: 
metrics.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: treeServiceSubsystem, + Name: "requests_total", + Help: "Count of requests for each IO tag", + }, []string{methodLabel, ioTagLabel}), } } @@ -62,3 +70,10 @@ func (m *treeServiceMetrics) AddSyncDuration(d time.Duration, success bool) { successLabel: strconv.FormatBool(success), }).Observe(d.Seconds()) } + +func (m *treeServiceMetrics) AddOperation(op string, ioTag string) { + m.ioTagOpsCounter.With(prometheus.Labels{ + ioTagLabel: ioTag, + methodLabel: op, + }).Inc() +} diff --git a/pkg/services/tree/metrics.go b/pkg/services/tree/metrics.go index 0f0e4ee57..07503f8c3 100644 --- a/pkg/services/tree/metrics.go +++ b/pkg/services/tree/metrics.go @@ -6,6 +6,7 @@ type MetricsRegister interface { AddReplicateTaskDuration(time.Duration, bool) AddReplicateWaitDuration(time.Duration, bool) AddSyncDuration(time.Duration, bool) + AddOperation(string, string) } type defaultMetricsRegister struct{} @@ -13,3 +14,4 @@ type defaultMetricsRegister struct{} func (defaultMetricsRegister) AddReplicateTaskDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddReplicateWaitDuration(time.Duration, bool) {} func (defaultMetricsRegister) AddSyncDuration(time.Duration, bool) {} +func (defaultMetricsRegister) AddOperation(string, string) {} diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 2e9722e79..f9b7395e7 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -105,6 +105,7 @@ func (s *Service) Shutdown() { } func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error) { + defer s.metrics.AddOperation("Add", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -148,6 +149,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error } func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByPathResponse, error) { + defer s.metrics.AddOperation("AddByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -203,6 +205,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP } func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveResponse, error) { + defer s.metrics.AddOperation("Remove", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -247,6 +250,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon // Move applies client operation to the specified tree and pushes in queue // for replication on other nodes. 
func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, error) { + defer s.metrics.AddOperation("Move", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -290,6 +294,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er } func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) (*GetNodeByPathResponse, error) { + defer s.metrics.AddOperation("GetNodeByPath", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } @@ -363,6 +368,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeServer) error { + defer s.metrics.AddOperation("GetSubTree", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -590,6 +596,7 @@ func sortByFilename(nodes []pilorama.NodeInfo, d GetSubTreeRequest_Body_Order_Di // Apply locally applies operation from the remote node to the tree. func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { + defer s.metrics.AddOperation("Apply", qos.IOTagFromContext(ctx)) err := verifyMessage(req) if err != nil { return nil, err @@ -633,6 +640,7 @@ func (s *Service) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, } func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) error { + defer s.metrics.AddOperation("GetOpLog", qos.IOTagFromContext(srv.Context())) if !s.initialSyncDone.Load() { return ErrAlreadySyncing } @@ -697,6 +705,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) } func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeListResponse, error) { + defer s.metrics.AddOperation("TreeList", qos.IOTagFromContext(ctx)) if !s.initialSyncDone.Load() { return nil, ErrAlreadySyncing } From 2005fdda0982f7aa23d5938dfe7a40dd2a72fdb4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 10 Mar 2025 13:04:39 +0300 Subject: [PATCH 414/591] [#1667] shard: Drop shard pool After adding an ops limiter, shard's `put` pool is redundant. 
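A rough sketch of the pattern that replaces the pool, assuming the shard wraps its put path with the ops limiter introduced earlier (the surrounding function is illustrative; only the `WriteRequest`/`ReleaseFunc` API and the `opsLimiter` field are taken from the code above):

    func (s *Shard) put(ctx context.Context, obj *objectSDK.Object) error {
    	release, err := s.opsLimiter.WriteRequest(ctx) // may return apistatus.ResourceExhausted
    	if err != nil {
    		return err
    	}
    	defer release()
    	// ... write the object to writecache/blobstor/metabase as before ...
    	return nil
    }

With per-shard PUT concurrency bounded this way, the engine-level `shardPools` map and the `shard_pool_size` setting become unnecessary, which is what this patch removes.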
Signed-off-by: Dmitrii Stepanov --- .../internal/modules/storagecfg/config.go | 2 - cmd/frostfs-node/config.go | 3 - cmd/frostfs-node/config/configdir_test.go | 7 +- cmd/frostfs-node/config/engine/config.go | 16 --- cmd/frostfs-node/config/engine/config_test.go | 2 - config/example/node.env | 1 - config/example/node.json | 1 - config/example/node.yaml | 1 - docs/storage-node-configuration.md | 1 - pkg/local_object_storage/engine/control.go | 10 +- .../engine/control_test.go | 4 - pkg/local_object_storage/engine/engine.go | 18 +-- .../engine/engine_test.go | 1 - pkg/local_object_storage/engine/error_test.go | 1 - pkg/local_object_storage/engine/evacuate.go | 43 +++---- .../engine/evacuate_test.go | 1 - .../engine/inhume_test.go | 2 +- pkg/local_object_storage/engine/put.go | 106 ++++++++---------- pkg/local_object_storage/engine/shards.go | 20 ---- .../engine/shards_test.go | 2 - 20 files changed, 71 insertions(+), 171 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go index 77183fb49..67e3414c2 100644 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ b/cmd/frostfs-adm/internal/modules/storagecfg/config.go @@ -40,8 +40,6 @@ morph: - address: wss://{{.}}/ws{{end}} {{if not .Relay }} storage: - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations - shard: default: # section with the default shard parameters metabase: diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 92aa827f2..e2fe23135 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -117,7 +117,6 @@ type applicationConfiguration struct { EngineCfg struct { errorThreshold uint32 - shardPoolSize uint32 shards []shardCfg lowMem bool } @@ -250,7 +249,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { // Storage Engine a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c) - a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c) a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c) return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) }) @@ -893,7 +891,6 @@ func (c *cfg) engineOpts() []engine.Option { var opts []engine.Option opts = append(opts, - engine.WithShardPoolSize(c.EngineCfg.shardPoolSize), engine.WithErrorThreshold(c.EngineCfg.errorThreshold), engine.WithLogger(c.log), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), diff --git a/cmd/frostfs-node/config/configdir_test.go b/cmd/frostfs-node/config/configdir_test.go index 35dae97d9..ee9d4268b 100644 --- a/cmd/frostfs-node/config/configdir_test.go +++ b/cmd/frostfs-node/config/configdir_test.go @@ -12,13 +12,10 @@ import ( func TestConfigDir(t *testing.T) { dir := t.TempDir() - cfgFileName0 := path.Join(dir, "cfg_00.json") - cfgFileName1 := path.Join(dir, "cfg_01.yml") + cfgFileName := path.Join(dir, "cfg_01.yml") - require.NoError(t, os.WriteFile(cfgFileName0, []byte(`{"storage":{"shard_pool_size":15}}`), 0o777)) - require.NoError(t, os.WriteFile(cfgFileName1, []byte("logger:\n level: debug"), 0o777)) + require.NoError(t, os.WriteFile(cfgFileName, []byte("logger:\n level: debug"), 0o777)) c := New("", dir, "") require.Equal(t, "debug", cast.ToString(c.Sub("logger").Value("level"))) - require.EqualValues(t, 15, cast.ToUint32(c.Sub("storage").Value("shard_pool_size"))) } diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go index e5735e88b..7994e7809 100644 --- 
a/cmd/frostfs-node/config/engine/config.go +++ b/cmd/frostfs-node/config/engine/config.go @@ -11,10 +11,6 @@ import ( const ( subsection = "storage" - - // ShardPoolSizeDefault is a default value of routine pool size per-shard to - // process object PUT operations in a storage engine. - ShardPoolSizeDefault = 20 ) // ErrNoShardConfigured is returned when at least 1 shard is required but none are found. @@ -65,18 +61,6 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config) return nil } -// ShardPoolSize returns the value of "shard_pool_size" config parameter from "storage" section. -// -// Returns ShardPoolSizeDefault if the value is not a positive number. -func ShardPoolSize(c *config.Config) uint32 { - v := config.Uint32Safe(c.Sub(subsection), "shard_pool_size") - if v > 0 { - return v - } - - return ShardPoolSizeDefault -} - // ShardErrorThreshold returns the value of "shard_ro_error_threshold" config parameter from "storage" section. // // Returns 0 if the the value is missing. diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index b912b5d7d..eaf2a294e 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -54,7 +54,6 @@ func TestEngineSection(t *testing.T) { require.False(t, handlerCalled) require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty)) - require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty)) require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode()) }) @@ -64,7 +63,6 @@ func TestEngineSection(t *testing.T) { num := 0 require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c)) - require.EqualValues(t, 15, engineconfig.ShardPoolSize(c)) err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error { defer func() { diff --git a/config/example/node.env b/config/example/node.env index 9bd645344..010b6840c 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -97,7 +97,6 @@ FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 # Storage engine section -FROSTFS_STORAGE_SHARD_POOL_SIZE=15 FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100 ## 0 shard ### Flag to refill Metabase from BlobStor diff --git a/config/example/node.json b/config/example/node.json index 6b799b318..b26c35d2c 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -158,7 +158,6 @@ ] }, "storage": { - "shard_pool_size": 15, "shard_ro_error_threshold": 100, "shard": { "0": { diff --git a/config/example/node.yaml b/config/example/node.yaml index 2552a419c..58b687d5c 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -135,7 +135,6 @@ rpc: storage: # note: shard configuration can be omitted for relay node (see `node.relay`) - shard_pool_size: 15 # size of per-shard worker pools used for PUT operations shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 271cc6532..51f0a9669 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -170,7 +170,6 @@ Local storage engine configuration. 
| Parameter | Type | Default value | Description | |----------------------------|-----------------------------------|---------------|------------------------------------------------------------------------------------------------------------------| -| `shard_pool_size` | `int` | `20` | Pool size for shard workers. Limits the amount of concurrent `PUT` operations on each shard. | | `shard_ro_error_threshold` | `int` | `0` | Maximum amount of storage errors to encounter before shard automatically moves to `Degraded` or `ReadOnly` mode. | | `low_mem` | `bool` | `false` | Reduce memory consumption by reducing performance. | | `shard` | [Shard config](#shard-subsection) | | Configuration for separate shards. | diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 6a416cfd9..7caa515d4 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -153,16 +153,10 @@ func (e *StorageEngine) Close(ctx context.Context) error { } // closes all shards. Never returns an error, shard errors are logged. -func (e *StorageEngine) close(ctx context.Context, releasePools bool) error { +func (e *StorageEngine) close(ctx context.Context) error { e.mtx.RLock() defer e.mtx.RUnlock() - if releasePools { - for _, p := range e.shardPools { - p.Release() - } - } - for id, sh := range e.shards { if err := sh.Close(ctx); err != nil { e.log.Debug(ctx, logs.EngineCouldNotCloseShard, @@ -213,7 +207,7 @@ func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { return e.open(ctx) } } else if prevErr == nil { // ok -> block - return e.close(ctx, errors.Is(err, errClosed)) + return e.close(ctx) } // otherwise do nothing diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index c9efc312c..a0e658aeb 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -245,7 +245,6 @@ func TestReload(t *testing.T) { // no new paths => no new shards require.Equal(t, shardNum, len(e.shards)) - require.Equal(t, shardNum, len(e.shardPools)) newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum)) @@ -257,7 +256,6 @@ func TestReload(t *testing.T) { require.NoError(t, e.Reload(context.Background(), rcfg)) require.Equal(t, shardNum+1, len(e.shards)) - require.Equal(t, shardNum+1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -277,7 +275,6 @@ func TestReload(t *testing.T) { // removed one require.Equal(t, shardNum-1, len(e.shards)) - require.Equal(t, shardNum-1, len(e.shardPools)) require.NoError(t, e.Close(context.Background())) }) @@ -311,7 +308,6 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str } require.Equal(t, num, len(e.shards)) - require.Equal(t, num, len(e.shardPools)) return e, currShards } diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index e13252b82..a915c9bd6 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -29,8 +28,6 @@ type StorageEngine struct { shards map[string]hashedShard - shardPools map[string]util.WorkerPool - closeCh chan struct{} setModeCh chan setModeRequest wg sync.WaitGroup @@ -193,8 +190,6 @@ type cfg struct { metrics MetricRegister - shardPoolSize uint32 - lowMem bool containerSource atomic.Pointer[containerSource] @@ -202,9 +197,8 @@ type cfg struct { func defaultCfg() *cfg { res := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - shardPoolSize: 20, - metrics: noopMetrics{}, + log: logger.NewLoggerWrapper(zap.L()), + metrics: noopMetrics{}, } res.containerSource.Store(&containerSource{}) return res @@ -221,7 +215,6 @@ func New(opts ...Option) *StorageEngine { return &StorageEngine{ cfg: c, shards: make(map[string]hashedShard), - shardPools: make(map[string]util.WorkerPool), closeCh: make(chan struct{}), setModeCh: make(chan setModeRequest), evacuateLimiter: &evacuationLimiter{}, @@ -241,13 +234,6 @@ func WithMetrics(v MetricRegister) Option { } } -// WithShardPoolSize returns option to specify size of worker pool for each shard. -func WithShardPoolSize(sz uint32) Option { - return func(c *cfg) { - c.shardPoolSize = sz - } -} - // WithErrorThreshold returns an option to specify size amount of errors after which // shard is moved to read-only mode. func WithErrorThreshold(sz uint32) Option { diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 3f9196128..6ef3846ee 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -57,7 +57,6 @@ func (te *testEngineWrapper) setShardsNumOpts( te.shardIDs[i] = shard.ID() } require.Len(t, te.engine.shards, num) - require.Len(t, te.engine.shardPools, num) return te } diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index d68a7e826..57029dd5f 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -46,7 +46,6 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32) var testShards [2]*testShard te := testNewEngine(t, - WithShardPoolSize(1), WithErrorThreshold(errThreshold), ). setShardsNumOpts(t, 2, func(id int) []shard.Option { diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index 27eaea768..c08dfbf03 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -15,7 +15,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -201,11 +200,6 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes { return res } -type pooledShard struct { - hashedShard - pool util.WorkerPool -} - var errMustHaveTwoShards = errors.New("must have at least 1 spare shard") // Evacuate moves data from one shard to the others. 
@@ -252,7 +246,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro } var mtx sync.RWMutex - copyShards := func() []pooledShard { + copyShards := func() []hashedShard { mtx.RLock() defer mtx.RUnlock() t := slices.Clone(shards) @@ -266,7 +260,7 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) erro } func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { var err error ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards", @@ -388,7 +382,7 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha } func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard", @@ -412,7 +406,7 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.Cancel } func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + shards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, egContainer *errgroup.Group, egObject *errgroup.Group, ) error { sh := shardsToEvacuate[shardID] @@ -485,7 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context } func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { sh := shardsToEvacuate[shardID] shards := getShards() @@ -515,7 +509,7 @@ func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, } func (e *StorageEngine) evacuateTrees(ctx context.Context, sh *shard.Shard, trees []pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, res *EvacuateShardRes, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, res *EvacuateShardRes, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateTrees", trace.WithAttributes( @@ -583,7 +577,7 @@ func (e *StorageEngine) evacuateTreeToOtherNode(ctx context.Context, sh *shard.S } func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shard, tree pilorama.ContainerIDTreeID, - prm EvacuateShardPrm, shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, + prm EvacuateShardPrm, shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, ) (bool, string, error) { target, found, err := e.findShardToEvacuateTree(ctx, tree, shards, shardsToEvacuate) if err != nil { @@ -653,15 +647,15 @@ func (e *StorageEngine) tryEvacuateTreeLocal(ctx context.Context, sh *shard.Shar // findShardToEvacuateTree returns first shard according HRW or first shard with tree exists. 
func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilorama.ContainerIDTreeID, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, -) (pooledShard, bool, error) { + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, +) (hashedShard, bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(tree.CID.EncodeToString())) - var result pooledShard + var result hashedShard var found bool for _, target := range shards { select { case <-ctx.Done(): - return pooledShard{}, false, ctx.Err() + return hashedShard{}, false, ctx.Err() default: } @@ -689,7 +683,7 @@ func (e *StorageEngine) findShardToEvacuateTree(ctx context.Context, tree pilora return result, found, nil } -func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]pooledShard, error) { +func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) ([]hashedShard, error) { e.mtx.RLock() defer e.mtx.RUnlock() @@ -719,18 +713,15 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm) // We must have all shards, to have correct information about their // indexes in a sorted slice and set appropriate marks in the metabase. // Evacuated shard is skipped during put. - shards := make([]pooledShard, 0, len(e.shards)) + shards := make([]hashedShard, 0, len(e.shards)) for id := range e.shards { - shards = append(shards, pooledShard{ - hashedShard: e.shards[id], - pool: e.shardPools[id], - }) + shards = append(shards, e.shards[id]) } return shards, nil } func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes, - getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, + getShards func() []hashedShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container, ) error { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects") defer span.End() @@ -800,7 +791,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool { } func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard, - shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, + shards []hashedShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container, ) (bool, error) { hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString())) for j := range shards { @@ -813,7 +804,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok { continue } - switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status { + switch e.putToShard(ctx, shards[j], addr, object, container.IsIndexedContainer(cnr)).status { case putToShardSuccess: res.objEvacuated.Add(1) e.log.Debug(ctx, logs.EngineObjectIsMovedToAnotherShard, diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 45c4b696b..ec7923297 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -196,7 +196,6 @@ func TestEvacuateShardObjects(t *testing.T) { e.mtx.Lock() delete(e.shards, evacuateShardID) - delete(e.shardPools, evacuateShardID) e.mtx.Unlock() checkHasObjects(t) diff --git 
a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 8c5d28b15..10cebfb52 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -205,7 +205,7 @@ func BenchmarkInhumeMultipart(b *testing.B) { func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { b.StopTimer() - engine := testNewEngine(b, WithShardPoolSize(uint32(numObjects))). + engine := testNewEngine(b). setShardsNum(b, numShards).prepare(b).engine defer func() { require.NoError(b, engine.Close(context.Background())) }() diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 64288a511..b348d13a2 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -9,7 +9,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -99,13 +98,13 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { var shRes putToShardRes e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() - pool, ok := e.shardPools[sh.ID().String()] + _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() if !ok { // Shard was concurrently removed, skip. return false } - shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer) + shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown }) switch shRes.status { @@ -122,70 +121,59 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { // putToShard puts object to sh. // Return putToShardStatus and error if it is necessary to propagate an error upper. 
-func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool, +func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool, ) (res putToShardRes) { - exitCh := make(chan struct{}) + var existPrm shard.ExistsPrm + existPrm.Address = addr - if err := pool.Submit(func() { - defer close(exitCh) - - var existPrm shard.ExistsPrm - existPrm.Address = addr - - exists, err := sh.Exists(ctx, existPrm) - if err != nil { - if shard.IsErrObjectExpired(err) { - // object is already found but - // expired => do nothing with it - res.status = putToShardExists - } else { - e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - } - - return // this is not ErrAlreadyRemoved error so we can go to the next shard - } - - if exists.Exists() { + exists, err := sh.Exists(ctx, existPrm) + if err != nil { + if shard.IsErrObjectExpired(err) { + // object is already found but + // expired => do nothing with it res.status = putToShardExists - return + } else { + e.log.Warn(ctx, logs.EngineCouldNotCheckObjectExistence, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) } - var putPrm shard.PutPrm - putPrm.SetObject(obj) - putPrm.SetIndexAttributes(isIndexedContainer) - - _, err = sh.Put(ctx, putPrm) - if err != nil { - if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || - errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - return - } - if client.IsErrObjectAlreadyRemoved(err) { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, - zap.Stringer("shard_id", sh.ID()), - zap.Error(err)) - res.status = putToShardRemoved - res.err = err - return - } - - e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) - return - } - - res.status = putToShardSuccess - }); err != nil { - e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, zap.Error(err)) - close(exitCh) + return // this is not ErrAlreadyRemoved error so we can go to the next shard } - <-exitCh + if exists.Exists() { + res.status = putToShardExists + return + } + + var putPrm shard.PutPrm + putPrm.SetObject(obj) + putPrm.SetIndexAttributes(isIndexedContainer) + + _, err = sh.Put(ctx, putPrm) + if err != nil { + if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || + errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + return + } + if client.IsErrObjectAlreadyRemoved(err) { + e.log.Warn(ctx, logs.EngineCouldNotPutObjectToShard, + zap.Stringer("shard_id", sh.ID()), + zap.Error(err)) + res.status = putToShardRemoved + res.err = err + return + } + + e.reportShardError(ctx, sh, "could not put object to shard", err, zap.Stringer("address", addr)) + return + } + + res.status = putToShardSuccess return } diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 28f0287bc..a38c85151 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -17,7 +17,6 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/hrw" "github.com/google/uuid" - "github.com/panjf2000/ants/v2" "go.uber.org/zap" 
"golang.org/x/sync/errgroup" ) @@ -181,11 +180,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { e.mtx.Lock() defer e.mtx.Unlock() - pool, err := ants.NewPool(int(e.shardPoolSize), ants.WithNonblocking(true)) - if err != nil { - return fmt.Errorf("create pool: %w", err) - } - strID := sh.ID().String() if _, ok := e.shards[strID]; ok { return fmt.Errorf("shard with id %s was already added", strID) @@ -199,8 +193,6 @@ func (e *StorageEngine) addShard(sh *shard.Shard) error { hash: hrw.StringHash(strID), } - e.shardPools[strID] = pool - return nil } @@ -225,12 +217,6 @@ func (e *StorageEngine) removeShards(ctx context.Context, ids ...string) { ss = append(ss, sh) delete(e.shards, id) - pool, ok := e.shardPools[id] - if ok { - pool.Release() - delete(e.shardPools, id) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", id)) } @@ -429,12 +415,6 @@ func (e *StorageEngine) deleteShards(ctx context.Context, ids []*shard.ID) ([]ha delete(e.shards, idStr) - pool, ok := e.shardPools[idStr] - if ok { - pool.Release() - delete(e.shardPools, idStr) - } - e.log.Info(ctx, logs.EngineShardHasBeenRemoved, zap.String("id", idStr)) } diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index 0bbc7563c..3aa9629b0 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -17,7 +17,6 @@ func TestRemoveShard(t *testing.T) { e, ids := te.engine, te.shardIDs defer func() { require.NoError(t, e.Close(context.Background())) }() - require.Equal(t, numOfShards, len(e.shardPools)) require.Equal(t, numOfShards, len(e.shards)) removedNum := numOfShards / 2 @@ -37,7 +36,6 @@ func TestRemoveShard(t *testing.T) { } } - require.Equal(t, numOfShards-removedNum, len(e.shardPools)) require.Equal(t, numOfShards-removedNum, len(e.shards)) for id, removed := range mSh { From 737788b35f173fb9cd2838fb53858d8c8b22ec25 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 12 Mar 2025 09:21:15 +0300 Subject: [PATCH 415/591] [#1669] go.mod: Bump frostfs-qos version Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9d0988bcd..69273fda2 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 3ec679ee7..a8f7216a5 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos 
v0.0.0-20250227072915-25102d1e1aa3 h1:QnAt5b2R6+hQthMOIn5ECfLAlVD8IAE5JRm1NCCOmuE= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250227072915-25102d1e1aa3/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= From bcc84c85a0bdc9095f511ba2d03094723b48c7dd Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 08:57:35 +0300 Subject: [PATCH 416/591] [#1671] Replace `interface{}` with `any` gopatch: ``` @@ @@ -interface{} +any ``` Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/engine/evacuate_test.go | 8 ++++---- pkg/morph/client/client.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index ec7923297..bd5222b78 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -404,8 +404,8 @@ func TestEvacuateSingleProcess(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] @@ -446,8 +446,8 @@ func TestEvacuateObjectsAsync(t *testing.T) { require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly)) require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly)) - blocker := make(chan interface{}) - running := make(chan interface{}) + blocker := make(chan any) + running := make(chan any) var prm EvacuateShardPrm prm.ShardID = ids[1:2] diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 10ded5142..19349ccd5 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -213,7 +213,7 @@ func (c *Client) Invoke(ctx context.Context, contract util.Uint160, fee fixedn.F // If the remote neo-go node does not support sessions, `unwrap.ErrNoSessionID` is returned. // batchSize is the number of items to prefetch: if the number of items in the iterator is less than batchSize, no session will be created. // The default batchSize is 100, the default limit from neo-go. 
-func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...interface{}) error { +func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int, contract util.Uint160, method string, args ...any) error { start := time.Now() success := false defer func() { From d66bffb1919c8b6caa253b84b1f95f38afd639a0 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 09:01:42 +0300 Subject: [PATCH 417/591] [#1671] Use max builtin where possible gopatcH: ``` @@ var d, a expression @@ -if d < a { - d = a -} -return d +return max(d, a) @@ var d, a expression @@ -if d <= a { - d = a -} -return d +return max(d, a) ``` Signed-off-by: Evgenii Stratonikov --- .../config/engine/shard/boltdb/boltdb.go | 15 +++------------ .../config/engine/shard/pilorama/config.go | 10 ++-------- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go index a51308b5b..b564d36f8 100644 --- a/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go +++ b/cmd/frostfs-node/config/engine/shard/boltdb/boltdb.go @@ -37,10 +37,7 @@ func (x *Config) Perm() fs.FileMode { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d < 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -48,10 +45,7 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } // NoSync returns the value of "no_sync" config parameter. @@ -66,8 +60,5 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) PageSize() int { s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size")) - if s < 0 { - s = 0 - } - return s + return max(s, 0) } diff --git a/cmd/frostfs-node/config/engine/shard/pilorama/config.go b/cmd/frostfs-node/config/engine/shard/pilorama/config.go index 28671ca55..5d4e8f408 100644 --- a/cmd/frostfs-node/config/engine/shard/pilorama/config.go +++ b/cmd/frostfs-node/config/engine/shard/pilorama/config.go @@ -52,10 +52,7 @@ func (x *Config) NoSync() bool { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchDelay() time.Duration { d := config.DurationSafe((*config.Config)(x), "max_batch_delay") - if d <= 0 { - d = 0 - } - return d + return max(d, 0) } // MaxBatchSize returns the value of "max_batch_size" config parameter. @@ -63,8 +60,5 @@ func (x *Config) MaxBatchDelay() time.Duration { // Returns 0 if the value is not a positive number. func (x *Config) MaxBatchSize() int { s := int(config.IntSafe((*config.Config)(x), "max_batch_size")) - if s <= 0 { - s = 0 - } - return s + return max(s, 0) } From 40536d8a0658bf374120007b13bce712422f7ba1 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 09:09:14 +0300 Subject: [PATCH 418/591] [#1671] Use `fmt.Appendf` where warranted Fix gopls warnings: ``` cmd/frostfs-adm/internal/modules/morph/config/config.go:68:20-64: Replace []byte(fmt.Sprintf...) with fmt.Appendf ```` gopatch: ``` @@ var f expression @@ -[]byte(fmt.Sprintf(f, ...)) +fmt.Appendf(nil, f, ...) 
``` Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/config/config.go | 6 +++--- .../internal/modules/morph/contract/dump_hashes.go | 4 ++-- cmd/frostfs-adm/internal/modules/morph/policy/policy.go | 6 +++--- cmd/frostfs-cli/modules/control/list_targets.go | 2 +- pkg/local_object_storage/metabase/reset_test.go | 2 +- scripts/populate-metabase/internal/populate.go | 7 ++----- 6 files changed, 12 insertions(+), 15 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index 65ccc9f9f..f64cb4817 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -65,14 +65,14 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { nbuf := make([]byte, 8) copy(nbuf[:], v) n := binary.LittleEndian.Uint64(nbuf) - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%d (int)\n", k, n))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: if len(v) == 0 || len(v) > 1 { return helper.InvalidConfigValueErr(k) } - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%t (bool)\n", k, v[0] == 1))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%t (bool)\n", k, v[0] == 1)) default: - _, _ = tw.Write([]byte(fmt.Sprintf("%s:\t%s (hex)\n", k, hex.EncodeToString(v)))) + _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%s (hex)\n", k, hex.EncodeToString(v))) } } diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 437e2480d..fb7e4ff62 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -219,8 +219,8 @@ func printContractInfo(cmd *cobra.Command, infos []contractDumpInfo) { if info.version == "" { info.version = "unknown" } - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t(%s):\t%s\n", - info.name, info.version, info.hash.StringLE()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t(%s):\t%s\n", + info.name, info.version, info.hash.StringLE())) } _ = tw.Flush() diff --git a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go index 686a244f0..f2932e87c 100644 --- a/cmd/frostfs-adm/internal/modules/morph/policy/policy.go +++ b/cmd/frostfs-adm/internal/modules/morph/policy/policy.go @@ -80,9 +80,9 @@ func dumpPolicyCmd(cmd *cobra.Command, _ []string) error { buf := bytes.NewBuffer(nil) tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) - _, _ = tw.Write([]byte(fmt.Sprintf("Execution Fee Factor:\t%d (int)\n", execFee))) - _, _ = tw.Write([]byte(fmt.Sprintf("Fee Per Byte:\t%d (int)\n", feePerByte))) - _, _ = tw.Write([]byte(fmt.Sprintf("Storage Price:\t%d (int)\n", storagePrice))) + _, _ = tw.Write(fmt.Appendf(nil, "Execution Fee Factor:\t%d (int)\n", execFee)) + _, _ = tw.Write(fmt.Appendf(nil, "Fee Per Byte:\t%d (int)\n", feePerByte)) + _, _ = tw.Write(fmt.Appendf(nil, "Storage Price:\t%d (int)\n", storagePrice)) _ = tw.Flush() cmd.Print(buf.String()) diff --git a/cmd/frostfs-cli/modules/control/list_targets.go b/cmd/frostfs-cli/modules/control/list_targets.go index 8bd2dc9cd..3142d02e7 100644 --- a/cmd/frostfs-cli/modules/control/list_targets.go +++ b/cmd/frostfs-cli/modules/control/list_targets.go @@ -62,7 +62,7 @@ func listTargets(cmd *cobra.Command, _ []string) { tw := tabwriter.NewWriter(buf, 0, 2, 2, ' ', 0) _, _ = 
tw.Write([]byte("#\tName\tType\n")) for i, t := range targets { - _, _ = tw.Write([]byte(fmt.Sprintf("%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType()))) + _, _ = tw.Write(fmt.Appendf(nil, "%s\t%s\t%s\n", strconv.Itoa(i), t.GetName(), t.GetType())) } _ = tw.Flush() cmd.Print(buf.String()) diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 45faecc13..5f0956f0b 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -37,7 +37,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) - putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) + putPrm.SetStorageID(fmt.Appendf(nil, "0/%d", idx)) _, err := db.Put(context.Background(), putPrm) require.NoError(t, err) } diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go index 4da23a295..fafe61eaa 100644 --- a/scripts/populate-metabase/internal/populate.go +++ b/scripts/populate-metabase/internal/populate.go @@ -31,13 +31,10 @@ func PopulateWithObjects( for range count { obj := factory() - - id := []byte(fmt.Sprintf( - "%c/%c/%c", + id := fmt.Appendf(nil, "%c/%c/%c", digits[rand.Int()%len(digits)], digits[rand.Int()%len(digits)], - digits[rand.Int()%len(digits)], - )) + digits[rand.Int()%len(digits)]) prm := meta.PutPrm{} prm.SetObject(obj) From 155d3ddb6e9d8400bdfc43cd2606ac577b6b8945 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 09:14:14 +0300 Subject: [PATCH 419/591] [#1671] Use `min` builtin where possible Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/pilorama/forest.go | 5 +---- pkg/morph/client/client.go | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index 92183716c..ef284a727 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -205,10 +205,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI r := mergeNodeInfos(res) for i := range r { if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { - finish := i + count - if len(res) < finish { - finish = len(res) - } + finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) return r[i:finish], &last, nil } diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 19349ccd5..e63d926e0 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -262,10 +262,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int }() // Batch size for TraverseIterator() can restricted on the server-side. 
- traverseBatchSize := batchSize - if invoker.DefaultIteratorResultItems < traverseBatchSize { - traverseBatchSize = invoker.DefaultIteratorResultItems - } + traverseBatchSize := min(batchSize, invoker.DefaultIteratorResultItems) for { items, err := c.rpcActor.TraverseIterator(sid, &r, traverseBatchSize) if err != nil { From 460e5cbccf8fa62134ff8bbc73f886ee5ceaeb87 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 09:24:44 +0300 Subject: [PATCH 420/591] [#1671] Use `slices.Delete()` where possible gopatch is missing for this one, because https://github.com/uber-go/gopatch/issues/179 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-lens/internal/tui/input.go | 4 +++- .../blobstor/internal/blobstortest/iterate.go | 3 ++- pkg/local_object_storage/metabase/lock.go | 3 ++- pkg/services/object/delete/exec.go | 3 ++- pkg/services/object/search/util.go | 3 ++- pkg/services/object/util/placement.go | 3 ++- pkg/services/object_manager/placement/traverser.go | 4 ++-- 7 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go index 4fdf97119..90729c119 100644 --- a/cmd/frostfs-lens/internal/tui/input.go +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -1,6 +1,8 @@ package tui import ( + "slices" + "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -26,7 +28,7 @@ func (f *InputFieldWithHistory) AddToHistory(s string) { // Used history data for search prompt, so just make that data recent. if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] { - f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...) + f.history = slices.Delete(f.history, f.historyPointer, f.historyPointer+1) f.history = append(f.history, s) } diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index 36b2c33f8..c11d0888b 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -3,6 +3,7 @@ package blobstortest import ( "context" "errors" + "slices" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" @@ -26,7 +27,7 @@ func TestIterate(t *testing.T, cons Constructor, minSize, maxSize uint64) { _, err := s.Delete(context.Background(), delPrm) require.NoError(t, err) - objects = append(objects[:delID], objects[delID+1:]...) + objects = slices.Delete(objects, delID, delID+1) runTestNormalHandler(t, s, objects) diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index b930a0141..aa1478423 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "slices" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" @@ -250,7 +251,7 @@ func freePotentialLocks(tx *bbolt.Tx, idCnr cid.ID, locker oid.ID) ([]oid.Addres unlockedObjects = append(unlockedObjects, addr) } else { // exclude locker - keyLockers = append(keyLockers[:i], keyLockers[i+1:]...) 
+ keyLockers = slices.Delete(keyLockers, i, i+1) v, err = encodeList(keyLockers) if err != nil { diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go index 36a17bde2..a99ba3586 100644 --- a/pkg/services/object/delete/exec.go +++ b/pkg/services/object/delete/exec.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" @@ -182,7 +183,7 @@ func (exec *execCtx) addMembers(incoming []oid.ID) { for i := range members { for j := 0; j < len(incoming); j++ { // don't use range, slice mutates in body if members[i].Equals(incoming[j]) { - incoming = append(incoming[:j], incoming[j+1:]...) + incoming = slices.Delete(incoming, j, j+1) j-- } } diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go index fed168187..0be5345b9 100644 --- a/pkg/services/object/search/util.go +++ b/pkg/services/object/search/util.go @@ -2,6 +2,7 @@ package searchsvc import ( "context" + "slices" "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -53,7 +54,7 @@ func (w *uniqueIDWriter) WriteIDs(list []oid.ID) error { } // exclude processed address - list = append(list[:i], list[i+1:]...) + list = slices.Delete(list, i, i+1) i-- } diff --git a/pkg/services/object/util/placement.go b/pkg/services/object/util/placement.go index 195944f92..f74b0aab9 100644 --- a/pkg/services/object/util/placement.go +++ b/pkg/services/object/util/placement.go @@ -3,6 +3,7 @@ package util import ( "context" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -93,7 +94,7 @@ func (p *remotePlacement) BuildPlacement(ctx context.Context, cnr cid.ID, obj *o } if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) { - vs[i] = append(vs[i][:j], vs[i][j+1:]...) + vs[i] = slices.Delete(vs[i], j, j+1) j-- } } diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index efa4a5b06..a3f9af959 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -288,8 +288,8 @@ func (t *Traverser) Next() []Node { func (t *Traverser) skipEmptyVectors() { for i := 0; i < len(t.vectors); i++ { // don't use range, slice changes in body if len(t.vectors[i]) == 0 && t.rem[i] <= 0 || t.rem[0] == 0 { - t.vectors = append(t.vectors[:i], t.vectors[i+1:]...) - t.rem = append(t.rem[:i], t.rem[i+1:]...) 
+ t.vectors = slices.Delete(t.vectors, i, i+1) + t.rem = slices.Delete(t.rem, i, i+1) i-- } else { break From ecb6b0793c877ae9a6d1b399a3c364323f77254d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 09:34:59 +0300 Subject: [PATCH 421/591] [#1671] Use `slices.ContainsFunc()` where possible Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/netmap.go | 8 +++----- pkg/network/group.go | 7 +++---- pkg/services/object/acl/v2/util_test.go | 9 ++------- pkg/services/policer/policer_test.go | 7 +++---- pkg/services/tree/service.go | 13 +++++-------- 5 files changed, 16 insertions(+), 28 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go index fb8f03783..20abaff0a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go @@ -3,6 +3,7 @@ package helper import ( "errors" "fmt" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -118,11 +119,8 @@ func MergeNetmapConfig(roInvoker *invoker.Invoker, md map[string]any) error { return err } for k, v := range m { - for _, key := range NetmapConfigKeys { - if k == key { - md[k] = v - break - } + if slices.Contains(NetmapConfigKeys, k) { + md[k] = v } } return nil diff --git a/pkg/network/group.go b/pkg/network/group.go index 9843b14d4..5a71e530e 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,6 +3,7 @@ package network import ( "errors" "fmt" + "slices" "sort" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -164,10 +165,8 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) { // at least one common address. 
func (x AddressGroup) Intersects(x2 AddressGroup) bool { for i := range x { - for j := range x2 { - if x[i].equal(x2[j]) { - return true - } + if slices.ContainsFunc(x2, x[i].equal) { + return true } } diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go index 4b19cecfe..40fce8877 100644 --- a/pkg/services/object/acl/v2/util_test.go +++ b/pkg/services/object/acl/v2/util_test.go @@ -4,6 +4,7 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "slices" "testing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" @@ -91,13 +92,7 @@ func TestIsVerbCompatible(t *testing.T) { for op, list := range table { for _, verb := range verbs { - var contains bool - for _, v := range list { - if v == verb { - contains = true - break - } - } + contains := slices.Contains(list, verb) tok.ForVerb(verb) diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go index cef4c36d9..049c33753 100644 --- a/pkg/services/policer/policer_test.go +++ b/pkg/services/policer/policer_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "errors" + "slices" "sort" "testing" "time" @@ -226,10 +227,8 @@ func TestProcessObject(t *testing.T) { return nil, err } } - for _, i := range ti.objHolders { - if index == i { - return nil, nil - } + if slices.Contains(ti.objHolders, index) { + return nil, nil } return nil, new(apistatus.ObjectNotFound) } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index f9b7395e7..cd89d6a28 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -345,14 +345,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) } else { var metaValue []KeyValue for _, kv := range m.Items { - for _, attr := range b.GetAttributes() { - if kv.Key == attr { - metaValue = append(metaValue, KeyValue{ - Key: kv.Key, - Value: kv.Value, - }) - break - } + if slices.Contains(b.GetAttributes(), kv.Key) { + metaValue = append(metaValue, KeyValue{ + Key: kv.Key, + Value: kv.Value, + }) } } x.Meta = metaValue From 997759994a55cc943804d6a16f29a4703f8435a3 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 13 Mar 2025 12:01:57 +0300 Subject: [PATCH 422/591] [#1676] golangci: Enable gci linter Signed-off-by: Alexander Chuprov --- .golangci.yml | 6 ++++++ cmd/frostfs-adm/internal/modules/storagecfg/root.go | 1 - cmd/frostfs-cli/modules/control/locate.go | 1 - pkg/services/control/server/list_shards_for_object.go | 1 - 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d83f36de8..f21a46248 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,6 +22,11 @@ linters-settings: # 'default' case is present, even if all enum members aren't listed in the # switch default-signifies-exhaustive: true + gci: + sections: + - standard + - default + custom-order: true govet: # report about shadowed variables check-shadowing: false @@ -72,6 +77,7 @@ linters: - durationcheck - exhaustive - copyloopvar + - gci - gofmt - goimports - misspell diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go index 8acbc4579..a5adea0da 100644 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ b/cmd/frostfs-adm/internal/modules/storagecfg/root.go @@ -31,7 +31,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" ) diff --git 
a/cmd/frostfs-cli/modules/control/locate.go b/cmd/frostfs-cli/modules/control/locate.go index d10e2a896..4cb4be539 100644 --- a/cmd/frostfs-cli/modules/control/locate.go +++ b/cmd/frostfs-cli/modules/control/locate.go @@ -11,7 +11,6 @@ import ( rawclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/mr-tron/base58" "github.com/spf13/cobra" ) diff --git a/pkg/services/control/server/list_shards_for_object.go b/pkg/services/control/server/list_shards_for_object.go index 84469772f..39565ed50 100644 --- a/pkg/services/control/server/list_shards_for_object.go +++ b/pkg/services/control/server/list_shards_for_object.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "google.golang.org/grpc/codes" From ff4e9b6ae119ca234cf035c00d5c23fc4d494c5a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 13 Mar 2025 10:18:26 +0300 Subject: [PATCH 423/591] [#1673] logger: Drop unused fields Signed-off-by: Dmitrii Stepanov --- pkg/util/logger/logger.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 19d3f1ed1..a0b2728c9 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -147,8 +147,6 @@ func newJournaldLogger(prm *Prm) (*Logger, error) { lvl := zap.NewAtomicLevelAt(prm.level) c := zap.NewProductionConfig() - c.Level = lvl - c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook } From 7893d763d1b1d5eeeb62d8a4beb2175acaa13b65 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 13 Mar 2025 10:37:29 +0300 Subject: [PATCH 424/591] [#1673] logger: Add sampling for journald logger Signed-off-by: Dmitrii Stepanov --- pkg/util/logger/logger.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index a0b2728c9..2eb5e5538 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -2,6 +2,7 @@ package logger import ( "fmt" + "time" "git.frostfs.info/TrueCloudLab/zapjournald" "github.com/ssgreg/journald" @@ -166,7 +167,18 @@ func newJournaldLogger(prm *Prm) (*Logger, error) { zapjournald.SyslogPid(), }) - lZap := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) + var samplerOpts []zapcore.SamplerOption + if c.Sampling.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(c.Sampling.Hook)) + } + samplingCore := zapcore.NewSamplerWithOptions( + coreWithContext, + time.Second, + c.Sampling.Initial, + c.Sampling.Thereafter, + samplerOpts..., + ) + lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) l := &Logger{z: lZap, lvl: lvl} prm._log = l From 07a660fbc440dbd3974250608506e39c093bf6cc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 13 Mar 2025 18:01:27 +0300 Subject: [PATCH 425/591] [#1677] writecache: Add QoS limiter usage Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 1 + internal/logs/logs.go | 1 + pkg/local_object_storage/writecache/cache.go | 2 ++ 
pkg/local_object_storage/writecache/flush.go | 15 ++++++++++++++- pkg/local_object_storage/writecache/options.go | 9 +++++++++ 5 files changed, 27 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index e2fe23135..2531e9173 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -930,6 +930,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), writecache.WithLogger(c.log), + writecache.WithQoSLimiter(shCfg.limiter), ) } return writeCacheOpts diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 6115cdf90..3503c922e 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -514,4 +514,5 @@ const ( NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`" FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" + WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" ) diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go index b99d73d3a..ee709ea73 100644 --- a/pkg/local_object_storage/writecache/cache.go +++ b/pkg/local_object_storage/writecache/cache.go @@ -6,6 +6,7 @@ import ( "sync" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -61,6 +62,7 @@ func New(opts ...Option) Cache { maxCacheSize: defaultMaxCacheSize, metrics: DefaultMetrics(), flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize, + qosLimiter: qos.NewNoopLimiter(), }, } diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index 2d07d8b32..893d27ba2 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -67,7 +67,13 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { continue } - err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { + release, err := c.qosLimiter.ReadRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.IterateInfo"), zap.Error(err)) + c.modeMtx.RUnlock() + continue + } + err = c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error { if err := fl.acquire(oi.DataSize); err != nil { return err } @@ -82,6 +88,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) { return ctx.Err() } }) + release() if err != nil { c.log.Warn(ctx, logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err)) } @@ -113,6 +120,12 @@ func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) { func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) { defer fl.release(objInfo.size) + release, err := c.qosLimiter.WriteRequest(ctx) + if err != nil { + c.log.Warn(ctx, logs.WriteCacheFailedToAcquireRPSQuota, zap.String("operation", "fstree.Get"), zap.Error(err)) + return + } + defer release() res, err := c.fsTree.Get(ctx, common.GetPrm{ Address: objInfo.addr, }) diff --git 
a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index f2957fe98..dbbe66c19 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -3,6 +3,7 @@ package writecache import ( "context" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "go.uber.org/zap" ) @@ -38,6 +39,8 @@ type options struct { disableBackgroundFlush bool // flushSizeLimit is total size of flushing objects. flushSizeLimit uint64 + // qosLimiter used to limit flush RPS. + qosLimiter qos.Limiter } // WithLogger sets logger. @@ -136,3 +139,9 @@ func WithFlushSizeLimit(v uint64) Option { o.flushSizeLimit = v } } + +func WithQoSLimiter(l qos.Limiter) Option { + return func(o *options) { + o.qosLimiter = l + } +} From fde2649e60ff27c4e5aa66cbd163a14bf585398e Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 14 Mar 2025 12:03:42 +0300 Subject: [PATCH 426/591] [#1678] adm: Fix frostfs-adm morph list-subjects & list-group-subjects `include-names` for `list-subjects` returns error `invalid response subject struct` because `ListSubjects` returns only subject addresses (see frostfs-contract). Replace `include-names` with `extended` as now all subject info printed. Signed-off-by: Dmitrii Stepanov --- .../modules/morph/frostfsid/frostfsid.go | 52 ++++++++++++++----- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 4fbd0bfe1..8ae606f1a 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -34,7 +34,7 @@ const ( subjectNameFlag = "subject-name" subjectKeyFlag = "subject-key" subjectAddressFlag = "subject-address" - includeNamesFlag = "include-names" + extendedFlag = "extended" groupNameFlag = "group-name" groupIDFlag = "group-id" @@ -209,7 +209,7 @@ func initFrostfsIDListSubjectsCmd() { Cmd.AddCommand(frostfsidListSubjectsCmd) frostfsidListSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace to list subjects") - frostfsidListSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") + frostfsidListSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") } func initFrostfsIDCreateGroupCmd() { @@ -256,7 +256,7 @@ func initFrostfsIDListGroupSubjectsCmd() { frostfsidListGroupSubjectsCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) frostfsidListGroupSubjectsCmd.Flags().String(namespaceFlag, "", "Namespace name") frostfsidListGroupSubjectsCmd.Flags().Int64(groupIDFlag, 0, "Group id") - frostfsidListGroupSubjectsCmd.Flags().Bool(includeNamesFlag, false, "Whether include subject name (require additional requests)") + frostfsidListGroupSubjectsCmd.Flags().Bool(extendedFlag, false, "Whether include subject info (require additional requests)") } func initFrostfsIDSetKVCmd() { @@ -336,7 +336,7 @@ func frostfsidDeleteSubject(cmd *cobra.Command, _ []string) { } func frostfsidListSubjects(cmd *cobra.Command, _ []string) { - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) ns := 
getFrostfsIDNamespace(cmd) inv, _, hash := initInvoker(cmd) reader := frostfsidrpclient.NewReader(inv, hash) @@ -349,21 +349,19 @@ func frostfsidListSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subAddresses, func(i, j int) bool { return subAddresses[i].Less(subAddresses[j]) }) for _, addr := range subAddresses { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(addr)) continue } - sessionID, it, err := reader.ListSubjects() + items, err := reader.GetSubject(addr) commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) - items, err := readIterator(inv, &it, sessionID) - commonCmd.ExitOnErr(cmd, "can't read iterator: %w", err) - subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(addr), subj.Name) + printSubjectInfo(cmd, addr, subj) + cmd.Println() } } @@ -483,7 +481,7 @@ func frostfsidDeleteKV(cmd *cobra.Command, _ []string) { func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { ns := getFrostfsIDNamespace(cmd) groupID := getFrostfsIDGroupID(cmd) - includeNames, _ := cmd.Flags().GetBool(includeNamesFlag) + extended, _ := cmd.Flags().GetBool(extendedFlag) inv, cs, hash := initInvoker(cmd) _, err := helper.NNSResolveHash(inv, cs.Hash, helper.DomainOf(constants.FrostfsIDContract)) commonCmd.ExitOnErr(cmd, "can't get netmap contract hash: %w", err) @@ -501,7 +499,7 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { sort.Slice(subjects, func(i, j int) bool { return subjects[i].Less(subjects[j]) }) for _, subjAddr := range subjects { - if !includeNames { + if !extended { cmd.Println(address.Uint160ToString(subjAddr)) continue } @@ -510,7 +508,8 @@ func frostfsidListGroupSubjects(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't get subject: %w", err) subj, err := frostfsidclient.ParseSubject(items) commonCmd.ExitOnErr(cmd, "can't parse subject: %w", err) - cmd.Printf("%s (%s)\n", address.Uint160ToString(subjAddr), subj.Name) + printSubjectInfo(cmd, subjAddr, subj) + cmd.Println() } } @@ -600,3 +599,30 @@ func initInvoker(cmd *cobra.Command) (*invoker.Invoker, *state.Contract, util.Ui return inv, cs, nmHash } + +func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclient.Subject) { + cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) + pk := "" + if subj.PrimaryKey != nil { + pk = subj.PrimaryKey.String() + } + cmd.Printf("Primary key: %s\n", pk) + cmd.Printf("Name: %s\n", subj.Name) + cmd.Printf("Namespace: %s\n", subj.Namespace) + if len(subj.AdditionalKeys) > 0 { + cmd.Printf("Additional keys:\n") + for _, key := range subj.AdditionalKeys { + k := "" + if key != nil { + k = key.String() + } + cmd.Printf("- %s\n", k) + } + } + if len(subj.KV) > 0 { + cmd.Printf("KV:\n") + for k, v := range subj.KV { + cmd.Printf("- %s: %s\n", k, v) + } + } +} From ef6ac751dfa849e3c32171890229b5fbbf979b7a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 14 Mar 2025 16:32:30 +0300 Subject: [PATCH 427/591] [#1671] Update gopls to v0.17.1 Signed-off-by: Evgenii Stratonikov --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 497dce115..538912cc1 100755 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ GOFUMPT_VERSION ?= v0.7.0 GOFUMPT_DIR ?= $(abspath $(BIN))/gofumpt GOFUMPT_VERSION_DIR ?= $(GOFUMPT_DIR)/$(GOFUMPT_VERSION) -GOPLS_VERSION ?= v0.15.1 +GOPLS_VERSION ?= v0.17.1 GOPLS_DIR ?= $(abspath $(BIN))/gopls GOPLS_VERSION_DIR ?= 
$(GOPLS_DIR)/$(GOPLS_VERSION) GOPLS_TEMP_FILE := $(shell mktemp) From 91c7b39232fb23ad5e46975b7a04da57214f4de0 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 14 Mar 2025 16:34:34 +0300 Subject: [PATCH 428/591] [#1680] go.mod: Bump go version to 1.23 Change-Id: I77f908924f675e676f0db6a57204d7c1e0df219a Signed-off-by: Evgenii Stratonikov --- .forgejo/workflows/build.yml | 2 +- .forgejo/workflows/dco.yml | 2 +- .forgejo/workflows/pre-commit.yml | 2 +- .forgejo/workflows/tests.yml | 10 +++++----- .forgejo/workflows/vulncheck.yml | 2 +- Makefile | 2 +- go.mod | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.forgejo/workflows/build.yml b/.forgejo/workflows/build.yml index 9129d136e..d568b9607 100644 --- a/.forgejo/workflows/build.yml +++ b/.forgejo/workflows/build.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml index 7c5af8410..190d7764a 100644 --- a/.forgejo/workflows/dco.yml +++ b/.forgejo/workflows/dco.yml @@ -13,7 +13,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' - name: Run commit format checker uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 diff --git a/.forgejo/workflows/pre-commit.yml b/.forgejo/workflows/pre-commit.yml index b27e7a39a..c2e293175 100644 --- a/.forgejo/workflows/pre-commit.yml +++ b/.forgejo/workflows/pre-commit.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: 1.23 + go-version: 1.24 - name: Set up Python run: | apt update diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index 4f1bebe61..f3f5432ce 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install linters @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -53,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.24' cache: true - name: Run tests @@ -68,7 +68,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install staticcheck @@ -104,7 +104,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install gofumpt diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 140434dfc..bc94792d8 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' check-latest: true - name: Install govulncheck diff --git a/Makefile b/Makefile index 538912cc1..fd2ee3b6f 100755 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" -GO_VERSION ?= 1.22 +GO_VERSION ?= 1.23 LINT_VERSION ?= 1.62.2 TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 PROTOC_VERSION ?= 25.0 diff --git a/go.mod b/go.mod index 69273fda2..eeaca1645 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module 
git.frostfs.info/TrueCloudLab/frostfs-node -go 1.22 +go 1.23 require ( code.gitea.io/sdk/gitea v0.17.1 From 54ef71a92ffdcdaebe3fb1685d548170c4a11006 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 14 Mar 2025 16:39:19 +0300 Subject: [PATCH 429/591] [#1680] Update staticcheck to 2025.1.1 Change-Id: Ie851e714afebf171c4d42d4c49b42379c2665113 Signed-off-by: Evgenii Stratonikov --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fd2ee3b6f..cd80fc72e 100755 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ PROTOC_OS_VERSION=osx-x86_64 ifeq ($(shell uname), Linux) PROTOC_OS_VERSION=linux-x86_64 endif -STATICCHECK_VERSION ?= 2024.1.1 +STATICCHECK_VERSION ?= 2025.1.1 ARCH = amd64 BIN = bin From fc743cc5373c7ccce331c2c84fb53d7dd41e4d29 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 18 Mar 2025 10:56:30 +0300 Subject: [PATCH 430/591] [#1684] cli: Correct description of control shards writecache seal Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/control/writecache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/modules/control/writecache.go b/cmd/frostfs-cli/modules/control/writecache.go index 80e4a0c87..d0c9a641b 100644 --- a/cmd/frostfs-cli/modules/control/writecache.go +++ b/cmd/frostfs-cli/modules/control/writecache.go @@ -24,7 +24,7 @@ var writecacheShardCmd = &cobra.Command{ var sealWritecacheShardCmd = &cobra.Command{ Use: "seal", Short: "Flush objects from write-cache and move write-cache to degraded read only mode.", - Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the degraded read only mode: write-cache will be empty and no objects will be put in it.", + Long: "Flush all the objects from the write-cache to the main storage and move the write-cache to the 'CLOSED' mode: write-cache will be empty and no objects will be put in it.", Run: sealWritecache, } From a7319bc979669dde3a67762220fdbd96077a3f94 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 18 Mar 2025 09:26:34 +0300 Subject: [PATCH 431/591] [#1683] metabase/test: Report allocs in benchmarkSelect() Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/select_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 5cc998311..ce2156d2e 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -1216,6 +1216,8 @@ func TestExpiredObjects(t *testing.T) { } func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.SearchFilters, expected int) { + b.ReportAllocs() + var prm meta.SelectPrm prm.SetContainerID(cid) prm.SetFilters(fs) From a405fb1f39d616e37fd6d0de35c2e68d6cd3b6b8 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 18 Mar 2025 09:34:47 +0300 Subject: [PATCH 432/591] [#1683] metabase: Check object status once in Select() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit objectStatus() is called twice for the same object: First, in selectObject() to filter removed objects. Then, again, in getObjectForSlowFilters() via db.get(). The second call will return the same result, so remove useless branch. 
``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ old │ status │ │ sec/op │ sec/op vs base │ Select/string_equal-8 5.022m ± 7% 3.968m ± 8% -20.98% (p=0.000 n=10) Select/string_not_equal-8 4.953m ± 9% 3.990m ± 10% -19.44% (p=0.000 n=10) Select/common_prefix-8 4.962m ± 8% 3.971m ± 9% -19.98% (p=0.000 n=10) Select/unknown-8 5.246m ± 9% 3.548m ± 5% -32.37% (p=0.000 n=10) geomean 5.045m 3.865m -23.39% │ old │ status │ │ B/op │ B/op vs base │ Select/string_equal-8 2.685Mi ± 0% 2.250Mi ± 0% -16.20% (p=0.000 n=10) Select/string_not_equal-8 2.685Mi ± 0% 2.250Mi ± 0% -16.20% (p=0.000 n=10) Select/common_prefix-8 2.685Mi ± 0% 2.250Mi ± 0% -16.20% (p=0.000 n=10) Select/unknown-8 2.677Mi ± 0% 2.243Mi ± 0% -16.24% (p=0.000 n=10) geomean 2.683Mi 2.248Mi -16.21% │ old │ status │ │ allocs/op │ allocs/op vs base │ Select/string_equal-8 69.03k ± 0% 56.02k ± 0% -18.84% (p=0.000 n=10) Select/string_not_equal-8 69.03k ± 0% 56.02k ± 0% -18.84% (p=0.000 n=10) Select/common_prefix-8 69.03k ± 0% 56.02k ± 0% -18.84% (p=0.000 n=10) Select/unknown-8 68.03k ± 0% 55.03k ± 0% -19.11% (p=0.000 n=10) geomean 68.78k 55.77k -18.90% ``` Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/select.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 9f1b8b060..4a3b22b55 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -517,7 +517,7 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, true, false, currEpoch) + obj, err := db.get(tx, addr, buf, false, false, currEpoch) if err != nil { var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { From a11b2d27e4011cf366e889f79cac198abdbcaa51 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 20 Feb 2025 11:05:11 +0300 Subject: [PATCH 433/591] [#1642] tree: Introduce `Cursor` type * Use `Cursor` as parameter for `TreeSortedByFilename` Signed-off-by: Airat Arifullin --- pkg/local_object_storage/engine/tree.go | 4 ++-- pkg/local_object_storage/pilorama/boltdb.go | 9 ++++---- pkg/local_object_storage/pilorama/forest.go | 8 +++---- .../pilorama/forest_test.go | 4 ++-- pkg/local_object_storage/pilorama/heap.go | 6 ++--- .../pilorama/interface.go | 23 ++++++++++++++++++- .../pilorama/split_test.go | 2 +- pkg/local_object_storage/shard/tree.go | 2 +- pkg/services/tree/service.go | 2 +- 9 files changed, 40 insertions(+), 20 deletions(-) diff --git a/pkg/local_object_storage/engine/tree.go b/pkg/local_object_storage/engine/tree.go index 7f70d36f7..cfd15b4d4 100644 --- a/pkg/local_object_storage/engine/tree.go +++ b/pkg/local_object_storage/engine/tree.go @@ -230,7 +230,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree } // TreeSortedByFilename implements the pilorama.Forest interface. 
-func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename", trace.WithAttributes( attribute.String("container_id", cid.EncodeToString()), @@ -241,7 +241,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, var err error var nodes []pilorama.MultiNodeInfo - var cursor *string + var cursor *pilorama.Cursor for _, sh := range e.sortShards(cid) { nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count) if err != nil { diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 9d71d9fda..0eea60ad8 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1077,7 +1077,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeIDs MultiNode, threshol } // TreeSortedByFilename implements the Forest interface. -func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) { +func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeIDs MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { var ( startedAt = time.Now() success = false @@ -1128,7 +1128,6 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } t.fillSortedChildren(b, nodeIDs, h) - for info, ok := h.pop(); ok; info, ok = h.pop() { for _, id := range info.id { childInfo, err := t.getChildInfo(b, key, id) @@ -1155,7 +1154,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = &s + last = NewCursor(s) } return res, last, metaerr.Wrap(err) } @@ -1166,10 +1165,10 @@ func sortByFilename(nodes []NodeInfo) { }) } -func sortAndCut(result []NodeInfo, last *string) []NodeInfo { +func sortAndCut(result []NodeInfo, last *Cursor) []NodeInfo { var lastBytes []byte if last != nil { - lastBytes = []byte(*last) + lastBytes = []byte(last.GetFilename()) } sortByFilename(result) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index ef284a727..ce8528a81 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -164,7 +164,7 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string, } // TreeSortedByFilename implements the Forest interface. 
-func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *string, count int) ([]MultiNodeInfo, *string, error) { +func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeIDs MultiNode, start *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) { fullID := cid.String() + "/" + treeID s, ok := f.treeMap[fullID] if !ok { @@ -204,14 +204,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI r := mergeNodeInfos(res) for i := range r { - if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > *start { + if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], &last, nil + return r[i:finish], NewCursor(last), nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, &last, nil + return nil, NewCursor(last), nil } // TreeGetChildren implements the Forest interface. diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index de56fc82b..844084c55 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -273,7 +273,7 @@ func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) @@ -328,7 +328,7 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) { } var result []MultiNodeInfo - treeAppend := func(t *testing.T, last *string, count int) *string { + treeAppend := func(t *testing.T, last *Cursor, count int) *Cursor { res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count) require.NoError(t, err) result = append(result, res...) diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index 5a00bcf7a..70afc148a 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -30,13 +30,13 @@ func (h *filenameHeap) Pop() any { // fixedHeap maintains a fixed number of smallest elements started at some point. 
type fixedHeap struct { - start *string + start *Cursor sorted bool count int h *filenameHeap } -func newHeap(start *string, count int) *fixedHeap { +func newHeap(start *Cursor, count int) *fixedHeap { h := new(filenameHeap) heap.Init(h) @@ -50,7 +50,7 @@ func newHeap(start *string, count int) *fixedHeap { const amortizationMultiplier = 5 func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil && filename <= *h.start { + if h.start != nil && filename <= (*h.start).GetFilename() { return false } diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index 1f7e742a2..e364b008b 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -37,7 +37,7 @@ type Forest interface { TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error) // TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.. // Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree. - TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *string, count int) ([]MultiNodeInfo, *string, error) + TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID MultiNode, last *Cursor, count int) ([]MultiNodeInfo, *Cursor, error) // TreeGetOpLog returns first log operation stored at or above the height. // In case no such operation is found, empty Move and nil error should be returned. TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error) @@ -79,6 +79,27 @@ const ( AttributeVersion = "Version" ) +// Cursor keeps state between function calls for traversing nodes. +// It stores the attributes associated with a previous call, allowing subsequent operations +// to resume traversal from this point rather than starting from the beginning. +type Cursor struct { + // Last traversed filename. + filename string +} + +func NewCursor(filename string) *Cursor { + return &Cursor{ + filename: filename, + } +} + +func (c *Cursor) GetFilename() string { + if c == nil { + return "" + } + return c.filename +} + // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. 
type CIDDescriptor struct { diff --git a/pkg/local_object_storage/pilorama/split_test.go b/pkg/local_object_storage/pilorama/split_test.go index 54c2b90a6..eecee1527 100644 --- a/pkg/local_object_storage/pilorama/split_test.go +++ b/pkg/local_object_storage/pilorama/split_test.go @@ -96,7 +96,7 @@ func testDuplicateDirectory(t *testing.T, f Forest) { require.Equal(t, []byte{8}, testGetByPath(t, "dir1/dir3/value4")) require.Equal(t, []byte{10}, testGetByPath(t, "value0")) - testSortedByFilename := func(t *testing.T, root MultiNode, last *string, batchSize int) ([]MultiNodeInfo, *string) { + testSortedByFilename := func(t *testing.T, root MultiNode, last *Cursor, batchSize int) ([]MultiNodeInfo, *Cursor) { res, last, err := f.TreeSortedByFilename(context.Background(), d.CID, treeID, root, last, batchSize) require.NoError(t, err) return res, last diff --git a/pkg/local_object_storage/shard/tree.go b/pkg/local_object_storage/shard/tree.go index e9cd5f8c1..db361a8bd 100644 --- a/pkg/local_object_storage/shard/tree.go +++ b/pkg/local_object_storage/shard/tree.go @@ -246,7 +246,7 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin } // TreeSortedByFilename implements the pilorama.Forest interface. -func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *string, count int) ([]pilorama.MultiNodeInfo, *string, error) { +func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.MultiNode, last *pilorama.Cursor, count int) ([]pilorama.MultiNodeInfo, *pilorama.Cursor, error) { ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename", trace.WithAttributes( attribute.String("shard_id", s.ID().String()), diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index cd89d6a28..98c5626bd 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -412,7 +412,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS type stackItem struct { values []pilorama.MultiNodeInfo parent pilorama.MultiNode - last *string + last *pilorama.Cursor } func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid cidSDK.ID, b *GetSubTreeRequest_Body, forest pilorama.Forest) error { From 760b6a44ea0557489a1f16fbdbbdfb81efcc0d58 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Fri, 28 Feb 2025 15:47:29 +0300 Subject: [PATCH 434/591] [#1642] tree: Fix sorted getSubtree for multiversion filenames Signed-off-by: Airat Arifullin --- pkg/local_object_storage/pilorama/boltdb.go | 3 ++- pkg/local_object_storage/pilorama/forest.go | 4 ++-- pkg/local_object_storage/pilorama/heap.go | 15 +++++++++++++-- pkg/local_object_storage/pilorama/interface.go | 13 ++++++++++++- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 0eea60ad8..2ca6fdefa 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1128,6 +1128,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } t.fillSortedChildren(b, nodeIDs, h) + for info, ok := h.pop(); ok; info, ok = h.pop() { for _, id := range info.id { childInfo, err := t.getChildInfo(b, key, id) @@ -1154,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, 
AttributeFilename)) - last = NewCursor(s) + last = NewCursor(s, res[len(res)-1].Children[len(res[len(res)-1].Children)-1]) } return res, last, metaerr.Wrap(err) } diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index ce8528a81..b5320e42d 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -207,11 +207,11 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI if start == nil || string(findAttr(r[i].Meta, AttributeFilename)) > start.GetFilename() { finish := min(len(res), i+count) last := string(findAttr(r[finish-1].Meta, AttributeFilename)) - return r[i:finish], NewCursor(last), nil + return r[i:finish], NewCursor(last, 0), nil } } last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename)) - return nil, NewCursor(last), nil + return nil, NewCursor(last, 0), nil } // TreeGetChildren implements the Forest interface. diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go index 70afc148a..b035be1e1 100644 --- a/pkg/local_object_storage/pilorama/heap.go +++ b/pkg/local_object_storage/pilorama/heap.go @@ -50,8 +50,19 @@ func newHeap(start *Cursor, count int) *fixedHeap { const amortizationMultiplier = 5 func (h *fixedHeap) push(id MultiNode, filename string) bool { - if h.start != nil && filename <= (*h.start).GetFilename() { - return false + if h.start != nil { + if filename < h.start.GetFilename() { + return false + } else if filename == h.start.GetFilename() { + // A tree may have a lot of nodes with the same filename but different versions so that + // len(nodes) > batch_size. The cut nodes should be pushed into the result on repeated call + // with the same filename. + pos := slices.Index(id, h.start.GetNode()) + if pos == -1 || pos+1 >= len(id) { + return false + } + id = id[pos+1:] + } } *h.h = append(*h.h, heapInfo{id: id, filename: filename}) diff --git a/pkg/local_object_storage/pilorama/interface.go b/pkg/local_object_storage/pilorama/interface.go index e364b008b..e1f6cd8e7 100644 --- a/pkg/local_object_storage/pilorama/interface.go +++ b/pkg/local_object_storage/pilorama/interface.go @@ -85,11 +85,15 @@ const ( type Cursor struct { // Last traversed filename. filename string + + // Last traversed node. + node Node } -func NewCursor(filename string) *Cursor { +func NewCursor(filename string, node Node) *Cursor { return &Cursor{ filename: filename, + node: node, } } @@ -100,6 +104,13 @@ func (c *Cursor) GetFilename() string { return c.filename } +func (c *Cursor) GetNode() Node { + if c == nil { + return Node(0) + } + return c.node +} + // CIDDescriptor contains container ID and information about the node position // in the list of container nodes. 
type CIDDescriptor struct { From 39f549a7ab011c25b81f4bafa7c21f8a46cd5b26 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 10 Mar 2025 13:17:24 +0300 Subject: [PATCH 435/591] [#1642] tree: Introduce a helper `LastChild` Signed-off-by: Airat Arifullin --- pkg/local_object_storage/pilorama/boltdb.go | 2 +- pkg/local_object_storage/pilorama/multinode.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 2ca6fdefa..fc7cdaabc 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1155,7 +1155,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr } if len(res) != 0 { s := string(findAttr(res[len(res)-1].Meta, AttributeFilename)) - last = NewCursor(s, res[len(res)-1].Children[len(res[len(res)-1].Children)-1]) + last = NewCursor(s, res[len(res)-1].LastChild()) } return res, last, metaerr.Wrap(err) } diff --git a/pkg/local_object_storage/pilorama/multinode.go b/pkg/local_object_storage/pilorama/multinode.go index 106ba6ae9..36d347f10 100644 --- a/pkg/local_object_storage/pilorama/multinode.go +++ b/pkg/local_object_storage/pilorama/multinode.go @@ -25,6 +25,10 @@ func (r *MultiNodeInfo) Add(info NodeInfo) bool { return true } +func (r *MultiNodeInfo) LastChild() Node { + return r.Children[len(r.Children)-1] +} + func (n NodeInfo) ToMultiNode() MultiNodeInfo { return MultiNodeInfo{ Children: MultiNode{n.ID}, From a7ac30da9c47abe0e2fffa49de287e8849d570ed Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 12 Mar 2025 17:01:56 +0300 Subject: [PATCH 436/591] [#1642] tree: Refactor `getSortedSubTree` * Reuse `item` as result for `forest.TreeSortedByFilename` invocation. Signed-off-by: Airat Arifullin --- pkg/services/tree/service.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 98c5626bd..eeffec08b 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -463,14 +463,13 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid break } - nodes, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) + var err error + item.values, item.last, err = forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), item.parent, item.last, batchSize) if err != nil { return err } - item.values = nodes - item.last = last - if len(nodes) == 0 { + if len(item.values) == 0 { stack = stack[:len(stack)-1] continue } From a49f0717b3fbba0ecf51ce55f49221474a8f8fb8 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 19 Mar 2025 09:22:29 +0300 Subject: [PATCH 437/591] [#1685] metabase: Cache frequently accessed singleton buckets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are some buckets we access almost always, to check whether an object is alive. In search we also iterate over lots of objects, and `tx.Bucket()` shows itself a lot in pprof.
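The patch addresses this by threading an optional per-transaction bucket cache through the lookup helpers. Stripped of the bbolt and metabase specifics, the idea is roughly the following sketch; the types and bucket name are simplified stand-ins for what bucket_cache.go below actually adds.

```
package main

import "fmt"

// bucket and tx stand in for *bbolt.Bucket and *bbolt.Tx; lookups counts
// how many times tx.Bucket() is actually called.
type bucket struct{ name string }

type tx struct{ lookups int }

func (t *tx) Bucket(name string) *bucket {
	t.lookups++
	return &bucket{name: name}
}

// bucketCache keeps a handle after the first lookup within one transaction.
type bucketCache struct{ graveyard *bucket }

func graveyardBucket(bc *bucketCache, t *tx) *bucket {
	if bc == nil {
		return t.Bucket("graveyard") // old behaviour: look up every time
	}
	if bc.graveyard == nil {
		bc.graveyard = t.Bucket("graveyard")
	}
	return bc.graveyard
}

func main() {
	t := &tx{}
	bc := &bucketCache{}
	for range 10000 { // many objects inspected within one Select() transaction
		_ = graveyardBucket(bc, t)
	}
	fmt.Println("tx.Bucket calls:", t.lookups) // 1 instead of 10000
}
```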
``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ 1 │ 2 │ │ sec/op │ sec/op vs base │ Select/string_equal-8 4.753m ± 6% 3.969m ± 14% -16.50% (p=0.000 n=10) Select/string_not_equal-8 4.247m ± 9% 3.486m ± 11% -17.93% (p=0.000 n=10) Select/common_prefix-8 4.163m ± 5% 3.323m ± 5% -20.18% (p=0.000 n=10) Select/unknown-8 3.557m ± 3% 3.064m ± 8% -13.85% (p=0.001 n=10) geomean 4.158m 3.445m -17.15% │ 1 │ 2 │ │ B/op │ B/op vs base │ Select/string_equal-8 2.250Mi ± 0% 1.907Mi ± 0% -15.24% (p=0.000 n=10) Select/string_not_equal-8 2.250Mi ± 0% 1.907Mi ± 0% -15.24% (p=0.000 n=10) Select/common_prefix-8 2.250Mi ± 0% 1.907Mi ± 0% -15.24% (p=0.000 n=10) Select/unknown-8 2.243Mi ± 0% 1.900Mi ± 0% -15.29% (p=0.000 n=10) geomean 2.248Mi 1.905Mi -15.26% │ 1 │ 2 │ │ allocs/op │ allocs/op vs base │ Select/string_equal-8 56.02k ± 0% 47.03k ± 0% -16.05% (p=0.000 n=10) Select/string_not_equal-8 56.02k ± 0% 47.03k ± 0% -16.05% (p=0.000 n=10) Select/common_prefix-8 56.02k ± 0% 47.03k ± 0% -16.05% (p=0.000 n=10) Select/unknown-8 55.03k ± 0% 46.04k ± 0% -16.34% (p=0.000 n=10) geomean 55.78k 46.78k -16.12% ``` Signed-off-by: Evgenii Stratonikov --- .../metabase/bucket_cache.go | 45 +++++++++++++++++++ pkg/local_object_storage/metabase/exists.go | 10 +++-- pkg/local_object_storage/metabase/lock.go | 6 ++- pkg/local_object_storage/metabase/select.go | 3 +- 4 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 pkg/local_object_storage/metabase/bucket_cache.go diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go new file mode 100644 index 000000000..b425450af --- /dev/null +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -0,0 +1,45 @@ +package meta + +import ( + "go.etcd.io/bbolt" +) + +type bucketCache struct { + locked *bbolt.Bucket + graveyard *bbolt.Bucket + garbage *bbolt.Bucket +} + +func newBucketCache() *bucketCache { + return &bucketCache{} +} + +func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(bucketNameLocked) + } + return getBucket(&bc.locked, tx, bucketNameLocked) +} + +func getGraveyardBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(graveyardBucketName) + } + return getBucket(&bc.graveyard, tx, graveyardBucketName) +} + +func getGarbageBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { + if bc == nil { + return tx.Bucket(garbageBucketName) + } + return getBucket(&bc.garbage, tx, garbageBucketName) +} + +func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { + if *cache != nil { + return *cache + } + + *cache = tx.Bucket(name) + return *cache +} diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 962108a76..0b28da5c9 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -153,8 +153,12 @@ func (db *DB) exists(tx *bbolt.Tx, addr oid.Address, ecParent oid.Address, currE // - 2 if object is covered with tombstone; // - 3 if object is expired. 
func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { + return objectStatusWithCache(nil, tx, addr, currEpoch) +} + +func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, error) { // locked object could not be removed/marked with GC/expired - if objectLocked(tx, addr.Container(), addr.Object()) { + if objectLockedWithCache(bc, tx, addr.Container(), addr.Object()) { return 0, nil } @@ -167,8 +171,8 @@ func objectStatus(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (uint8, erro return 3, nil } - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + graveyardBkt := getGraveyardBucket(bc, tx) + garbageBkt := getGarbageBucket(bc, tx) addrKey := addressKey(addr, make([]byte, addressKeySize)) return inGraveyardWithKey(addrKey, graveyardBkt, garbageBkt), nil } diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index aa1478423..f73c2b4f6 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -163,7 +163,11 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) { // checks if specified object is locked in the specified container. func objectLocked(tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { - bucketLocked := tx.Bucket(bucketNameLocked) + return objectLockedWithCache(nil, tx, idCnr, idObj) +} + +func objectLockedWithCache(bc *bucketCache, tx *bbolt.Tx, idCnr cid.ID, idObj oid.ID) bool { + bucketLocked := getLockedBucket(bc, tx) if bucketLocked != nil { key := make([]byte, cidSize) idCnr.Encode(key) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index 4a3b22b55..a95384753 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -131,6 +131,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters res := make([]oid.Address, 0, len(mAddr)) + bc := newBucketCache() for a, ind := range mAddr { if ind != expLen { continue // ignore objects with unmatched fast filters @@ -145,7 +146,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters var addr oid.Address addr.SetContainer(cnr) addr.SetObject(id) - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } From af5b3575d0a2a63f6ec87fb58feb28b30419110a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 20 Mar 2025 14:41:13 +0300 Subject: [PATCH 438/591] [#1690] qos: Do not export zero metrics counters Signed-off-by: Dmitrii Stepanov --- internal/qos/limiter.go | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 8f00791c5..e92cef652 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -211,13 +211,26 @@ func (n *mClockLimiter) startMetricsCollect() { continue } metrics := n.metrics.Load().metrics - for tag, s := range n.readStats { - metrics.SetOperationTagCounters(shardID, "read", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load()) - } - for tag, s := range n.writeStats { - metrics.SetOperationTagCounters(shardID, "write", tag, s.pending.Load(), s.inProgress.Load(), s.completed.Load(), s.resourceExhausted.Load()) - } + exportMetrics(metrics, n.readStats, shardID, "read") + exportMetrics(metrics, n.writeStats, 
shardID, "write") } } }() } + +func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation string) { + var pending uint64 + var inProgress uint64 + var completed uint64 + var resExh uint64 + for tag, s := range stats { + pending = s.pending.Load() + inProgress = s.inProgress.Load() + completed = s.completed.Load() + resExh = s.resourceExhausted.Load() + if pending == 0 && inProgress == 0 && completed == 0 && resExh == 0 { + continue + } + metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) + } +} From 21bed3362c03730160b5f98fa3dc06c3d4d78cf2 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 10:14:22 +0300 Subject: [PATCH 439/591] [#1685] metabase: Cache expired bucket MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ master │ expired │ │ sec/op │ sec/op vs base │ Select/string_equal-8 4.007m ± 10% 3.529m ± 11% -11.94% (p=0.000 n=10) Select/string_not_equal-8 3.834m ± 12% 3.440m ± 7% -10.29% (p=0.029 n=10) Select/common_prefix-8 3.470m ± 9% 3.240m ± 6% ~ (p=0.105 n=10) Select/unknown-8 3.156m ± 3% 3.198m ± 6% ~ (p=0.631 n=10) geomean 3.602m 3.349m -7.03% │ master │ expired │ │ B/op │ B/op vs base │ Select/string_equal-8 1.907Mi ± 0% 1.885Mi ± 0% -1.18% (p=0.000 n=10) Select/string_not_equal-8 1.907Mi ± 0% 1.885Mi ± 0% -1.18% (p=0.000 n=10) Select/common_prefix-8 1.907Mi ± 0% 1.885Mi ± 0% -1.18% (p=0.000 n=10) Select/unknown-8 1.900Mi ± 0% 1.877Mi ± 0% -1.18% (p=0.000 n=10) geomean 1.905Mi 1.883Mi -1.18% │ master │ expired │ │ allocs/op │ allocs/op vs base │ Select/string_equal-8 47.03k ± 0% 46.04k ± 0% -2.12% (p=0.000 n=10) Select/string_not_equal-8 47.03k ± 0% 46.04k ± 0% -2.12% (p=0.000 n=10) Select/common_prefix-8 47.03k ± 0% 46.04k ± 0% -2.12% (p=0.000 n=10) Select/unknown-8 46.04k ± 0% 45.05k ± 0% -2.16% (p=0.000 n=10) geomean 46.78k 45.79k -2.13% ``` Change-Id: I9c7a5e1f5c8b9eb3f25a563fd74c6ad2a9d1b92e Signed-off-by: Evgenii Stratonikov --- .../metabase/bucket_cache.go | 25 ++++++++++++++++++- pkg/local_object_storage/metabase/exists.go | 2 +- pkg/local_object_storage/metabase/expired.go | 8 +++--- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go index b425450af..14c164afc 100644 --- a/pkg/local_object_storage/metabase/bucket_cache.go +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -1,6 +1,7 @@ package meta import ( + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.etcd.io/bbolt" ) @@ -8,10 +9,11 @@ type bucketCache struct { locked *bbolt.Bucket graveyard *bbolt.Bucket garbage *bbolt.Bucket + expired map[cid.ID]*bbolt.Bucket } func newBucketCache() *bucketCache { - return &bucketCache{} + return &bucketCache{expired: make(map[cid.ID]*bbolt.Bucket)} } func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { @@ -43,3 +45,24 @@ func getBucket(cache **bbolt.Bucket, tx *bbolt.Tx, name []byte) *bbolt.Bucket { *cache = tx.Bucket(name) return *cache } + +func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = objectToExpirationEpochBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(bc.expired, tx, 
objectToExpirationEpochBucketName, cnr) +} + +func getMappedBucket(m map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { + value, ok := m[cnr] + if ok { + return value + } + + bucketName := make([]byte, bucketKeySize) + bucketName = nameFunc(cnr, bucketName) + m[cnr] = getBucket(&value, tx, bucketName) + return value +} diff --git a/pkg/local_object_storage/metabase/exists.go b/pkg/local_object_storage/metabase/exists.go index 0b28da5c9..7bd6f90a6 100644 --- a/pkg/local_object_storage/metabase/exists.go +++ b/pkg/local_object_storage/metabase/exists.go @@ -162,7 +162,7 @@ func objectStatusWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, curr return 0, nil } - expired, err := isExpired(tx, addr, currEpoch) + expired, err := isExpiredWithCache(bc, tx, addr, currEpoch) if err != nil { return 0, err } diff --git a/pkg/local_object_storage/metabase/expired.go b/pkg/local_object_storage/metabase/expired.go index 68144d8b1..a1351cb6f 100644 --- a/pkg/local_object_storage/metabase/expired.go +++ b/pkg/local_object_storage/metabase/expired.go @@ -74,9 +74,11 @@ func (db *DB) FilterExpired(ctx context.Context, epoch uint64, addresses []oid.A } func isExpired(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { - bucketName := make([]byte, bucketKeySize) - bucketName = objectToExpirationEpochBucketName(addr.Container(), bucketName) - b := tx.Bucket(bucketName) + return isExpiredWithCache(nil, tx, addr, currEpoch) +} + +func isExpiredWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (bool, error) { + b := getExpiredBucket(bc, tx, addr.Container()) if b == nil { return false, nil } From eb9df85b989f861f50891fc21732ce56e56dabc3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 10:23:26 +0300 Subject: [PATCH 440/591] [#1685] metabase: Cache primary bucket MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ expired │ primary │ │ sec/op │ sec/op vs base │ Select/string_equal-8 3.529m ± 11% 3.689m ± 7% +4.55% (p=0.023 n=10) Select/string_not_equal-8 3.440m ± 7% 3.543m ± 13% ~ (p=0.190 n=10) Select/common_prefix-8 3.240m ± 6% 3.050m ± 5% -5.85% (p=0.005 n=10) Select/unknown-8 3.198m ± 6% 2.928m ± 8% -8.44% (p=0.003 n=10) geomean 3.349m 3.287m -1.84% │ expired │ primary │ │ B/op │ B/op vs base │ Select/string_equal-8 1.885Mi ± 0% 1.786Mi ± 0% -5.23% (p=0.000 n=10) Select/string_not_equal-8 1.885Mi ± 0% 1.786Mi ± 0% -5.23% (p=0.000 n=10) Select/common_prefix-8 1.885Mi ± 0% 1.786Mi ± 0% -5.23% (p=0.000 n=10) Select/unknown-8 1.877Mi ± 0% 1.779Mi ± 0% -5.26% (p=0.000 n=10) geomean 1.883Mi 1.784Mi -5.24% │ expired │ primary │ │ allocs/op │ allocs/op vs base │ Select/string_equal-8 46.04k ± 0% 43.04k ± 0% -6.50% (p=0.000 n=10) Select/string_not_equal-8 46.04k ± 0% 43.04k ± 0% -6.50% (p=0.000 n=10) Select/common_prefix-8 46.04k ± 0% 43.04k ± 0% -6.50% (p=0.000 n=10) Select/unknown-8 45.05k ± 0% 42.05k ± 0% -6.65% (p=0.000 n=10) geomean 45.79k 42.79k -6.54% ``` Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/bucket_cache.go | 15 ++++++++++++++- pkg/local_object_storage/metabase/get.go | 15 ++++++++++----- pkg/local_object_storage/metabase/select.go | 12 ++++++------ 3 files changed, 30 insertions(+), 12 deletions(-) diff --git 
a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go index 14c164afc..69553d55c 100644 --- a/pkg/local_object_storage/metabase/bucket_cache.go +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -10,10 +10,14 @@ type bucketCache struct { graveyard *bbolt.Bucket garbage *bbolt.Bucket expired map[cid.ID]*bbolt.Bucket + primary map[cid.ID]*bbolt.Bucket } func newBucketCache() *bucketCache { - return &bucketCache{expired: make(map[cid.ID]*bbolt.Bucket)} + return &bucketCache{ + expired: make(map[cid.ID]*bbolt.Bucket), + primary: make(map[cid.ID]*bbolt.Bucket), + } } func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { @@ -55,6 +59,15 @@ func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { return getMappedBucket(bc.expired, tx, objectToExpirationEpochBucketName, cnr) } +func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { + if bc == nil { + bucketName := make([]byte, bucketKeySize) + bucketName = primaryBucketName(cnr, bucketName) + return tx.Bucket(bucketName) + } + return getMappedBucket(bc.primary, tx, primaryBucketName, cnr) +} + func getMappedBucket(m map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { value, ok := m[cnr] if ok { diff --git a/pkg/local_object_storage/metabase/get.go b/pkg/local_object_storage/metabase/get.go index 615add1af..821810c09 100644 --- a/pkg/local_object_storage/metabase/get.go +++ b/pkg/local_object_storage/metabase/get.go @@ -88,8 +88,12 @@ func (db *DB) Get(ctx context.Context, prm GetPrm) (res GetRes, err error) { } func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { + return db.getWithCache(nil, tx, addr, key, checkStatus, raw, currEpoch) +} + +func (db *DB) getWithCache(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw bool, currEpoch uint64) (*objectSDK.Object, error) { if checkStatus { - st, err := objectStatus(tx, addr, currEpoch) + st, err := objectStatusWithCache(bc, tx, addr, currEpoch) if err != nil { return nil, err } @@ -109,12 +113,13 @@ func (db *DB) get(tx *bbolt.Tx, addr oid.Address, key []byte, checkStatus, raw b bucketName := make([]byte, bucketKeySize) // check in primary index - data := getFromBucket(tx, primaryBucketName(cnr, bucketName), key) - if len(data) != 0 { - return obj, obj.Unmarshal(data) + if b := getPrimaryBucket(bc, tx, cnr); b != nil { + if data := b.Get(key); len(data) != 0 { + return obj, obj.Unmarshal(data) + } } - data = getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) + data := getFromBucket(tx, ecInfoBucketName(cnr, bucketName), key) if len(data) != 0 { return nil, getECInfoError(tx, cnr, data) } diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index a95384753..60da50671 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -154,7 +154,7 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters continue // ignore removed objects } - addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) + addr, match := db.matchSlowFilters(bc, tx, addr, group.slowFilters, currEpoch) if !match { continue // ignore objects with unmatched slow filters } @@ -452,13 +452,13 @@ func (db *DB) selectObjectID( } // matchSlowFilters return true if object header is matched by all slow filters. 
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { +func (db *DB) matchSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) { result := addr if len(f) == 0 { return result, true } - obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch) + obj, isECChunk, err := db.getObjectForSlowFilters(bc, tx, addr, currEpoch) if err != nil { return result, false } @@ -516,9 +516,9 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc return result, true } -func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { +func (db *DB) getObjectForSlowFilters(bc *bucketCache, tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) { buf := make([]byte, addressKeySize) - obj, err := db.get(tx, addr, buf, false, false, currEpoch) + obj, err := db.getWithCache(bc, tx, addr, buf, false, false, currEpoch) if err != nil { var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { @@ -528,7 +528,7 @@ func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch continue } addr.SetObject(objID) - obj, err = db.get(tx, addr, buf, true, false, currEpoch) + obj, err = db.getWithCache(bc, tx, addr, buf, true, false, currEpoch) if err == nil { return obj, true, nil } From e8801dbf49c2407bb7cbd25d003d7a9b95fc7c3b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 15:28:14 +0300 Subject: [PATCH 441/591] [#1691] metabase: Move cheaper conditions to the front in ListWithCursor() `objectLocked` call is expensive, it does IO. We may omit it if object is not expired. Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/list.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index eaef3b9ba..0b6cdf702 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -251,7 +251,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } expEpoch, hasExpEpoch := hasExpirationEpoch(&o) - if !objectLocked(bkt.Tx(), cnt, obj) && hasExpEpoch && expEpoch < currEpoch { + if hasExpEpoch && expEpoch < currEpoch && !objectLocked(bkt.Tx(), cnt, obj) { continue } From 45b779615104ed790f65f4776fb59cc6c163d0f6 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 11 Mar 2025 16:57:07 +0300 Subject: [PATCH 442/591] [#1689] ci: Reimplement CI tasks in Jenkinsfile This commit introduces Jenkins pipeline that duplicates the features of existing Forgejo Actions workflows. 
Change-Id: I657a6c27373a1ed4736ae27b4fb660e0ac86012d Signed-off-by: Vitaliy Potyarkin --- .ci/Jenkinsfile | 81 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 .ci/Jenkinsfile diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile new file mode 100644 index 000000000..e21ce61c5 --- /dev/null +++ b/.ci/Jenkinsfile @@ -0,0 +1,81 @@ +def golang = ['1.23', '1.24'] +def golangDefault = "golang:${golang.last()}" + +async { + + for (version in golang) { + def go = version + + task("test/go${go}") { + container("golang:${go}") { + sh 'make test' + } + } + + task("build/go${go}") { + container("golang:${go}") { + for (app in ['cli', 'node', 'ir', 'adm', 'lens']) { + sh """ + make bin/frostfs-${app} + bin/frostfs-${app} --version + """ + } + } + } + } + + task('test/race') { + container(golangDefault) { + sh 'make test GOFLAGS="-count=1 -race"' + } + } + + task('lint') { + container(golangDefault) { + sh 'make lint-install lint' + } + } + + task('staticcheck') { + container(golangDefault) { + sh 'make staticcheck-install staticcheck-run' + } + } + + task('gopls') { + container(golangDefault) { + sh 'make gopls-install gopls-run' + } + } + + task('gofumpt') { + container(golangDefault) { + sh ''' + make fumpt-install + make fumpt + git diff --exit-code --quiet + ''' + } + } + + task('vulncheck') { + container(golangDefault) { + sh ''' + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck ./... + ''' + } + } + + task('pre-commit') { + sh ''' + apt update + apt install -y --no-install-recommends pre-commit + ''' // TODO: Make an OCI image for pre-commit + golang? Unpack golang tarball with a library function? + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' + } + } +} + +// TODO: dco check From affab255120dd54fc581a26fbdcac2cf9a82346a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 13 Mar 2025 10:26:08 +0300 Subject: [PATCH 443/591] Makefile: Add Gerrit-related targets This commit adds helper targets to easily setup an existing repo for work with Gerrit. Change-Id: I0696eb8ea84cc16a9482be6a2fb0382fe624bb96 Signed-off-by: Evgenii Stratonikov --- Makefile | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/Makefile b/Makefile index cd80fc72e..46168719d 100755 --- a/Makefile +++ b/Makefile @@ -186,6 +186,20 @@ test: @echo "⇒ Running go test" @GOFLAGS="$(GOFLAGS)" go test ./... +# Install Gerrit commit-msg hook +review-install: GIT_HOOK_DIR := $(shell git rev-parse --git-dir)/hooks +review-install: + @git config remote.review.url \ + || git remote add review ssh://review.frostfs.info:2222/TrueCloudLab/frostfs-node + @mkdir -p $(GIT_HOOK_DIR)/ + @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg + @chmod +x $(GIT_HOOK_DIR)/commit-msg + +# Create a PR in Gerrit +review: BRANCH ?= master +review: + @git push review HEAD:refs/for/$(BRANCH) + # Run pre-commit pre-commit-run: @pre-commit run -a --hook-stage manual From 7df2912a83f8c4b02e89d6311a10f0bdb0a6eddc Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 17:26:35 +0300 Subject: [PATCH 444/591] [#1689] Makefile: Create prepare-commit-msg hook too `commit-msg` is ignored when `--no-verify` option is used, so there is no way to ignore `pre-commit` while retaining `commit-msg` hook. Ignoring pre-commit is useful, though, so we might add Change-Id in `prepare-commit-msg` hook instead. 
It accepts more parameters, but the first one is a file with the commit message, so we may reuse `commit-msg` hook. Change-Id: I4edb79810bbe38a5dcf7f4f07535f34c6bda0da3 Signed-off-by: Evgenii Stratonikov --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 46168719d..210ea1dc1 100755 --- a/Makefile +++ b/Makefile @@ -194,6 +194,8 @@ review-install: @mkdir -p $(GIT_HOOK_DIR)/ @curl -Lo $(GIT_HOOK_DIR)/commit-msg https://review.frostfs.info/tools/hooks/commit-msg @chmod +x $(GIT_HOOK_DIR)/commit-msg + @echo -e '#!/bin/sh\n"$$(git rev-parse --git-path hooks)"/commit-msg "$$1"' >$(GIT_HOOK_DIR)/prepare-commit-msg + @chmod +x $(GIT_HOOK_DIR)/prepare-commit-msg # Create a PR in Gerrit review: BRANCH ?= master From 60cea8c714fcbad23d86143c9c1949b09d5444e6 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 16:22:22 +0300 Subject: [PATCH 445/591] [#1692] metabase/test: Fix end of iteration error check This is not good: ``` BenchmarkListWithCursor/1_item-8 --- FAIL: BenchmarkListWithCursor/1_item-8 list_test.go:63: error: end of object listing ``` Change-Id: I61b70937ce30fefaf16ebeb0cdb51bdd39096061 Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/list_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index 817b22010..02985991c 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -59,7 +59,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { - if errors.Is(err, meta.ErrEndOfListing) { + if !errors.Is(err, meta.ErrEndOfListing) { b.Fatalf("error: %v", err) } prm.SetCursor(nil) From 3f4717a37fbb27ba59cc63065a83bfc18eac7a8b Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 16:35:01 +0300 Subject: [PATCH 446/591] [#1692] metabase: Do not allocate map in cache unless needed Change-Id: I8b1015a8c7c3df4153a08fdb788117d9f0d6c333 Signed-off-by: Evgenii Stratonikov --- .../metabase/bucket_cache.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/pkg/local_object_storage/metabase/bucket_cache.go b/pkg/local_object_storage/metabase/bucket_cache.go index 69553d55c..de1479e6f 100644 --- a/pkg/local_object_storage/metabase/bucket_cache.go +++ b/pkg/local_object_storage/metabase/bucket_cache.go @@ -14,10 +14,7 @@ type bucketCache struct { } func newBucketCache() *bucketCache { - return &bucketCache{ - expired: make(map[cid.ID]*bbolt.Bucket), - primary: make(map[cid.ID]*bbolt.Bucket), - } + return &bucketCache{} } func getLockedBucket(bc *bucketCache, tx *bbolt.Tx) *bbolt.Bucket { @@ -56,7 +53,7 @@ func getExpiredBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { bucketName = objectToExpirationEpochBucketName(cnr, bucketName) return tx.Bucket(bucketName) } - return getMappedBucket(bc.expired, tx, objectToExpirationEpochBucketName, cnr) + return getMappedBucket(&bc.expired, tx, objectToExpirationEpochBucketName, cnr) } func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { @@ -65,17 +62,21 @@ func getPrimaryBucket(bc *bucketCache, tx *bbolt.Tx, cnr cid.ID) *bbolt.Bucket { bucketName = primaryBucketName(cnr, bucketName) return tx.Bucket(bucketName) } - return getMappedBucket(bc.primary, tx, primaryBucketName, cnr) + return 
getMappedBucket(&bc.primary, tx, primaryBucketName, cnr) } -func getMappedBucket(m map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { - value, ok := m[cnr] +func getMappedBucket(m *map[cid.ID]*bbolt.Bucket, tx *bbolt.Tx, nameFunc func(cid.ID, []byte) []byte, cnr cid.ID) *bbolt.Bucket { + value, ok := (*m)[cnr] if ok { return value } + if *m == nil { + *m = make(map[cid.ID]*bbolt.Bucket, 1) + } + bucketName := make([]byte, bucketKeySize) bucketName = nameFunc(cnr, bucketName) - m[cnr] = getBucket(&value, tx, bucketName) + (*m)[cnr] = getBucket(&value, tx, bucketName) return value } From 049a650b89554a1f4f879048edb3555be6b387ec Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 28 Jan 2025 13:29:10 +0300 Subject: [PATCH 447/591] [#1619] logger: Simplify `logger` config reloading Change-Id: Ide892b250304b8cdb6c279f5f728c3b35f05df54 Signed-off-by: Anton Nikiforov --- cmd/frostfs-ir/config.go | 6 ++- cmd/frostfs-ir/main.go | 2 +- cmd/frostfs-node/config.go | 41 ++++++++++---------- pkg/services/object/common/writer/ec_test.go | 2 +- pkg/util/logger/logger.go | 40 +++---------------- 5 files changed, 31 insertions(+), 60 deletions(-) diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 09af08525..19b7f05d6 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -9,6 +9,7 @@ import ( configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -38,13 +39,14 @@ func reloadConfig() error { } cmode.Store(cfg.GetBool("node.kludge_compatibility_mode")) audit.Store(cfg.GetBool("audit.enabled")) + var logPrm logger.Prm err = logPrm.SetLevelString(cfg.GetString("logger.level")) if err != nil { return err } - logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + log.Reload(logPrm) - return logPrm.Reload() + return nil } func watchForSignal(ctx context.Context, cancel func()) { diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index ade64ba84..114d8e4de 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -31,7 +31,6 @@ const ( var ( wg = new(sync.WaitGroup) intErr = make(chan error) // internal inner ring errors - logPrm = new(logger.Prm) innerRing *innerring.Server pprofCmp *pprofComponent metricsCmp *httpComponent @@ -70,6 +69,7 @@ func main() { metrics := irMetrics.NewInnerRingMetrics() + var logPrm logger.Prm err = logPrm.SetLevelString( cfg.GetString("logger.level"), ) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 2531e9173..c3c687763 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -473,7 +473,6 @@ type shared struct { // dynamicConfiguration stores parameters of the // components that supports runtime reconfigurations. 
type dynamicConfiguration struct { - logger *logger.Prm pprof *httpComponent metrics *httpComponent } @@ -714,7 +713,8 @@ func initCfg(appCfg *config.Config) *cfg { netState.metrics = c.metricsCollector - logPrm := c.loggerPrm() + logPrm, err := c.loggerPrm() + fatalOnErr(err) logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) @@ -1076,26 +1076,22 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID return sh } -func (c *cfg) loggerPrm() *logger.Prm { - // check if it has been inited before - if c.dynamicConfiguration.logger == nil { - c.dynamicConfiguration.logger = new(logger.Prm) - } - +func (c *cfg) loggerPrm() (logger.Prm, error) { + var prm logger.Prm // (re)init read configuration - err := c.dynamicConfiguration.logger.SetLevelString(c.LoggerCfg.level) + err := prm.SetLevelString(c.LoggerCfg.level) if err != nil { // not expected since validation should be performed before - panic("incorrect log level format: " + c.LoggerCfg.level) + return logger.Prm{}, errors.New("incorrect log level format: " + c.LoggerCfg.level) } - err = c.dynamicConfiguration.logger.SetDestination(c.LoggerCfg.destination) + err = prm.SetDestination(c.LoggerCfg.destination) if err != nil { // not expected since validation should be performed before - panic("incorrect log destination format: " + c.LoggerCfg.destination) + return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) } - c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp + prm.PrependTimestamp = c.LoggerCfg.timestamp - return c.dynamicConfiguration.logger + return prm, nil } func (c *cfg) LocalAddress() network.AddressGroup { @@ -1335,11 +1331,7 @@ func (c *cfg) reloadConfig(ctx context.Context) { // all the components are expected to support // Logger's dynamic reconfiguration approach - // Logger - - logPrm := c.loggerPrm() - - components := c.getComponents(ctx, logPrm) + components := c.getComponents(ctx) // Object c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime) @@ -1377,10 +1369,17 @@ func (c *cfg) reloadConfig(ctx context.Context) { c.log.Info(ctx, logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully) } -func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp { +func (c *cfg) getComponents(ctx context.Context) []dCmp { var components []dCmp - components = append(components, dCmp{"logger", logPrm.Reload}) + components = append(components, dCmp{"logger", func() error { + prm, err := c.loggerPrm() + if err != nil { + return err + } + c.log.Reload(prm) + return nil + }}) components = append(components, dCmp{"runtime", func() error { setRuntimeParameters(ctx, c) return nil diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go index 2458e352f..d5eeddf21 100644 --- a/pkg/services/object/common/writer/ec_test.go +++ b/pkg/services/object/common/writer/ec_test.go @@ -130,7 +130,7 @@ func TestECWriter(t *testing.T) { nodeKey, err := keys.NewPrivateKey() require.NoError(t, err) - log, err := logger.NewLogger(nil) + log, err := logger.NewLogger(logger.Prm{}) require.NoError(t, err) var n nmKeys diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 2eb5e5538..952a6f2dc 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -23,16 +23,8 @@ type Logger struct { // Parameters that have been connected to the Logger support its // configuration changing. 
// -// Passing Prm after a successful connection via the NewLogger, connects -// the Prm to a new instance of the Logger. -// -// See also Reload, SetLevelString. +// See also Logger.Reload, SetLevelString. type Prm struct { - // link to the created Logger - // instance; used for a runtime - // reconfiguration - _log *Logger - // support runtime rereading level zapcore.Level @@ -73,22 +65,6 @@ func (p *Prm) SetDestination(d string) error { return nil } -// Reload reloads configuration of a connected instance of the Logger. -// Returns ErrLoggerNotConnected if no connection has been performed. -// Returns any reconfiguration error from the Logger directly. -func (p Prm) Reload() error { - if p._log == nil { - // incorrect logger usage - panic("parameters are not connected to any Logger") - } - - return p._log.reload(p) -} - -func defaultPrm() *Prm { - return new(Prm) -} - // NewLogger constructs a new zap logger instance. Constructing with nil // parameters is safe: default values will be used then. // Passing non-nil parameters after a successful creation (non-error) allows @@ -100,10 +76,7 @@ func defaultPrm() *Prm { // - ISO8601 time encoding. // // Logger records a stack trace for all messages at or above fatal level. -func NewLogger(prm *Prm) (*Logger, error) { - if prm == nil { - prm = defaultPrm() - } +func NewLogger(prm Prm) (*Logger, error) { switch prm.dest { case DestinationUndefined, DestinationStdout: return newConsoleLogger(prm) @@ -114,7 +87,7 @@ func NewLogger(prm *Prm) (*Logger, error) { } } -func newConsoleLogger(prm *Prm) (*Logger, error) { +func newConsoleLogger(prm Prm) (*Logger, error) { lvl := zap.NewAtomicLevelAt(prm.level) c := zap.NewProductionConfig() @@ -139,12 +112,11 @@ func newConsoleLogger(prm *Prm) (*Logger, error) { } l := &Logger{z: lZap, lvl: lvl} - prm._log = l return l, nil } -func newJournaldLogger(prm *Prm) (*Logger, error) { +func newJournaldLogger(prm Prm) (*Logger, error) { lvl := zap.NewAtomicLevelAt(prm.level) c := zap.NewProductionConfig() @@ -181,14 +153,12 @@ func newJournaldLogger(prm *Prm) (*Logger, error) { lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) l := &Logger{z: lZap, lvl: lvl} - prm._log = l return l, nil } -func (l *Logger) reload(prm Prm) error { +func (l *Logger) Reload(prm Prm) { l.lvl.SetLevel(prm.level) - return nil } func (l *Logger) WithOptions(options ...zap.Option) { From eea46a599d5e8c07d354900152ba62584b9c8d44 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 21 Mar 2025 11:20:53 +0300 Subject: [PATCH 448/591] [#1695] qos: Add treesync tag Tree sync is too much different from GC and rebuild to use the same tag for GC and tree sync. 
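For outgoing requests the new tag is treated like the other node-local tags. A reduced sketch of that behaviour follows; the names are simplified stand-ins for the real changes in internal/qos/tags.go and internal/qos/grpc.go below.

```
package main

import "fmt"

type ioTag string

const (
	tagClient     ioTag = "client"
	tagInternal   ioTag = "internal"
	tagBackground ioTag = "background"
	tagPolicer    ioTag = "policer"
	tagWritecache ioTag = "writecache"
	tagTreeSync   ioTag = "treesync"
)

// isLocal mirrors the IsLocal helper added in this patch: tags that only
// make sense on the local node, now including treesync.
func (t ioTag) isLocal() bool {
	return t == tagBackground || t == tagPolicer || t == tagWritecache || t == tagTreeSync
}

// outgoingTag shows what the client interceptors effectively do: node-local
// tags are downgraded to "internal" before a request leaves the node.
func outgoingTag(t ioTag) ioTag {
	if t.isLocal() {
		return tagInternal
	}
	return t
}

func main() {
	fmt.Println(outgoingTag(tagTreeSync)) // internal
	fmt.Println(outgoingTag(tagClient))   // client
}
```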
Change-Id: Ib44d5fa9a88daff507d759d0b0410cc9272e236f Signed-off-by: Dmitrii Stepanov --- internal/qos/grpc.go | 4 ++-- internal/qos/limiter.go | 2 +- internal/qos/stats.go | 1 + internal/qos/tags.go | 7 +++++++ internal/qos/validate.go | 1 + pkg/services/tree/service.go | 2 +- 6 files changed, 13 insertions(+), 4 deletions(-) diff --git a/internal/qos/grpc.go b/internal/qos/grpc.go index 534a1f74b..58cd9e52c 100644 --- a/internal/qos/grpc.go +++ b/internal/qos/grpc.go @@ -26,7 +26,7 @@ func NewAdjustOutgoingIOTagUnaryClientInterceptor() grpc.UnaryClientInterceptor if err != nil { tag = IOTagClient } - if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { + if tag.IsLocal() { tag = IOTagInternal } ctx = tagging.ContextWithIOTag(ctx, tag.String()) @@ -44,7 +44,7 @@ func NewAdjustOutgoingIOTagStreamClientInterceptor() grpc.StreamClientIntercepto if err != nil { tag = IOTagClient } - if tag == IOTagBackground || tag == IOTagPolicer || tag == IOTagWritecache { + if tag.IsLocal() { tag = IOTagInternal } ctx = tagging.ContextWithIOTag(ctx, tag.String()) diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index e92cef652..82f9917a5 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -74,7 +74,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) { func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { result := make(map[string]scheduling.TagInfo) - for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache} { + for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache, IOTagTreeSync} { result[tag.String()] = scheduling.TagInfo{ Share: defaultShare, } diff --git a/internal/qos/stats.go b/internal/qos/stats.go index f077f552b..aa4d4caf9 100644 --- a/internal/qos/stats.go +++ b/internal/qos/stats.go @@ -9,6 +9,7 @@ var statTags = map[string]struct{}{ IOTagPolicer.String(): {}, IOTagWritecache.String(): {}, IOTagCritical.String(): {}, + IOTagTreeSync.String(): {}, unknownStatsTag: {}, } diff --git a/internal/qos/tags.go b/internal/qos/tags.go index 9db45f190..2781dec76 100644 --- a/internal/qos/tags.go +++ b/internal/qos/tags.go @@ -13,6 +13,7 @@ const ( IOTagClient IOTag = "client" IOTagInternal IOTag = "internal" IOTagBackground IOTag = "background" + IOTagTreeSync IOTag = "treesync" IOTagWritecache IOTag = "writecache" IOTagPolicer IOTag = "policer" IOTagCritical IOTag = "critical" @@ -34,6 +35,8 @@ func FromRawString(s string) (IOTag, error) { return IOTagWritecache, nil case string(IOTagPolicer): return IOTagPolicer, nil + case string(IOTagTreeSync): + return IOTagTreeSync, nil default: return ioTagUnknown, fmt.Errorf("unknown tag %s", s) } @@ -50,3 +53,7 @@ func IOTagFromContext(ctx context.Context) string { } return tag } + +func (t IOTag) IsLocal() bool { + return t == IOTagBackground || t == IOTagPolicer || t == IOTagWritecache || t == IOTagTreeSync +} diff --git a/internal/qos/validate.go b/internal/qos/validate.go index 3fa4ebbd1..75bf4f4b9 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -47,6 +47,7 @@ func validateTags(configTags []limits.IOTagConfig) error { IOTagBackground: {}, IOTagWritecache: {}, IOTagPolicer: {}, + IOTagTreeSync: {}, } for _, t := range configTags { tag, err := FromRawString(t.Tag) diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index eeffec08b..b9bb96bab 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ 
-85,7 +85,7 @@ func New(opts ...Option) *Service { // Start starts the service. func (s *Service) Start(ctx context.Context) { - ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) + ctx = tagging.ContextWithIOTag(ctx, qos.IOTagTreeSync.String()) go s.replicateLoop(ctx) go s.syncLoop(ctx) From 5385f9994fd4decefbbdd82a85aecedb55671872 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 21 Mar 2025 11:59:25 +0300 Subject: [PATCH 449/591] [#1695] mod: Bump frostfs-observability version Change-Id: Id362b71f743ff70c8cd374030c9fa67e2566022f Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/internal/client/sdk.go | 2 +- cmd/frostfs-cli/modules/tree/client.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- pkg/network/cache/multi.go | 2 +- pkg/services/tree/cache.go | 2 +- pkg/services/tree/sync.go | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-cli/internal/client/sdk.go b/cmd/frostfs-cli/internal/client/sdk.go index 2d9c45cbd..1eadfa2e1 100644 --- a/cmd/frostfs-cli/internal/client/sdk.go +++ b/cmd/frostfs-cli/internal/client/sdk.go @@ -56,7 +56,7 @@ func GetSDKClient(ctx context.Context, cmd *cobra.Command, key *ecdsa.PrivateKey prmDial := client.PrmDial{ Endpoint: addr.URIAddr(), GRPCDialOptions: []grpc.DialOption{ - grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInteceptor()), + grpc.WithChainUnaryInterceptor(tracing.NewUnaryClientInterceptor()), grpc.WithChainStreamInterceptor(tracing.NewStreamClientInterceptor()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), }, diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 933378df6..421b96ccd 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -33,7 +33,7 @@ func _client() (tree.TreeServiceClient, error) { opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( tracing.NewStreamClientInterceptor(), diff --git a/go.mod b/go.mod index eeaca1645..fafb4f828 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 + git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 diff --git a/go.sum b/go.sum index a8f7216a5..7818583d4 100644 --- a/go.sum +++ b/go.sum @@ -6,8 +6,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824 h1:Mxw1c/8t96vFIUOffl28lFaHKi413oCBfLMGJmF9cFA= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250212111929-d34e1329c824/go.mod 
h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= +git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index e94fa580a..77420865a 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -66,7 +66,7 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 70f4a843b..3359af2c5 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -99,7 +99,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInteceptor(), + tracing.NewUnaryClientInterceptor(), tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 89450b739..103e2a613 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -344,7 +344,7 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), - tracing_grpc.NewUnaryClientInteceptor(), + tracing_grpc.NewUnaryClientInterceptor(), tagging.NewUnaryClientInteceptor(), ), grpc.WithChainStreamInterceptor( From 3fa5c22ddfc036a414e66fb0e89c816e44a677dc Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 13:18:18 +0300 Subject: [PATCH 450/591] [#1689] Makefile: Add default reviewers via --push-option Gerrit doesn't provide an easy way to have default reviewers assigned to new change requests. However, we can use `--push-option` and mention all people from storage-core-developers group. 
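For reference, this makes `make review` (BRANCH defaults to master) equivalent to a manual push of the following form, assuming the `review` remote set up by `review-install`:

```
# one --push-option r=<email> per reviewer listed in the recipe, e.g.:
git push review HEAD:refs/for/master \
  --push-option r=e.stratonikov@yadro.com \
  --push-option r=d.stepanov@yadro.com
```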
Change-Id: Ia01f8a3c5c8eb8a1dca6efb66fbe07018f6a42c9 Signed-off-by: Evgenii Stratonikov --- Makefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 210ea1dc1..c9147f511 100755 --- a/Makefile +++ b/Makefile @@ -200,7 +200,14 @@ review-install: # Create a PR in Gerrit review: BRANCH ?= master review: - @git push review HEAD:refs/for/$(BRANCH) + @git push review HEAD:refs/for/$(BRANCH) \ + --push-option r=e.stratonikov@yadro.com \ + --push-option r=d.stepanov@yadro.com \ + --push-option r=an.nikiforov@yadro.com \ + --push-option r=a.arifullin@yadro.com \ + --push-option r=ekaterina.lebedeva@yadro.com \ + --push-option r=a.savchuk@yadro.com \ + --push-option r=a.chuprov@yadro.com # Run pre-commit pre-commit-run: From af76350bfb2eaabb22ad19fccf1412d26a226004 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 21 Mar 2025 13:19:28 +0300 Subject: [PATCH 451/591] [#1695] qos: Sort tags by asc Change-Id: Ia23e392bb49d2536096de2ba07fc6f8fb7ac0489 Signed-off-by: Dmitrii Stepanov --- internal/qos/limiter.go | 2 +- internal/qos/stats.go | 6 +++--- internal/qos/tags.go | 24 ++++++++++++------------ internal/qos/validate.go | 4 ++-- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 82f9917a5..98d254fd0 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -74,7 +74,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) { func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { result := make(map[string]scheduling.TagInfo) - for _, tag := range []IOTag{IOTagClient, IOTagBackground, IOTagInternal, IOTagPolicer, IOTagWritecache, IOTagTreeSync} { + for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { result[tag.String()] = scheduling.TagInfo{ Share: defaultShare, } diff --git a/internal/qos/stats.go b/internal/qos/stats.go index aa4d4caf9..3ecfad9f9 100644 --- a/internal/qos/stats.go +++ b/internal/qos/stats.go @@ -3,13 +3,13 @@ package qos const unknownStatsTag = "unknown" var statTags = map[string]struct{}{ - IOTagClient.String(): {}, IOTagBackground.String(): {}, + IOTagClient.String(): {}, + IOTagCritical.String(): {}, IOTagInternal.String(): {}, IOTagPolicer.String(): {}, - IOTagWritecache.String(): {}, - IOTagCritical.String(): {}, IOTagTreeSync.String(): {}, + IOTagWritecache.String(): {}, unknownStatsTag: {}, } diff --git a/internal/qos/tags.go b/internal/qos/tags.go index 2781dec76..e3f7cafd6 100644 --- a/internal/qos/tags.go +++ b/internal/qos/tags.go @@ -10,33 +10,33 @@ import ( type IOTag string const ( - IOTagClient IOTag = "client" - IOTagInternal IOTag = "internal" IOTagBackground IOTag = "background" + IOTagClient IOTag = "client" + IOTagCritical IOTag = "critical" + IOTagInternal IOTag = "internal" + IOTagPolicer IOTag = "policer" IOTagTreeSync IOTag = "treesync" IOTagWritecache IOTag = "writecache" - IOTagPolicer IOTag = "policer" - IOTagCritical IOTag = "critical" ioTagUnknown IOTag = "" ) func FromRawString(s string) (IOTag, error) { switch s { - case string(IOTagCritical): - return IOTagCritical, nil - case string(IOTagClient): - return IOTagClient, nil - case string(IOTagInternal): - return IOTagInternal, nil case string(IOTagBackground): return IOTagBackground, nil - case string(IOTagWritecache): - return IOTagWritecache, nil + case string(IOTagClient): + return IOTagClient, nil + case string(IOTagCritical): + return 
IOTagCritical, nil + case string(IOTagInternal): + return IOTagInternal, nil case string(IOTagPolicer): return IOTagPolicer, nil case string(IOTagTreeSync): return IOTagTreeSync, nil + case string(IOTagWritecache): + return IOTagWritecache, nil default: return ioTagUnknown, fmt.Errorf("unknown tag %s", s) } diff --git a/internal/qos/validate.go b/internal/qos/validate.go index 75bf4f4b9..d4475e38b 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -42,12 +42,12 @@ func validateOpConfig(c limits.OpConfig) error { func validateTags(configTags []limits.IOTagConfig) error { tags := map[IOTag]tagConfig{ + IOTagBackground: {}, IOTagClient: {}, IOTagInternal: {}, - IOTagBackground: {}, - IOTagWritecache: {}, IOTagPolicer: {}, IOTagTreeSync: {}, + IOTagWritecache: {}, } for _, t := range configTags { tag, err := FromRawString(t.Tag) From 9aa486c9d814d3feed94a99d24aa1914a57b17fa Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 14:10:46 +0300 Subject: [PATCH 452/591] [#1689] Makefile: Create dirs with -p flag On CI there is no `bin` directory initially, so an error occurs ``` mkdir: cannot create directory '/var/cache/jenkins-agent/workspace/gerrit/frostfs-node#55-488b12-8ac3c/bin/gofumpt': No such file or directory ``` Change-Id: I43895c8f5ed7cc5c71c8025228710279f9e75e9c Signed-off-by: Evgenii Stratonikov --- Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index c9147f511..2c1cf89f7 100755 --- a/Makefile +++ b/Makefile @@ -115,7 +115,7 @@ protoc: # Install protoc protoc-install: @rm -rf $(PROTOBUF_DIR) - @mkdir $(PROTOBUF_DIR) + @mkdir -p $(PROTOBUF_DIR) @echo "⇒ Installing protoc... " @wget -q -O $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip 'https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(PROTOC_OS_VERSION).zip' @unzip -q -o $(PROTOBUF_DIR)/protoc-$(PROTOC_VERSION).zip -d $(PROTOC_DIR) @@ -169,7 +169,7 @@ imports: # Install gofumpt fumpt-install: @rm -rf $(GOFUMPT_DIR) - @mkdir $(GOFUMPT_DIR) + @mkdir -p $(GOFUMPT_DIR) @GOBIN=$(GOFUMPT_VERSION_DIR) go install mvdan.cc/gofumpt@$(GOFUMPT_VERSION) # Run gofumpt @@ -214,9 +214,9 @@ pre-commit-run: @pre-commit run -a --hook-stage manual # Install linters -lint-install: +lint-install: $(BIN) @rm -rf $(OUTPUT_LINT_DIR) - @mkdir $(OUTPUT_LINT_DIR) + @mkdir -p $(OUTPUT_LINT_DIR) @mkdir -p $(TMP_DIR) @rm -rf $(TMP_DIR)/linters @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters @@ -235,7 +235,7 @@ lint: # Install staticcheck staticcheck-install: @rm -rf $(STATICCHECK_DIR) - @mkdir $(STATICCHECK_DIR) + @mkdir -p $(STATICCHECK_DIR) @GOBIN=$(STATICCHECK_VERSION_DIR) go install honnef.co/go/tools/cmd/staticcheck@$(STATICCHECK_VERSION) # Run staticcheck @@ -248,7 +248,7 @@ staticcheck-run: # Install gopls gopls-install: @rm -rf $(GOPLS_DIR) - @mkdir $(GOPLS_DIR) + @mkdir -p $(GOPLS_DIR) @GOBIN=$(GOPLS_VERSION_DIR) go install golang.org/x/tools/gopls@$(GOPLS_VERSION) # Run gopls From 016f2e11e371b8cbb5b6459aed33b2e3c962fc3e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 14:38:18 +0300 Subject: [PATCH 453/591] [#1689] Makefile: Add more restricted .SHELLFLAGS Catch more errors immediately. 
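For illustration, each recipe line is now executed as `bash -euo pipefail -c '<line>'`: `-e` aborts on the first failing command, `-u` rejects unset shell variables, and `-o pipefail` propagates a failure from any pipeline stage instead of masking it:

```
$ bash -c 'false | true; echo ran'                 # prints "ran", exit 0
$ bash -euo pipefail -c 'false | true; echo ran'   # no output, exit 1
```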
Change-Id: I576f1b394a2b167c78c693a794ab8cca3ac1013b Signed-off-by: Evgenii Stratonikov --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 2c1cf89f7..321365f0d 100755 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ #!/usr/bin/make -f SHELL = bash +.SHELLFLAGS = -euo pipefail -c REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") From d95128913123f7c3c1fbc31c79ade0d54594b2e5 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 4 Mar 2025 19:41:53 +0300 Subject: [PATCH 454/591] [#1294] docs: Fix description of shard switching mode Signed-off-by: Alexander Chuprov --- docs/shard-modes.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/shard-modes.md b/docs/shard-modes.md index 3b459335b..6cc4ab13c 100644 --- a/docs/shard-modes.md +++ b/docs/shard-modes.md @@ -51,10 +51,7 @@ However, all mode changing operations are idempotent. ## Automatic mode changes -Shard can automatically switch to a `degraded-read-only` mode in 3 cases: -1. If the metabase was not available or couldn't be opened/initialized during shard startup. -2. If shard error counter exceeds threshold. -3. If the metabase couldn't be reopened during SIGHUP handling. +A shard can automatically switch to `read-only` mode if its error counter exceeds the threshold. # Detach shard From 4919b6a206ad9f448fcb36ae7d574a8f7dd3ad59 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 21 Mar 2025 15:51:22 +0300 Subject: [PATCH 455/591] [#1689] node/config: Allow zero `max_ops` in RPC limits config The limiter allows zeros for limits, meaning "this operation is disabled". However, the config didn't allow zero due to the lack of distinction between "no value" and "zero" - cast functions read both `nil` and zero as zero. Now, the config allows a zero limit. Added tests. Managing such cases should be easier after #1610. 
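For example (mirroring the test data added below), the two cases now behave differently: an explicit zero disables the listed methods, while a method group that omits `max_ops` entirely is still rejected when the config is read:

```yaml
rpc:
  limits:
    - methods:
        - /neo.fs.v2.object.ObjectService/Put
      max_ops: 0       # explicitly disabled
    - methods:
        - /neo.fs.v2.object.ObjectService/Get
      max_ops: 10000   # limited as before
    # omitting max_ops for a group still fails at startup
```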
Change-Id: Ifc840732390b2feb975f230573b34bf479406e05 Signed-off-by: Aleksey Savchuk --- cmd/frostfs-node/config/rpc/config.go | 5 ++-- cmd/frostfs-node/config/rpc/config_test.go | 26 ++++++++++++++++++- .../rpc/testdata/{node.env => no_max_ops.env} | 0 .../testdata/{node.json => no_max_ops.json} | 0 .../testdata/{node.yaml => no_max_ops.yaml} | 0 .../config/rpc/testdata/zero_max_ops.env | 4 +++ .../config/rpc/testdata/zero_max_ops.json | 19 ++++++++++++++ .../config/rpc/testdata/zero_max_ops.yaml | 9 +++++++ 8 files changed, 59 insertions(+), 4 deletions(-) rename cmd/frostfs-node/config/rpc/testdata/{node.env => no_max_ops.env} (100%) rename cmd/frostfs-node/config/rpc/testdata/{node.json => no_max_ops.json} (100%) rename cmd/frostfs-node/config/rpc/testdata/{node.yaml => no_max_ops.yaml} (100%) create mode 100644 cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env create mode 100644 cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json create mode 100644 cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml diff --git a/cmd/frostfs-node/config/rpc/config.go b/cmd/frostfs-node/config/rpc/config.go index 197990d07..e0efdfde2 100644 --- a/cmd/frostfs-node/config/rpc/config.go +++ b/cmd/frostfs-node/config/rpc/config.go @@ -31,12 +31,11 @@ func Limits(c *config.Config) []LimitConfig { break } - maxOps := config.IntSafe(sc, "max_ops") - if maxOps == 0 { + if sc.Value("max_ops") == nil { panic("no max operations for method group") } - limits = append(limits, LimitConfig{methods, maxOps}) + limits = append(limits, LimitConfig{methods, config.IntSafe(sc, "max_ops")}) } return limits diff --git a/cmd/frostfs-node/config/rpc/config_test.go b/cmd/frostfs-node/config/rpc/config_test.go index 31a837cee..a6365e19f 100644 --- a/cmd/frostfs-node/config/rpc/config_test.go +++ b/cmd/frostfs-node/config/rpc/config_test.go @@ -38,7 +38,7 @@ func TestRPCSection(t *testing.T) { }) t.Run("no max operations", func(t *testing.T) { - const path = "testdata/node" + const path = "testdata/no_max_ops" fileConfigTest := func(c *config.Config) { require.Panics(t, func() { _ = Limits(c) }) @@ -50,4 +50,28 @@ func TestRPCSection(t *testing.T) { configtest.ForEnvFileType(t, path, fileConfigTest) }) }) + + t.Run("zero max operations", func(t *testing.T) { + const path = "testdata/zero_max_ops" + + fileConfigTest := func(c *config.Config) { + limits := Limits(c) + require.Len(t, limits, 2) + + limit0 := limits[0] + limit1 := limits[1] + + require.ElementsMatch(t, limit0.Methods, []string{"/neo.fs.v2.object.ObjectService/PutSingle", "/neo.fs.v2.object.ObjectService/Put"}) + require.Equal(t, limit0.MaxOps, int64(0)) + + require.ElementsMatch(t, limit1.Methods, []string{"/neo.fs.v2.object.ObjectService/Get"}) + require.Equal(t, limit1.MaxOps, int64(10000)) + } + + configtest.ForEachFileType(path, fileConfigTest) + + t.Run("ENV", func(t *testing.T) { + configtest.ForEnvFileType(t, path, fileConfigTest) + }) + }) } diff --git a/cmd/frostfs-node/config/rpc/testdata/node.env b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.env similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/node.env rename to cmd/frostfs-node/config/rpc/testdata/no_max_ops.env diff --git a/cmd/frostfs-node/config/rpc/testdata/node.json b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.json similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/node.json rename to cmd/frostfs-node/config/rpc/testdata/no_max_ops.json diff --git a/cmd/frostfs-node/config/rpc/testdata/node.yaml 
b/cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml similarity index 100% rename from cmd/frostfs-node/config/rpc/testdata/node.yaml rename to cmd/frostfs-node/config/rpc/testdata/no_max_ops.yaml diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env new file mode 100644 index 000000000..ce7302b0b --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.env @@ -0,0 +1,4 @@ +FROSTFS_RPC_LIMITS_0_METHODS="/neo.fs.v2.object.ObjectService/PutSingle /neo.fs.v2.object.ObjectService/Put" +FROSTFS_RPC_LIMITS_0_MAX_OPS=0 +FROSTFS_RPC_LIMITS_1_METHODS="/neo.fs.v2.object.ObjectService/Get" +FROSTFS_RPC_LIMITS_1_MAX_OPS=10000 diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json new file mode 100644 index 000000000..16a1c173f --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.json @@ -0,0 +1,19 @@ +{ + "rpc": { + "limits": [ + { + "methods": [ + "/neo.fs.v2.object.ObjectService/PutSingle", + "/neo.fs.v2.object.ObjectService/Put" + ], + "max_ops": 0 + }, + { + "methods": [ + "/neo.fs.v2.object.ObjectService/Get" + ], + "max_ops": 10000 + } + ] + } +} diff --git a/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml new file mode 100644 index 000000000..525d768d4 --- /dev/null +++ b/cmd/frostfs-node/config/rpc/testdata/zero_max_ops.yaml @@ -0,0 +1,9 @@ +rpc: + limits: + - methods: + - /neo.fs.v2.object.ObjectService/PutSingle + - /neo.fs.v2.object.ObjectService/Put + max_ops: 0 + - methods: + - /neo.fs.v2.object.ObjectService/Get + max_ops: 10000 From e7e91ef63407d4946f70ffa746e33d79cfea1999 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 16:31:20 +0300 Subject: [PATCH 456/591] [#1689] adm: Remove storagecfg subcommand It is unused and unsupported for a long time. 
Change-Id: I570567db4e8cb202e41286064406ad85cd0e7a39 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/root.go | 2 - .../internal/modules/storagecfg/config.go | 135 ------ .../internal/modules/storagecfg/root.go | 432 ------------------ 3 files changed, 569 deletions(-) delete mode 100644 cmd/frostfs-adm/internal/modules/storagecfg/config.go delete mode 100644 cmd/frostfs-adm/internal/modules/storagecfg/root.go diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index defd898c8..e42204b7a 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -7,7 +7,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/autocomplete" utilConfig "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config" @@ -41,7 +40,6 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) - rootCmd.AddCommand(storagecfg.RootCmd) rootCmd.AddCommand(metabase.RootCmd) rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/config.go b/cmd/frostfs-adm/internal/modules/storagecfg/config.go deleted file mode 100644 index 67e3414c2..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/config.go +++ /dev/null @@ -1,135 +0,0 @@ -package storagecfg - -const configTemplate = `logger: - level: info # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" - -node: - wallet: - path: {{ .Wallet.Path }} # path to a NEO wallet; ignored if key is presented - address: {{ .Wallet.Account }} # address of a NEO account in the wallet; ignored if key is presented - password: {{ .Wallet.Password }} # password for a NEO account in the wallet; ignored if key is presented - addresses: # list of addresses announced by Storage node in the Network map - - {{ .AnnouncedAddress }} - attribute_0: UN-LOCODE:{{ .Attribute.Locode }} - relay: {{ .Relay }} # start Storage node in relay mode without bootstrapping into the Network map - -grpc: - num: 1 # total number of listener endpoints - 0: - endpoint: {{ .Endpoint }} # endpoint for gRPC server - tls:{{if .TLSCert}} - enabled: true # enable TLS for a gRPC connection (min version is TLS 1.2) - certificate: {{ .TLSCert }} # path to TLS certificate - key: {{ .TLSKey }} # path to TLS key - {{- else }} - enabled: false # disable TLS for a gRPC connection - {{- end}} - -control: - authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service - {{- range .AuthorizedKeys }} - - {{.}}{{end}} - grpc: - endpoint: {{.ControlEndpoint}} # endpoint that is listened by the Control Service - -morph: - dial_timeout: 20s # timeout for side chain NEO RPC client connection - cache_ttl: 15s # use TTL cache for side chain GET operations - rpc_endpoint: # side chain N3 RPC endpoints - {{- range .MorphRPC }} - - address: wss://{{.}}/ws{{end}} -{{if not .Relay }} -storage: - shard: - default: # section with the default shard parameters - metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) - - blobstor: - perm: 
0644 # permissions for blobstor files(directories: +x for current user and group) - depth: 2 # max depth of object tree storage in FS - small_object_size: 102400 # 100KiB, size threshold for "small" objects which are stored in key-value DB, not in FS, bytes - compress: true # turn on/off Zstandard compression (level 3) of stored objects - compression_exclude_content_types: - - audio/* - - video/* - - blobovnicza: - size: 1073741824 # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - depth: 1 # max depth of object tree storage in key-value DB - width: 4 # max width of object tree storage in key-value DB - opened_cache_capacity: 50 # maximum number of opened database files - opened_cache_ttl: 5m # ttl for opened database file - opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - gc: - remover_batch_size: 200 # number of objects to be removed by the garbage collector - remover_sleep_interval: 5m # frequency of the garbage collector invocation - 0: - mode: "read-write" # mode of the shard, must be one of the: "read-write" (default), "read-only" - - metabase: - path: {{ .MetabasePath }} # path to the metabase - - blobstor: - path: {{ .BlobstorPath }} # path to the blobstor -{{end}}` - -const ( - neofsMainnetAddress = "2cafa46838e8b564468ebd868dcafdd99dce6221" - balanceMainnetAddress = "dc1ec98d9d0c5f9dfade16144defe08cffc5ca55" - neofsTestnetAddress = "b65d8243ac63983206d17e5221af0653a7266fa1" - balanceTestnetAddress = "e0420c216003747626670d1424569c17c79015bf" -) - -var n3config = map[string]struct { - MorphRPC []string - RPC []string - NeoFSContract string - BalanceContract string -}{ - "testnet": { - MorphRPC: []string{ - "rpc01.morph.testnet.fs.neo.org:51331", - "rpc02.morph.testnet.fs.neo.org:51331", - "rpc03.morph.testnet.fs.neo.org:51331", - "rpc04.morph.testnet.fs.neo.org:51331", - "rpc05.morph.testnet.fs.neo.org:51331", - "rpc06.morph.testnet.fs.neo.org:51331", - "rpc07.morph.testnet.fs.neo.org:51331", - }, - RPC: []string{ - "rpc01.testnet.n3.nspcc.ru:21331", - "rpc02.testnet.n3.nspcc.ru:21331", - "rpc03.testnet.n3.nspcc.ru:21331", - "rpc04.testnet.n3.nspcc.ru:21331", - "rpc05.testnet.n3.nspcc.ru:21331", - "rpc06.testnet.n3.nspcc.ru:21331", - "rpc07.testnet.n3.nspcc.ru:21331", - }, - NeoFSContract: neofsTestnetAddress, - BalanceContract: balanceTestnetAddress, - }, - "mainnet": { - MorphRPC: []string{ - "rpc1.morph.fs.neo.org:40341", - "rpc2.morph.fs.neo.org:40341", - "rpc3.morph.fs.neo.org:40341", - "rpc4.morph.fs.neo.org:40341", - "rpc5.morph.fs.neo.org:40341", - "rpc6.morph.fs.neo.org:40341", - "rpc7.morph.fs.neo.org:40341", - }, - RPC: []string{ - "rpc1.n3.nspcc.ru:10331", - "rpc2.n3.nspcc.ru:10331", - "rpc3.n3.nspcc.ru:10331", - "rpc4.n3.nspcc.ru:10331", - "rpc5.n3.nspcc.ru:10331", - "rpc6.n3.nspcc.ru:10331", - "rpc7.n3.nspcc.ru:10331", - }, - NeoFSContract: neofsMainnetAddress, - BalanceContract: balanceMainnetAddress, - }, -} diff --git a/cmd/frostfs-adm/internal/modules/storagecfg/root.go b/cmd/frostfs-adm/internal/modules/storagecfg/root.go deleted file mode 100644 index a5adea0da..000000000 --- a/cmd/frostfs-adm/internal/modules/storagecfg/root.go +++ /dev/null @@ -1,432 +0,0 @@ -package storagecfg - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "net" - "net/url" - "os" - "path/filepath" - "slices" - "strconv" - "strings" - "text/template" - "time" - - netutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - "github.com/chzyer/readline" - 
"github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/gas" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" - "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/spf13/cobra" -) - -const ( - walletFlag = "wallet" - accountFlag = "account" -) - -const ( - defaultControlEndpoint = "localhost:8090" - defaultDataEndpoint = "localhost" -) - -// RootCmd is a root command of config section. -var RootCmd = &cobra.Command{ - Use: "storage-config [-w wallet] [-a acccount] []", - Short: "Section for storage node configuration commands", - Run: storageConfig, -} - -func init() { - fs := RootCmd.Flags() - - fs.StringP(walletFlag, "w", "", "Path to wallet") - fs.StringP(accountFlag, "a", "", "Wallet account") -} - -type config struct { - AnnouncedAddress string - AuthorizedKeys []string - ControlEndpoint string - Endpoint string - TLSCert string - TLSKey string - MorphRPC []string - Attribute struct { - Locode string - } - Wallet struct { - Path string - Account string - Password string - } - Relay bool - BlobstorPath string - MetabasePath string -} - -func storageConfig(cmd *cobra.Command, args []string) { - outPath := getOutputPath(args) - - historyPath := filepath.Join(os.TempDir(), "frostfs-adm.history") - readline.SetHistoryPath(historyPath) - - var c config - - c.Wallet.Path, _ = cmd.Flags().GetString(walletFlag) - if c.Wallet.Path == "" { - c.Wallet.Path = getPath("Path to the storage node wallet: ") - } - - w, err := wallet.NewWalletFromFile(c.Wallet.Path) - fatalOnErr(err) - - fillWalletAccount(cmd, &c, w) - - accH, err := flags.ParseAddress(c.Wallet.Account) - fatalOnErr(err) - - acc := w.GetAccount(accH) - if acc == nil { - fatalOnErr(errors.New("can't find account in wallet")) - } - - c.Wallet.Password, err = input.ReadPassword(fmt.Sprintf("Enter password for %s > ", c.Wallet.Account)) - fatalOnErr(err) - - err = acc.Decrypt(c.Wallet.Password, keys.NEP2ScryptParams()) - fatalOnErr(err) - - c.AuthorizedKeys = append(c.AuthorizedKeys, hex.EncodeToString(acc.PrivateKey().PublicKey().Bytes())) - - network := readNetwork(cmd) - - c.MorphRPC = n3config[network].MorphRPC - - depositGas(cmd, acc, network) - - c.Attribute.Locode = getString("UN-LOCODE attribute in [XX YYY] format: ") - - endpoint := getDefaultEndpoint(cmd, &c) - c.Endpoint = getString(fmt.Sprintf("Listening address [%s]: ", endpoint)) - if c.Endpoint == "" { - c.Endpoint = endpoint - } - - c.ControlEndpoint = getString(fmt.Sprintf("Listening address (control endpoint) [%s]: ", defaultControlEndpoint)) - if c.ControlEndpoint == "" { - c.ControlEndpoint = defaultControlEndpoint - } - - c.TLSCert = getPath("TLS Certificate (optional): ") - if c.TLSCert != "" { - c.TLSKey = getPath("TLS Key: ") - } - - c.Relay = getConfirmation(false, "Use node as a relay? yes/[no]: ") - if !c.Relay { - p := getPath("Path to the storage directory (all available storage will be used): ") - c.BlobstorPath = filepath.Join(p, "blob") - c.MetabasePath = filepath.Join(p, "meta") - } - - out := applyTemplate(c) - fatalOnErr(os.WriteFile(outPath, out, 0o644)) - - cmd.Println("Node is ready for work! 
Run `frostfs-node -config " + outPath + "`") -} - -func getDefaultEndpoint(cmd *cobra.Command, c *config) string { - var addr, port string - for { - c.AnnouncedAddress = getString("Publicly announced address: ") - validator := netutil.Address{} - err := validator.FromString(c.AnnouncedAddress) - if err != nil { - cmd.Println("Incorrect address format. See https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/network/address.go for details.") - continue - } - uriAddr, err := url.Parse(validator.URIAddr()) - if err != nil { - panic(fmt.Errorf("unexpected error: %w", err)) - } - addr = uriAddr.Hostname() - port = uriAddr.Port() - ip, err := net.ResolveIPAddr("ip", addr) - if err != nil { - cmd.Printf("Can't resolve IP address %s: %v\n", addr, err) - continue - } - - if !ip.IP.IsGlobalUnicast() { - cmd.Println("IP must be global unicast.") - continue - } - cmd.Printf("Resolved IP address: %s\n", ip.String()) - - _, err = strconv.ParseUint(port, 10, 16) - if err != nil { - cmd.Println("Port must be an integer.") - continue - } - - break - } - return net.JoinHostPort(defaultDataEndpoint, port) -} - -func fillWalletAccount(cmd *cobra.Command, c *config, w *wallet.Wallet) { - c.Wallet.Account, _ = cmd.Flags().GetString(accountFlag) - if c.Wallet.Account == "" { - addr := address.Uint160ToString(w.GetChangeAddress()) - c.Wallet.Account = getWalletAccount(w, fmt.Sprintf("Wallet account [%s]: ", addr)) - if c.Wallet.Account == "" { - c.Wallet.Account = addr - } - } -} - -func readNetwork(cmd *cobra.Command) string { - var network string - for { - network = getString("Choose network [mainnet]/testnet: ") - switch network { - case "": - network = "mainnet" - case "testnet", "mainnet": - default: - cmd.Println(`Network must be either "mainnet" or "testnet"`) - continue - } - break - } - return network -} - -func getOutputPath(args []string) string { - if len(args) != 0 { - return args[0] - } - outPath := getPath("File to write config at [./config.yml]: ") - if outPath == "" { - outPath = "./config.yml" - } - return outPath -} - -func getWalletAccount(w *wallet.Wallet, prompt string) string { - addrs := make([]readline.PrefixCompleterInterface, len(w.Accounts)) - for i := range w.Accounts { - addrs[i] = readline.PcItem(w.Accounts[i].Address) - } - - readline.SetAutoComplete(readline.NewPrefixCompleter(addrs...)) - defer readline.SetAutoComplete(nil) - - s, err := readline.Line(prompt) - fatalOnErr(err) - return strings.TrimSpace(s) // autocompleter can return a string with a trailing space -} - -func getString(prompt string) string { - s, err := readline.Line(prompt) - fatalOnErr(err) - if s != "" { - _ = readline.AddHistory(s) - } - return s -} - -type filenameCompleter struct{} - -func (filenameCompleter) Do(line []rune, pos int) (newLine [][]rune, length int) { - prefix := string(line[:pos]) - dir := filepath.Dir(prefix) - de, err := os.ReadDir(dir) - if err != nil { - return nil, 0 - } - - for i := range de { - name := filepath.Join(dir, de[i].Name()) - if strings.HasPrefix(name, prefix) { - tail := []rune(strings.TrimPrefix(name, prefix)) - if de[i].IsDir() { - tail = append(tail, filepath.Separator) - } - newLine = append(newLine, tail) - } - } - if pos != 0 { - return newLine, pos - len([]rune(dir)) - } - return newLine, 0 -} - -func getPath(prompt string) string { - readline.SetAutoComplete(filenameCompleter{}) - defer readline.SetAutoComplete(nil) - - p, err := readline.Line(prompt) - fatalOnErr(err) - - if p == "" { - return p - } - - _ = readline.AddHistory(p) - - abs, err 
:= filepath.Abs(p) - if err != nil { - fatalOnErr(fmt.Errorf("can't create an absolute path: %w", err)) - } - - return abs -} - -func getConfirmation(def bool, prompt string) bool { - for { - s, err := readline.Line(prompt) - fatalOnErr(err) - - switch strings.ToLower(s) { - case "y", "yes": - return true - case "n", "no": - return false - default: - if len(s) == 0 { - return def - } - } - } -} - -func applyTemplate(c config) []byte { - tmpl, err := template.New("config").Parse(configTemplate) - fatalOnErr(err) - - b := bytes.NewBuffer(nil) - fatalOnErr(tmpl.Execute(b, c)) - - return b.Bytes() -} - -func fatalOnErr(err error) { - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func depositGas(cmd *cobra.Command, acc *wallet.Account, network string) { - sideClient := initClient(n3config[network].MorphRPC) - balanceHash, _ := util.Uint160DecodeStringLE(n3config[network].BalanceContract) - - sideActor, err := actor.NewSimple(sideClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over side chain client: %w", err)) - } - - sideGas := nep17.NewReader(sideActor, balanceHash) - accSH := acc.Contract.ScriptHash() - - balance, err := sideGas.BalanceOf(accSH) - if err != nil { - fatalOnErr(fmt.Errorf("side chain balance: %w", err)) - } - - ok := getConfirmation(false, fmt.Sprintf("Current NeoFS balance is %s, make a deposit? y/[n]: ", - fixedn.ToString(balance, 12))) - if !ok { - return - } - - amountStr := getString("Enter amount in GAS: ") - amount, err := fixedn.FromString(amountStr, 8) - if err != nil { - fatalOnErr(fmt.Errorf("invalid amount: %w", err)) - } - - mainClient := initClient(n3config[network].RPC) - neofsHash, _ := util.Uint160DecodeStringLE(n3config[network].NeoFSContract) - - mainActor, err := actor.NewSimple(mainClient, acc) - if err != nil { - fatalOnErr(fmt.Errorf("creating actor over main chain client: %w", err)) - } - - mainGas := nep17.New(mainActor, gas.Hash) - - txHash, _, err := mainGas.Transfer(accSH, neofsHash, amount, nil) - if err != nil { - fatalOnErr(fmt.Errorf("sending TX to the NeoFS contract: %w", err)) - } - - cmd.Print("Waiting for transactions to persist.") - tick := time.NewTicker(time.Second / 2) - defer tick.Stop() - - timer := time.NewTimer(time.Second * 20) - defer timer.Stop() - - at := trigger.Application - -loop: - for { - select { - case <-tick.C: - _, err := mainClient.GetApplicationLog(txHash, &at) - if err == nil { - cmd.Print("\n") - break loop - } - cmd.Print(".") - case <-timer.C: - cmd.Printf("\nTimeout while waiting for transaction to persist.\n") - if getConfirmation(false, "Continue configuration? 
yes/[no]: ") { - return - } - os.Exit(1) - } - } -} - -func initClient(rpc []string) *rpcclient.Client { - var c *rpcclient.Client - var err error - - shuffled := slices.Clone(rpc) - rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] }) - - for _, endpoint := range shuffled { - c, err = rpcclient.New(context.Background(), "https://"+endpoint, rpcclient.Options{ - DialTimeout: time.Second * 2, - RequestTimeout: time.Second * 5, - }) - if err != nil { - continue - } - if err = c.Init(); err != nil { - continue - } - return c - } - - fatalOnErr(fmt.Errorf("can't create N3 client: %w", err)) - panic("unreachable") -} From 30099194ba8f64b5340c3c2ff3153d31ab549223 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 15:18:28 +0300 Subject: [PATCH 457/591] [#1689] config: Remove testnet and mainnet configs They are invalid and unsupported. There is neither mainnet nor testnet currently. Change-Id: I520363e2de0c22a584238accc253248be3eefea5 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/validate_test.go | 14 ---- config/mainnet/README.md | 28 ------- config/mainnet/config.yml | 70 ---------------- config/testnet/README.md | 129 ------------------------------ config/testnet/config.yml | 52 ------------ 5 files changed, 293 deletions(-) delete mode 100644 config/mainnet/README.md delete mode 100644 config/mainnet/config.yml delete mode 100644 config/testnet/README.md delete mode 100644 config/testnet/config.yml diff --git a/cmd/frostfs-node/validate_test.go b/cmd/frostfs-node/validate_test.go index d9c0f167f..495365cf0 100644 --- a/cmd/frostfs-node/validate_test.go +++ b/cmd/frostfs-node/validate_test.go @@ -1,7 +1,6 @@ package main import ( - "os" "path/filepath" "testing" @@ -22,17 +21,4 @@ func TestValidate(t *testing.T) { require.NoError(t, err) }) }) - - t.Run("mainnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "mainnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) - t.Run("testnet", func(t *testing.T) { - os.Clearenv() // ENVs have priority over config files, so we do this in tests - p := filepath.Join(exampleConfigPrefix, "testnet/config.yml") - c := config.New(p, "", config.EnvPrefix) - require.NoError(t, validateConfig(c)) - }) } diff --git a/config/mainnet/README.md b/config/mainnet/README.md deleted file mode 100644 index 717a9b0ff..000000000 --- a/config/mainnet/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# N3 Mainnet Storage node configuration - -Here is a template for simple storage node configuration in N3 Mainnet. -Make sure to specify correct values instead of `<...>` placeholders. -Do not change `contracts` section. Run the latest frostfs-node release with -the fixed config `frostfs-node -c config.yml` - -To use NeoFS in the Mainnet, you need to deposit assets to NeoFS contract. -The contract sript hash is `2cafa46838e8b564468ebd868dcafdd99dce6221` -(N3 address `NNxVrKjLsRkWsmGgmuNXLcMswtxTGaNQLk`) - -## Tips - -Use `grpcs://` scheme in the announced address if you enable TLS in grpc server. 
-```yaml -node: - addresses: - - grpcs://frostfs.my.org:8080 - -grpc: - num: 1 - 0: - endpoint: frostfs.my.org:8080 - tls: - enabled: true - certificate: /path/to/cert - key: /path/to/key -``` diff --git a/config/mainnet/config.yml b/config/mainnet/config.yml deleted file mode 100644 index d86ea451f..000000000 --- a/config/mainnet/config.yml +++ /dev/null @@ -1,70 +0,0 @@ -node: - wallet: - path: - address: - password: - addresses: - - - attribute_0: UN-LOCODE: - attribute_1: Price:100000 - attribute_2: User-Agent:FrostFS\/0.9999 - -grpc: - num: 1 - 0: - endpoint: - tls: - enabled: false - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/path/metabase - perm: 0600 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m - -logger: - level: info - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -object: - put: - remote_pool_size: 100 - local_pool_size: 100 - -morph: - rpc_endpoint: - - wss://rpc1.morph.frostfs.info:40341/ws - - wss://rpc2.morph.frostfs.info:40341/ws - - wss://rpc3.morph.frostfs.info:40341/ws - - wss://rpc4.morph.frostfs.info:40341/ws - - wss://rpc5.morph.frostfs.info:40341/ws - - wss://rpc6.morph.frostfs.info:40341/ws - - wss://rpc7.morph.frostfs.info:40341/ws - dial_timeout: 20s - -contracts: - balance: dc1ec98d9d0c5f9dfade16144defe08cffc5ca55 - container: 1b6e68d299b570e1cb7e86eadfdc06aa2e8e0cc5 - netmap: 7c5bdb23e36cc7cce95bf42f3ab9e452c2501df1 diff --git a/config/testnet/README.md b/config/testnet/README.md deleted file mode 100644 index e2cda33ec..000000000 --- a/config/testnet/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# N3 Testnet Storage node configuration - -There is a prepared configuration for NeoFS Storage Node deployment in -N3 Testnet. The easiest way to deploy a Storage Node is to use the prepared -docker image and run it with docker-compose. - -## Build image - -Prepared **frostfs-storage-testnet** image is available at Docker Hub. -However, if you need to rebuild it for some reason, run -`make image-storage-testnet` command. - -``` -$ make image-storage-testnet -... -Successfully built ab0557117b02 -Successfully tagged nspccdev/neofs-storage-testnet:0.25.1 -``` - -## Deploy node - -To run a storage node in N3 Testnet environment, you should deposit GAS assets, -update docker-compose file and start the node. - -### Deposit - -The Storage Node owner should deposit GAS to NeoFS smart contract. It generates a -bit of sidechain GAS in the node's wallet. Sidechain GAS is used to send bootstrap tx. - -First, obtain GAS in N3 Testnet chain. You can do that with -[faucet](https://neowish.ngd.network) service. - -Then, make a deposit by transferring GAS to NeoFS contract in N3 Testnet. -You can provide scripthash in the `data` argument of transfer tx to make a -deposit to a specified account. Otherwise, deposit is made to the tx sender. - -NeoFS contract scripthash in N3 Testnet is `b65d8243ac63983206d17e5221af0653a7266fa1`, -so the address is `NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF`. - -See a deposit example with `neo-go`. 
- -``` -neo-go wallet nep17 transfer -w wallet.json -r https://rpc01.testnet.n3.nspcc.ru:21331 \ ---from NXxRAFPqPstaPByndKMHuC8iGcaHgtRY3m \ ---to NadZ8YfvkddivcFFkztZgfwxZyKf1acpRF \ ---token GAS \ ---amount 1 -``` - -### Configure - -Next, configure `node_config.env` file. Change endpoints values. Both -should contain your **public** IP. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -``` - -Set up your [UN/LOCODE](https://unece.org/trade/cefact/unlocode-code-list-country-and-territory) -attribute. - -``` -NEOFS_GRPC_0_ENDPOINT=65.52.183.157:36512 -NEOFS_NODE_ADDRESSES=65.52.183.157:36512 -NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED -``` - -You can validate UN/LOCODE attribute in -[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0) -with frostfs-cli. - -``` -$ frostfs-cli util locode info --db ./locode_db --locode 'RU LED' -Country: Russia -Location: Saint Petersburg (ex Leningrad) -Continent: Europe -Subdivision: [SPE] Sankt-Peterburg -Coordinates: 59.53, 30.15 -``` - -It is recommended to pass the node's key as a file. To do so, convert your wallet -WIF to 32-byte hex (via `frostfs-cli` for example) and save it to a file. - -``` -// Print WIF in a 32-byte hex format -$ frostfs-cli util keyer Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -PrivateKey 11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56 -PublicKey 02be8b2e837cab232168f5c3303f1b985818b7583682fb49026b8d2f43df7c1059 -WIF Kwp4Q933QujZLUCcn39tzY94itNQJS4EjTp28oAMzuxMwabm3p1s -Wallet3.0 Nfzmk7FAZmEHDhLePdgysQL2FgkJbaEMpQ -ScriptHash3.0 dffe39998f50d42f2e06807866161cd0440b4bdc -ScriptHash3.0BE dc4b0b44d01c16667880062e2fd4508f9939fedf - -// Save 32-byte hex into a file -$ echo '11ab917cd99170cb8d0d48e78fca317564e6b3aaff7f7058952d6175cdca0f56' | xxd -r -p > my_wallet.key -``` - -Then, specify the path to this file in `docker-compose.yml` -```yaml - volumes: - - frostfs_storage:/storage - - ./my_wallet.key:/node.key -``` - - -NeoFS objects will be stored on your machine. By default, docker-compose -is configured to store objects in named docker volume `frostfs_storage`. You can -specify a directory on the filesystem to store objects there. - -```yaml - volumes: - - /home/username/frostfs/rc3/storage:/storage - - ./my_wallet.key:/node.key -``` - -### Start - -Run the node with `docker-compose up` command and stop it with `docker-compose down`. - -### Debug - -To print node logs, use `docker logs frostfs-testnet`. 
To print debug messages in -log, set up log level to debug with this env: - -```yaml - environment: - - NEOFS_LOGGER_LEVEL=debug -``` diff --git a/config/testnet/config.yml b/config/testnet/config.yml deleted file mode 100644 index 76b36cdf6..000000000 --- a/config/testnet/config.yml +++ /dev/null @@ -1,52 +0,0 @@ -logger: - level: info - -morph: - rpc_endpoint: - - wss://rpc01.morph.testnet.frostfs.info:51331/ws - - wss://rpc02.morph.testnet.frostfs.info:51331/ws - - wss://rpc03.morph.testnet.frostfs.info:51331/ws - - wss://rpc04.morph.testnet.frostfs.info:51331/ws - - wss://rpc05.morph.testnet.frostfs.info:51331/ws - - wss://rpc06.morph.testnet.frostfs.info:51331/ws - - wss://rpc07.morph.testnet.frostfs.info:51331/ws - dial_timeout: 20s - -contracts: - balance: e0420c216003747626670d1424569c17c79015bf - container: 9dbd2b5e67568ed285c3d6f96bac4edf5e1efba0 - netmap: d4b331639799e2958d4bc5b711b469d79de94e01 - -node: - key: /node.key - attribute_0: Deployed:SelfHosted - attribute_1: User-Agent:FrostFS\/0.9999 - -prometheus: - enabled: true - address: localhost:9090 - shutdown_timeout: 15s - -storage: - shard_num: 1 - shard: - 0: - metabase: - path: /storage/metabase - perm: 0777 - blobstor: - - path: /storage/path/blobovnicza - type: blobovnicza - perm: 0600 - opened_cache_capacity: 32 - depth: 1 - width: 1 - - path: /storage/path/fstree - type: fstree - perm: 0600 - depth: 4 - writecache: - enabled: false - gc: - remover_batch_size: 100 - remover_sleep_interval: 1m From a4da1da7670b32b73e03f240d0cfcca45f33afda Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 15:40:40 +0300 Subject: [PATCH 458/591] [#905] morph/client: Fetch NNS hash once on init NNS contract hash is taken from the contract with ID=1. Because morph client is expected to work with the same chain, and because contract hash doesn't change on update, there is no need to fetch it from each new endpoint. 
Change-Id: Ic6dc18283789da076d6a0b3701139b97037714cc Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/client.go | 17 +------------ pkg/morph/client/constructor.go | 5 ++++ pkg/morph/client/nns.go | 42 ++++----------------------------- 3 files changed, 10 insertions(+), 54 deletions(-) diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index e63d926e0..a7c3c6d8d 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -60,6 +60,7 @@ type Client struct { rpcActor *actor.Actor // neo-go RPC actor gasToken *nep17.Token // neo-go GAS token wrapper rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper + nnsHash util.Uint160 // NNS contract hash acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -94,27 +95,12 @@ type Client struct { type cache struct { m sync.RWMutex - nnsHash *util.Uint160 gKey *keys.PublicKey txHeights *lru.Cache[util.Uint256, uint32] metrics metrics.MorphCacheMetrics } -func (c *cache) nns() *util.Uint160 { - c.m.RLock() - defer c.m.RUnlock() - - return c.nnsHash -} - -func (c *cache) setNNSHash(nnsHash util.Uint160) { - c.m.Lock() - defer c.m.Unlock() - - c.nnsHash = &nnsHash -} - func (c *cache) groupKey() *keys.PublicKey { c.m.RLock() defer c.m.RUnlock() @@ -133,7 +119,6 @@ func (c *cache) invalidate() { c.m.Lock() defer c.m.Unlock() - c.nnsHash = nil c.gKey = nil c.txHeights.Purge() } diff --git a/pkg/morph/client/constructor.go b/pkg/morph/client/constructor.go index d061747bb..e4dcd0db7 100644 --- a/pkg/morph/client/constructor.go +++ b/pkg/morph/client/constructor.go @@ -145,6 +145,11 @@ func New(ctx context.Context, key *keys.PrivateKey, opts ...Option) (*Client, er if cli.client == nil { return nil, ErrNoHealthyEndpoint } + cs, err := cli.client.GetContractStateByID(nnsContractID) + if err != nil { + return nil, fmt.Errorf("resolve nns hash: %w", err) + } + cli.nnsHash = cs.Hash cli.setActor(act) go cli.closeWaiter(ctx) diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index f292dccf1..b03967fdd 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -61,11 +61,7 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - nnsHash, err := c.NNSHash() - if err != nil { - return util.Uint160{}, err - } - + nnsHash := c.NNSHash() sh, err = nnsResolve(c.client, nnsHash, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) @@ -74,34 +70,8 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { } // NNSHash returns NNS contract hash. 
-func (c *Client) NNSHash() (util.Uint160, error) { - c.switchLock.RLock() - defer c.switchLock.RUnlock() - - if c.inactive { - return util.Uint160{}, ErrConnectionLost - } - - success := false - startedAt := time.Now() - - defer func() { - c.cache.metrics.AddMethodDuration("NNSContractHash", success, time.Since(startedAt)) - }() - - nnsHash := c.cache.nns() - - if nnsHash == nil { - cs, err := c.client.GetContractStateByID(nnsContractID) - if err != nil { - return util.Uint160{}, fmt.Errorf("NNS contract state: %w", err) - } - - c.cache.setNNSHash(cs.Hash) - nnsHash = &cs.Hash - } - success = true - return *nnsHash, nil +func (c *Client) NNSHash() util.Uint160 { + return c.nnsHash } func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { @@ -241,11 +211,7 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - nnsHash, err := c.NNSHash() - if err != nil { - return nil, err - } - + nnsHash := c.NNSHash() item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) if err != nil { return nil, err From eed082459081628eef495d13b5053c07fb67ae84 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 21 Mar 2025 18:03:03 +0300 Subject: [PATCH 459/591] go.mod: Bump frostfs-qos version Change-Id: I8bc045b509ee1259cfad288477a0b7d045683f10 Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- pkg/network/cache/multi.go | 2 +- pkg/services/tree/cache.go | 2 +- pkg/services/tree/sync.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index fafb4f828..753bfbd29 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 7818583d4..7f9417954 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529 h1:CBreXSxGoYJAdZ1QdJPsDs1UCXGF5psinII0lxtohsc= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250310135838-3e7ca9403529/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go 
v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go index 77420865a..54c1e18fb 100644 --- a/pkg/network/cache/multi.go +++ b/pkg/network/cache/multi.go @@ -67,7 +67,7 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 3359af2c5..e2be2f4a2 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -100,7 +100,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 103e2a613..1c3521344 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -345,7 +345,7 @@ func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), tracing_grpc.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInteceptor(), + tagging.NewUnaryClientInterceptor(), ), grpc.WithChainStreamInterceptor( qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), From 73e35bc88554954371555bd0e9bdf6ce99d20de7 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 19 Mar 2025 13:57:44 +0300 Subject: [PATCH 460/591] [#1052] object: Make `ape` middleware form request info * Move some helpers from `acl/v2` package to `ape`. Also move errors; * Introduce `Metadata`, `RequestInfo` types; * Introduce `RequestInfoExtractor` interface and its implementation. The extractor's purpose is to extract request info based on request metadata. It also validates session token; * Refactor ape service - each handler forms request info and pass necessary fields to checker. 
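Roughly, every handler now follows the same flow (compressed sketch, not a
verbatim excerpt; the names are the ones introduced below, and cnrV2/objV2
stand for the request's container and object ID fields):

    md, err := newMetadata(request, cnrV2, objV2) // IDs, session and bearer tokens
    if err != nil {
        return err
    }
    reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodGetObject)
    if err != nil {
        return err // covers session token validation and sender classification
    }
    err = c.apeChecker.CheckAPE(ctx, Prm{
        Namespace:      reqInfo.Namespace,
        Container:      md.Container,
        Object:         md.Object,
        Method:         nativeschema.MethodGetObject,
        Role:           reqInfo.Role,
        SenderKey:      reqInfo.SenderKey,
        ContainerOwner: reqInfo.ContainerOwner,
        BearerToken:    md.BearerToken,
        XHeaders:       md.MetaHeader.GetXHeaders(),
    })

The handlers no longer read a `RequestContext` from context.Context; everything
they need is derived from the request itself.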
Signed-off-by: Airat Arifullin --- cmd/frostfs-node/object.go | 5 +- pkg/services/object/ape/errors.go | 15 + pkg/services/object/ape/metadata.go | 172 ++++++++++++ pkg/services/object/ape/metadata_test.go | 164 +++++++++++ pkg/services/object/ape/service.go | 335 ++++++++++------------- pkg/services/object/ape/types.go | 8 + pkg/services/object/ape/util.go | 169 ++++++++++++ pkg/services/object/ape/util_test.go | 84 ++++++ 8 files changed, 759 insertions(+), 193 deletions(-) create mode 100644 pkg/services/object/ape/metadata.go create mode 100644 pkg/services/object/ape/metadata_test.go create mode 100644 pkg/services/object/ape/util.go create mode 100644 pkg/services/object/ape/util_test.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index ad6f4140a..652d3ad04 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -172,7 +172,7 @@ func initObjectService(c *cfg) { splitSvc := createSplitService(c, sPutV2, sGetV2, sSearchV2, sDeleteV2, sPatch) - apeSvc := createAPEService(c, splitSvc) + apeSvc := createAPEService(c, &irFetcher, splitSvc) aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) @@ -439,7 +439,7 @@ func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFe ) } -func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service { +func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), @@ -451,6 +451,7 @@ func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *object c.cfgObject.cnrSource, c.binPublicKey, ), + objectAPE.NewRequestInfoExtractor(c.log, c.cfgObject.cnrSource, irFetcher, c.netMapSource), splitSvc, ) } diff --git a/pkg/services/object/ape/errors.go b/pkg/services/object/ape/errors.go index 6e458b384..82e660a7f 100644 --- a/pkg/services/object/ape/errors.go +++ b/pkg/services/object/ape/errors.go @@ -7,6 +7,21 @@ import ( apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" ) +var ( + errMissingContainerID = malformedRequestError("missing container ID") + errEmptyVerificationHeader = malformedRequestError("empty verification header") + errEmptyBodySig = malformedRequestError("empty at body signature") + errInvalidSessionSig = malformedRequestError("invalid session token signature") + errInvalidSessionOwner = malformedRequestError("invalid session token owner") + errInvalidVerb = malformedRequestError("session token verb is invalid") +) + +func malformedRequestError(reason string) error { + invalidArgErr := &apistatus.InvalidArgument{} + invalidArgErr.SetMessage(reason) + return invalidArgErr +} + func toStatusErr(err error) error { var chRouterErr *checkercore.ChainRouterError if !errors.As(err, &chRouterErr) { diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go new file mode 100644 index 000000000..b37c3b6f8 --- /dev/null +++ b/pkg/services/object/ape/metadata.go @@ -0,0 +1,172 @@ +package ape + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +type Metadata struct { + Container cid.ID + Object *oid.ID + MetaHeader *session.RequestMetaHeader + VerificationHeader *session.RequestVerificationHeader + SessionToken *sessionSDK.Object + BearerToken *bearer.Token +} + +func (m Metadata) RequestOwner() (*user.ID, *keys.PublicKey, error) { + if m.VerificationHeader == nil { + return nil, nil, errEmptyVerificationHeader + } + + if m.BearerToken != nil && m.BearerToken.Impersonate() { + return unmarshalPublicKeyWithOwner(m.BearerToken.SigningKeyBytes()) + } + + // if session token is presented, use it as truth source + if m.SessionToken != nil { + // verify signature of session token + return ownerFromToken(m.SessionToken) + } + + // otherwise get original body signature + bodySignature := originalBodySignature(m.VerificationHeader) + if bodySignature == nil { + return nil, nil, errEmptyBodySig + } + + return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) +} + +// RequestInfo contains request information extracted by request metadata. +type RequestInfo struct { + // Role defines under which role this request is executed. + // It must be represented only as a constant represented in native schema. + Role string + + ContainerOwner user.ID + + // Namespace defines to which namespace a container is belonged. + Namespace string + + // HEX-encoded sender key. 
+ SenderKey string +} + +type RequestInfoExtractor interface { + GetRequestInfo(context.Context, Metadata, string) (RequestInfo, error) +} + +type extractor struct { + containers container.Source + + nm netmap.Source + + classifier objectCore.SenderClassifier +} + +func NewRequestInfoExtractor(log *logger.Logger, containers container.Source, irFetcher InnerRingFetcher, nm netmap.Source) RequestInfoExtractor { + return &extractor{ + containers: containers, + nm: nm, + classifier: objectCore.NewSenderClassifier(irFetcher, nm, log), + } +} + +func (e *extractor) verifySessionToken(ctx context.Context, sessionToken *sessionSDK.Object, method string) error { + currentEpoch, err := e.nm.Epoch(ctx) + if err != nil { + return errors.New("can't fetch current epoch") + } + if sessionToken.ExpiredAt(currentEpoch) { + return new(apistatus.SessionTokenExpired) + } + if sessionToken.InvalidAt(currentEpoch) { + return fmt.Errorf("malformed request: token is invalid at %d epoch)", currentEpoch) + } + if !assertVerb(*sessionToken, method) { + return errInvalidVerb + } + return nil +} + +func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method string) (ri RequestInfo, err error) { + cnr, err := e.containers.Get(ctx, m.Container) + if err != nil { + return ri, err + } + + if m.SessionToken != nil { + if err = e.verifySessionToken(ctx, m.SessionToken, method); err != nil { + return ri, err + } + } + + ownerID, ownerKey, err := m.RequestOwner() + if err != nil { + return ri, err + } + res, err := e.classifier.Classify(ctx, ownerID, ownerKey, m.Container, cnr.Value) + if err != nil { + return ri, err + } + + ri.Role = nativeSchemaRole(res.Role) + ri.ContainerOwner = cnr.Value.Owner() + + cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") + if hasNamespace { + ri.Namespace = cnrNamespace + } + + // it is assumed that at the moment the key will be valid, + // otherwise the request would not pass validation + ri.SenderKey = hex.EncodeToString(res.Key) + + return ri, nil +} + +func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { + var sTok *sessionSDK.Object + + if tokV2 != nil { + sTok = new(sessionSDK.Object) + + err := sTok.ReadFromV2(*tokV2) + if err != nil { + return nil, fmt.Errorf("invalid session token: %w", err) + } + + if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { + // if session relates to object's removal, we don't check + // relation of the tombstone to the session here since user + // can't predict tomb's ID. 
+ err = assertSessionRelation(*sTok, cnr, nil) + } else { + err = assertSessionRelation(*sTok, cnr, obj) + } + + if err != nil { + return nil, err + } + } + + return sTok, nil +} diff --git a/pkg/services/object/ape/metadata_test.go b/pkg/services/object/ape/metadata_test.go new file mode 100644 index 000000000..fd919008f --- /dev/null +++ b/pkg/services/object/ape/metadata_test.go @@ -0,0 +1,164 @@ +package ape + +import ( + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/google/uuid" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestRequestOwner(t *testing.T) { + containerOwner, err := keys.NewPrivateKey() + require.NoError(t, err) + + userPk, err := keys.NewPrivateKey() + require.NoError(t, err) + + var userID user.ID + user.IDFromKey(&userID, userPk.PrivateKey.PublicKey) + + var userSignature refs.Signature + userSignature.SetKey(userPk.PublicKey().Bytes()) + + vh := new(sessionV2.RequestVerificationHeader) + vh.SetBodySignature(&userSignature) + + t.Run("empty verification header", func(t *testing.T) { + req := Metadata{} + checkOwner(t, req, nil, errEmptyVerificationHeader) + }) + t.Run("empty verification header signature", func(t *testing.T) { + req := Metadata{ + VerificationHeader: new(sessionV2.RequestVerificationHeader), + } + checkOwner(t, req, nil, errEmptyBodySig) + }) + t.Run("no tokens", func(t *testing.T) { + req := Metadata{ + VerificationHeader: vh, + } + checkOwner(t, req, userPk.PublicKey(), nil) + }) + + t.Run("bearer without impersonate, no session", func(t *testing.T) { + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, false), + } + checkOwner(t, req, userPk.PublicKey(), nil) + }) + t.Run("bearer with impersonate, no session", func(t *testing.T) { + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), + } + checkOwner(t, req, containerOwner.PublicKey(), nil) + }) + t.Run("bearer with impersonate, with session", func(t *testing.T) { + // To check that bearer token takes priority, use different key to sign session token. + pk, err := keys.NewPrivateKey() + require.NoError(t, err) + + req := Metadata{ + VerificationHeader: vh, + BearerToken: newBearer(t, containerOwner, userID, true), + SessionToken: newSession(t, pk), + } + checkOwner(t, req, containerOwner.PublicKey(), nil) + }) + t.Run("with session", func(t *testing.T) { + req := Metadata{ + VerificationHeader: vh, + SessionToken: newSession(t, containerOwner), + } + checkOwner(t, req, containerOwner.PublicKey(), nil) + }) + t.Run("malformed session token", func(t *testing.T) { + // This test is tricky: session token has issuer field and signature, which must correspond to each other. + // SDK prevents constructing such token in the first place, but it is still possible via API. + // Thus, construct v2 token, convert it to SDK one and pass to our function. 
+ pk, err := keys.NewPrivateKey() + require.NoError(t, err) + + var user1 user.ID + user.IDFromKey(&user1, pk.PrivateKey.PublicKey) + + var id refs.OwnerID + id.SetValue(user1.WalletBytes()) + + raw, err := uuid.New().MarshalBinary() + require.NoError(t, err) + + var cidV2 refs.ContainerID + cidtest.ID().WriteToV2(&cidV2) + + sessionCtx := new(sessionV2.ObjectSessionContext) + sessionCtx.SetTarget(&cidV2) + + var body sessionV2.TokenBody + body.SetOwnerID(&id) + body.SetID(raw) + body.SetLifetime(new(sessionV2.TokenLifetime)) + body.SetSessionKey(pk.PublicKey().Bytes()) + body.SetContext(sessionCtx) + + var tokV2 sessionV2.Token + tokV2.SetBody(&body) + require.NoError(t, sigutilV2.SignData(&containerOwner.PrivateKey, smWrapper{Token: &tokV2})) + require.NoError(t, sigutilV2.VerifyData(smWrapper{Token: &tokV2})) + + var tok sessionSDK.Object + require.NoError(t, tok.ReadFromV2(tokV2)) + + req := Metadata{ + VerificationHeader: vh, + SessionToken: &tok, + } + checkOwner(t, req, nil, errInvalidSessionOwner) + }) +} + +type smWrapper struct { + *sessionV2.Token +} + +func (s smWrapper) ReadSignedData(data []byte) ([]byte, error) { + return s.Token.GetBody().StableMarshal(data), nil +} + +func (s smWrapper) SignedDataSize() int { + return s.Token.GetBody().StableSize() +} + +func newSession(t *testing.T, pk *keys.PrivateKey) *sessionSDK.Object { + var tok sessionSDK.Object + require.NoError(t, tok.Sign(pk.PrivateKey)) + return &tok +} + +func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool) *bearer.Token { + var tok bearer.Token + tok.SetImpersonate(impersonate) + tok.ForUser(user) + require.NoError(t, tok.Sign(pk.PrivateKey)) + return &tok +} + +func checkOwner(t *testing.T, req Metadata, expected *keys.PublicKey, expectedErr error) { + _, actual, err := req.RequestOwner() + if expectedErr != nil { + require.ErrorIs(t, err, expectedErr) + return + } + + require.NoError(t, err) + require.Equal(t, expected, actual) +} diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index d9594a3fc..e199e2638 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -2,9 +2,6 @@ package ape import ( "context" - "encoding/hex" - "errors" - "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" @@ -12,19 +9,18 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" ) -var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext") - type Service struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.ServiceServer } @@ -64,9 +60,10 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service) } } -func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service { +func NewService(apeChecker Checker, extractor RequestInfoExtractor, next objectSvc.ServiceServer) *Service { 
return &Service{ apeChecker: apeChecker, + extractor: extractor, next: next, } } @@ -76,15 +73,9 @@ type getStreamBasicChecker struct { apeChecker Checker - namespace string + metadata Metadata - senderKey []byte - - containerOwner user.ID - - role string - - bearerToken *bearer.Token + reqInfo RequestInfo } func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { @@ -95,15 +86,15 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.namespace, + Namespace: g.reqInfo.Namespace, Container: cnrID, Object: objID, Header: partInit.GetHeader(), Method: nativeschema.MethodGetObject, - SenderKey: hex.EncodeToString(g.senderKey), - ContainerOwner: g.containerOwner, - Role: g.role, - BearerToken: g.bearerToken, + SenderKey: g.reqInfo.SenderKey, + ContainerOwner: g.reqInfo.ContainerOwner, + Role: g.reqInfo.Role, + BearerToken: g.metadata.BearerToken, XHeaders: resp.GetMetaHeader().GetXHeaders(), } @@ -114,69 +105,53 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { return g.GetObjectStream.Send(resp) } -func requestContext(ctx context.Context) (*objectSvc.RequestContext, error) { - untyped := ctx.Value(objectSvc.RequestContextKey) - if untyped == nil { - return nil, fmt.Errorf("no key %s in context", objectSvc.RequestContextKey) - } - rc, ok := untyped.(*objectSvc.RequestContext) - if !ok { - return nil, errFailedToCastToRequestContext - } - return rc, nil -} - func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectStream) error { - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodGetObject) + if err != nil { + return err } - return c.next.Get(request, &getStreamBasicChecker{ GetObjectStream: stream, apeChecker: c.apeChecker, - namespace: reqCtx.Namespace, - senderKey: reqCtx.SenderKey, - containerOwner: reqCtx.ContainerOwner, - role: nativeSchemaRole(reqCtx.Role), - bearerToken: reqCtx.BearerToken, + metadata: md, + reqInfo: reqInfo, }) } type putStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PutObjectStream } func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - if partInit, ok := request.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(partInit.GetHeader().GetContainerID(), partInit.GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: partInit.GetHeader(), Method: nativeschema.MethodPutObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: reqCtx.BearerToken, - XHeaders: 
meta.GetXHeaders(), + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -196,6 +171,7 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { return &putStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } @@ -203,40 +179,36 @@ func (c *Service) Put(ctx context.Context) (objectSvc.PutObjectStream, error) { type patchStreamBasicChecker struct { apeChecker Checker + extractor RequestInfoExtractor + next objectSvc.PatchObjectStream nonFirstSend bool } func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - if !p.nonFirstSend { p.nonFirstSend = true - reqCtx, err := requestContext(ctx) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + reqInfo, err := p.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPatchObject) if err != nil { - return toStatusErr(err) + return err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodPatchObject, - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - Role: nativeSchemaRole(reqCtx.Role), - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -256,22 +228,17 @@ func (c *Service) Patch(ctx context.Context) (objectSvc.PatchObjectStream, error return &patchStreamBasicChecker{ apeChecker: c.apeChecker, + extractor: c.extractor, next: streamer, }, err } func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*objectV2.HeadResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHeadObject) if err != nil { return nil, err } @@ -285,7 +252,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj switch headerPart := resp.GetBody().GetHeaderPart().(type) { case *objectV2.ShortHeader: cidV2 := new(refs.ContainerID) - cnrID.WriteToV2(cidV2) + md.Container.WriteToV2(cidV2) header.SetContainerID(cidV2) header.SetVersion(headerPart.GetVersion()) header.SetCreationEpoch(headerPart.GetCreationEpoch()) @@ -301,16 +268,16 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } err = 
c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: header, Method: nativeschema.MethodHeadObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -319,32 +286,24 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj } func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.SearchStream) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - var cnrID cid.ID - if cnrV2 := request.GetBody().GetContainerID(); cnrV2 != nil { - if err := cnrID.ReadFromV2(*cnrV2); err != nil { - return toStatusErr(err) - } - } - - reqCtx, err := requestContext(stream.Context()) + md, err := newMetadata(request, request.GetBody().GetContainerID(), nil) if err != nil { - return toStatusErr(err) + return err + } + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodSearchObject) + if err != nil { + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, + Namespace: reqInfo.Namespace, + Container: md.Container, Method: nativeschema.MethodSearchObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -354,31 +313,25 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc } func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (*objectV2.DeleteResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodDeleteObject) if err != nil { return nil, err } err = c.apeChecker.CheckAPE(ctx, Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodDeleteObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return nil, toStatusErr(err) @@ -393,31 
+346,25 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) ( } func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.GetObjectRangeStream) error { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { - return toStatusErr(err) + return err } - - reqCtx, err := requestContext(stream.Context()) + reqInfo, err := c.extractor.GetRequestInfo(stream.Context(), md, nativeschema.MethodRangeObject) if err != nil { - return toStatusErr(err) + return err } err = c.apeChecker.CheckAPE(stream.Context(), Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodRangeObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), }) if err != nil { return toStatusErr(err) @@ -427,31 +374,25 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G } func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHashRequest) (*objectV2.GetRangeHashResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) + md, err := newMetadata(request, request.GetBody().GetAddress().GetContainerID(), request.GetBody().GetAddress().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodHashObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Method: nativeschema.MethodHashObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } resp, err := c.next.GetRangeHash(ctx, request) @@ -466,32 +407,26 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa } func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - meta := request.GetMetaHeader() - for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { - meta = origin - } - - cnrID, objID, err := getAddressParamsSDK(request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) + md, err := newMetadata(request, 
request.GetBody().GetObject().GetHeader().GetContainerID(), request.GetBody().GetObject().GetObjectID()) if err != nil { return nil, err } - - reqCtx, err := requestContext(ctx) + reqInfo, err := c.extractor.GetRequestInfo(ctx, md, nativeschema.MethodPutObject) if err != nil { return nil, err } prm := Prm{ - Namespace: reqCtx.Namespace, - Container: cnrID, - Object: objID, + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, Header: request.GetBody().GetObject().GetHeader(), Method: nativeschema.MethodPutObject, - Role: nativeSchemaRole(reqCtx.Role), - SenderKey: hex.EncodeToString(reqCtx.SenderKey), - ContainerOwner: reqCtx.ContainerOwner, - BearerToken: reqCtx.BearerToken, - XHeaders: meta.GetXHeaders(), + Role: reqInfo.Role, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err = c.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -501,18 +436,36 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ return c.next.PutSingle(ctx, request) } -func getAddressParamsSDK(cidV2 *refs.ContainerID, objV2 *refs.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { - if cidV2 != nil { - if err = cnrID.ReadFromV2(*cidV2); err != nil { - return - } +type request interface { + GetMetaHeader() *session.RequestMetaHeader + GetVerificationHeader() *session.RequestVerificationHeader +} + +func newMetadata(request request, cnrV2 *refs.ContainerID, objV2 *refs.ObjectID) (md Metadata, err error) { + meta := request.GetMetaHeader() + for origin := meta.GetOrigin(); origin != nil; origin = meta.GetOrigin() { + meta = origin } - if objV2 != nil { - objID = new(oid.ID) - if err = objID.ReadFromV2(*objV2); err != nil { - return - } + cnrID, objID, err := getAddressParamsSDK(cnrV2, objV2) + if err != nil { + return + } + session, err := readSessionToken(cnrID, objID, meta.GetSessionToken()) + if err != nil { + return + } + bearer, err := originalBearerToken(request.GetMetaHeader()) + if err != nil { + return + } + + md = Metadata{ + Container: cnrID, + Object: objID, + VerificationHeader: request.GetVerificationHeader(), + SessionToken: session, + BearerToken: bearer, } return } diff --git a/pkg/services/object/ape/types.go b/pkg/services/object/ape/types.go index 46e55360d..97dbfa658 100644 --- a/pkg/services/object/ape/types.go +++ b/pkg/services/object/ape/types.go @@ -7,3 +7,11 @@ import "context" type Checker interface { CheckAPE(context.Context, Prm) error } + +// InnerRingFetcher is an interface that must provide +// Inner Ring information. +type InnerRingFetcher interface { + // InnerRingKeys must return list of public keys of + // the actual inner ring. 
+ InnerRingKeys(ctx context.Context) ([][]byte, error) +} diff --git a/pkg/services/object/ape/util.go b/pkg/services/object/ape/util.go new file mode 100644 index 000000000..5cd2caa50 --- /dev/null +++ b/pkg/services/object/ape/util.go @@ -0,0 +1,169 @@ +package ape + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + + refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" + sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" +) + +func getAddressParamsSDK(cidV2 *refsV2.ContainerID, objV2 *refsV2.ObjectID) (cnrID cid.ID, objID *oid.ID, err error) { + if cidV2 != nil { + if err = cnrID.ReadFromV2(*cidV2); err != nil { + return + } + } else { + err = errMissingContainerID + return + } + + if objV2 != nil { + objID = new(oid.ID) + if err = objID.ReadFromV2(*objV2); err != nil { + return + } + } + return +} + +// originalBearerToken goes down to original request meta header and fetches +// bearer token from there. +func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, error) { + for header.GetOrigin() != nil { + header = header.GetOrigin() + } + + tokV2 := header.GetBearerToken() + if tokV2 == nil { + return nil, nil + } + + var tok bearer.Token + return &tok, tok.ReadFromV2(*tokV2) +} + +func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { + // 1. First check signature of session token. + if !token.VerifySignature() { + return nil, nil, errInvalidSessionSig + } + + // 2. Then check if session token owner issued the session token + // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion + var tokV2 sessionV2.Token + token.WriteToV2(&tokV2) + + tokenIssuerKey, err := unmarshalPublicKey(tokV2.GetSignature().GetKey()) + if err != nil { + return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err) + } + + tokenIssuer := token.Issuer() + + if !isOwnerFromKey(tokenIssuer, tokenIssuerKey) { + // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again + return nil, nil, errInvalidSessionOwner + } + + return &tokenIssuer, tokenIssuerKey, nil +} + +func originalBodySignature(v *sessionV2.RequestVerificationHeader) *refsV2.Signature { + if v == nil { + return nil + } + + for v.GetOrigin() != nil { + v = v.GetOrigin() + } + + return v.GetBodySignature() +} + +func unmarshalPublicKey(bs []byte) (*keys.PublicKey, error) { + return keys.NewPublicKeyFromBytes(bs, elliptic.P256()) +} + +func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { + if key == nil { + return false + } + + var id2 user.ID + user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) + + return id2.Equals(id) +} + +// assertVerb checks that token verb corresponds to the method. 
+func assertVerb(tok sessionSDK.Object, method string) bool { + switch method { + case nativeschema.MethodPutObject: + return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) + case nativeschema.MethodDeleteObject: + return tok.AssertVerb(sessionSDK.VerbObjectDelete) + case nativeschema.MethodGetObject: + return tok.AssertVerb(sessionSDK.VerbObjectGet) + case nativeschema.MethodHeadObject: + return tok.AssertVerb( + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + ) + case nativeschema.MethodSearchObject: + return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) + case nativeschema.MethodRangeObject: + return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) + case nativeschema.MethodHashObject: + return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) + case nativeschema.MethodPatchObject: + return tok.AssertVerb(sessionSDK.VerbObjectPatch) + } + return false +} + +// assertSessionRelation checks if given token describing the FrostFS session +// relates to the given container and optional object. Missing object +// means that the context isn't bound to any FrostFS object in the container. +// Returns no error iff relation is correct. Criteria: +// +// session is bound to the given container +// object is not specified or session is bound to this object +// +// Session MUST be bound to the particular container, otherwise behavior is undefined. +func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error { + if !tok.AssertContainer(cnr) { + return errors.New("requested container is not related to the session") + } + + if obj != nil && !tok.AssertObject(*obj) { + return errors.New("requested object is not related to the session") + } + + return nil +} + +func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { + key, err := unmarshalPublicKey(rawKey) + if err != nil { + return nil, nil, fmt.Errorf("invalid signature key: %w", err) + } + + var idSender user.ID + user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) + + return &idSender, key, nil +} diff --git a/pkg/services/object/ape/util_test.go b/pkg/services/object/ape/util_test.go new file mode 100644 index 000000000..916bce427 --- /dev/null +++ b/pkg/services/object/ape/util_test.go @@ -0,0 +1,84 @@ +package ape + +import ( + "slices" + "testing" + + cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" + sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" + nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native" + "github.com/stretchr/testify/require" +) + +func TestIsVerbCompatible(t *testing.T) { + table := map[string][]sessionSDK.ObjectVerb{ + nativeschema.MethodPutObject: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch}, + nativeschema.MethodDeleteObject: {sessionSDK.VerbObjectDelete}, + nativeschema.MethodGetObject: {sessionSDK.VerbObjectGet}, + nativeschema.MethodHeadObject: { + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectPatch, + }, + nativeschema.MethodRangeObject: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, 
sessionSDK.VerbObjectPatch}, + nativeschema.MethodHashObject: {sessionSDK.VerbObjectRangeHash}, + nativeschema.MethodSearchObject: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, + nativeschema.MethodPatchObject: {sessionSDK.VerbObjectPatch}, + } + + verbs := []sessionSDK.ObjectVerb{ + sessionSDK.VerbObjectPut, + sessionSDK.VerbObjectDelete, + sessionSDK.VerbObjectHead, + sessionSDK.VerbObjectRange, + sessionSDK.VerbObjectRangeHash, + sessionSDK.VerbObjectGet, + sessionSDK.VerbObjectSearch, + sessionSDK.VerbObjectPatch, + } + + var tok sessionSDK.Object + + for op, list := range table { + for _, verb := range verbs { + contains := slices.Contains(list, verb) + + tok.ForVerb(verb) + + require.Equal(t, contains, assertVerb(tok, op), + "%v in token, %s executing", verb, op) + } + } +} + +func TestAssertSessionRelation(t *testing.T) { + var tok sessionSDK.Object + cnr := cidtest.ID() + cnrOther := cidtest.ID() + obj := oidtest.ID() + objOther := oidtest.ID() + + // make sure ids differ, otherwise test won't work correctly + require.False(t, cnrOther.Equals(cnr)) + require.False(t, objOther.Equals(obj)) + + // bind session to the container (required) + tok.BindContainer(cnr) + + // test container-global session + require.NoError(t, assertSessionRelation(tok, cnr, nil)) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnrOther, nil)) + require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) + + // limit the session to the particular object + tok.LimitByObjects(obj) + + // test fixed object session (here obj arg must be non-nil everywhere) + require.NoError(t, assertSessionRelation(tok, cnr, &obj)) + require.Error(t, assertSessionRelation(tok, cnr, &objOther)) +} From ccdd6cb767d728ce85e6da62b6b0fc9c760f69d8 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 19 Mar 2025 14:46:33 +0300 Subject: [PATCH 461/591] [#1052] object: Nuke out `acl` middleware * Remove `acl` package as it's no longer used; * Remove `RequestContext`; * Fix `cmd/frostfs-node`. 
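The resulting wiring in cmd/frostfs-node/object.go, condensed from the hunk
below (sketch, not a verbatim excerpt):

    apeSvc := createAPEService(c, &irFetcher, splitSvc)

    var commonSvc objectService.Common
    commonSvc.Init(&c.internals, apeSvc) // APE service plugs in directly, no ACL layer

Since the previous commit the `ape` middleware builds request info from the
request itself, so nothing downstream reads the `RequestContext` this layer
used to prepare.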
Signed-off-by: Airat Arifullin --- cmd/frostfs-node/object.go | 17 +- pkg/services/object/acl/eacl/v2/eacl_test.go | 166 ---- pkg/services/object/acl/eacl/v2/headers.go | 246 ------ pkg/services/object/acl/eacl/v2/object.go | 92 --- pkg/services/object/acl/eacl/v2/opts.go | 11 - pkg/services/object/acl/eacl/v2/xheader.go | 69 -- pkg/services/object/acl/v2/errors.go | 20 - pkg/services/object/acl/v2/opts.go | 12 - pkg/services/object/acl/v2/request.go | 152 ---- pkg/services/object/acl/v2/request_test.go | 164 ---- pkg/services/object/acl/v2/service.go | 779 ------------------- pkg/services/object/acl/v2/types.go | 11 - pkg/services/object/acl/v2/util.go | 223 ------ pkg/services/object/acl/v2/util_test.go | 131 ---- pkg/services/object/request_context.go | 24 - 15 files changed, 2 insertions(+), 2115 deletions(-) delete mode 100644 pkg/services/object/acl/eacl/v2/eacl_test.go delete mode 100644 pkg/services/object/acl/eacl/v2/headers.go delete mode 100644 pkg/services/object/acl/eacl/v2/object.go delete mode 100644 pkg/services/object/acl/eacl/v2/opts.go delete mode 100644 pkg/services/object/acl/eacl/v2/xheader.go delete mode 100644 pkg/services/object/acl/v2/errors.go delete mode 100644 pkg/services/object/acl/v2/opts.go delete mode 100644 pkg/services/object/acl/v2/request.go delete mode 100644 pkg/services/object/acl/v2/request_test.go delete mode 100644 pkg/services/object/acl/v2/service.go delete mode 100644 pkg/services/object/acl/v2/types.go delete mode 100644 pkg/services/object/acl/v2/util.go delete mode 100644 pkg/services/object/acl/v2/util_test.go delete mode 100644 pkg/services/object/request_context.go diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 652d3ad04..2674be8c7 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -16,7 +16,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" objectTransportGRPC "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/transport/object/grpc" objectService "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2" objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete" @@ -174,10 +173,8 @@ func initObjectService(c *cfg) { apeSvc := createAPEService(c, &irFetcher, splitSvc) - aclSvc := createACLServiceV2(c, apeSvc, &irFetcher) - var commonSvc objectService.Common - commonSvc.Init(&c.internals, aclSvc) + commonSvc.Init(&c.internals, apeSvc) respSvc := objectService.NewResponseService( &commonSvc, @@ -284,7 +281,7 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl }) } -func createInnerRingFetcher(c *cfg) v2.InnerRingFetcher { +func createInnerRingFetcher(c *cfg) objectAPE.InnerRingFetcher { return &innerRingFetcherWithNotary{ sidechain: c.cfgMorph.client, } @@ -429,16 +426,6 @@ func createSplitService(c *cfg, sPutV2 *putsvcV2.Service, sGetV2 *getsvcV2.Servi ) } -func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFetcher) v2.Service { - return v2.New( - apeSvc, - c.netMapSource, - irFetcher, - c.cfgObject.cnrSource, - v2.WithLogger(c.log), - ) -} - func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectService.TransportSplitter) *objectAPE.Service { return objectAPE.NewService( objectAPE.NewChecker( diff --git 
a/pkg/services/object/acl/eacl/v2/eacl_test.go b/pkg/services/object/acl/eacl/v2/eacl_test.go deleted file mode 100644 index 94e015abe..000000000 --- a/pkg/services/object/acl/eacl/v2/eacl_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "context" - "crypto/ecdsa" - "errors" - "testing" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -type testLocalStorage struct { - t *testing.T - - expAddr oid.Address - - obj *objectSDK.Object - - err error -} - -func (s *testLocalStorage) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) { - require.True(s.t, addr.Container().Equals(s.expAddr.Container())) - require.True(s.t, addr.Object().Equals(s.expAddr.Object())) - - return s.obj, s.err -} - -func testXHeaders(strs ...string) []session.XHeader { - res := make([]session.XHeader, len(strs)/2) - - for i := 0; i < len(strs); i += 2 { - res[i/2].SetKey(strs[i]) - res[i/2].SetValue(strs[i+1]) - } - - return res -} - -func TestHeadRequest(t *testing.T) { - req := new(objectV2.HeadRequest) - - meta := new(session.RequestMetaHeader) - req.SetMetaHeader(meta) - - body := new(objectV2.HeadRequestBody) - req.SetBody(body) - - addr := oidtest.Address() - - var addrV2 refs.Address - addr.WriteToV2(&addrV2) - - body.SetAddress(&addrV2) - - xKey := "x-key" - xVal := "x-val" - xHdrs := testXHeaders( - xKey, xVal, - ) - - meta.SetXHeaders(xHdrs) - - obj := objectSDK.New() - - attrKey := "attr_key" - attrVal := "attr_val" - var attr objectSDK.Attribute - attr.SetKey(attrKey) - attr.SetValue(attrVal) - obj.SetAttributes(attr) - - table := new(eaclSDK.Table) - - priv, err := keys.NewPrivateKey() - require.NoError(t, err) - senderKey := priv.PublicKey() - - r := eaclSDK.NewRecord() - r.SetOperation(eaclSDK.OperationHead) - r.SetAction(eaclSDK.ActionDeny) - r.AddFilter(eaclSDK.HeaderFromObject, eaclSDK.MatchStringEqual, attrKey, attrVal) - r.AddFilter(eaclSDK.HeaderFromRequest, eaclSDK.MatchStringEqual, xKey, xVal) - eaclSDK.AddFormedTarget(r, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table.AddRecord(r) - - lStorage := &testLocalStorage{ - t: t, - expAddr: addr, - obj: obj, - } - - id := addr.Object() - - newSource := func(t *testing.T) eaclSDK.TypedHeaderSource { - hdrSrc, err := NewMessageHeaderSource( - lStorage, - NewRequestXHeaderSource(req), - addr.Container(), - WithOID(&id)) - require.NoError(t, err) - return hdrSrc - } - - cnr := addr.Container() - - unit := new(eaclSDK.ValidationUnit). - WithContainerID(&cnr). - WithOperation(eaclSDK.OperationHead). - WithSenderKey(senderKey.Bytes()). 
- WithEACLTable(table) - - validator := eaclSDK.NewValidator() - - checkAction(t, eaclSDK.ActionDeny, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(nil) - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - meta.SetXHeaders(xHdrs) - - obj.SetAttributes() - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - lStorage.err = errors.New("any error") - - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) - - r.SetAction(eaclSDK.ActionAllow) - - rID := eaclSDK.NewRecord() - rID.SetOperation(eaclSDK.OperationHead) - rID.SetAction(eaclSDK.ActionDeny) - rID.AddObjectIDFilter(eaclSDK.MatchStringEqual, addr.Object()) - eaclSDK.AddFormedTarget(rID, eaclSDK.RoleUnknown, (ecdsa.PublicKey)(*senderKey)) - - table = eaclSDK.NewTable() - table.AddRecord(r) - table.AddRecord(rID) - - unit.WithEACLTable(table) - checkDefaultAction(t, validator, unit.WithHeaderSource(newSource(t))) -} - -func checkAction(t *testing.T, expected eaclSDK.Action, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.True(t, fromRule) - require.Equal(t, expected, actual) -} - -func checkDefaultAction(t *testing.T, v *eaclSDK.Validator, u *eaclSDK.ValidationUnit) { - actual, fromRule := v.CalculateAction(u) - require.False(t, fromRule) - require.Equal(t, eaclSDK.ActionAllow, actual) -} diff --git a/pkg/services/object/acl/eacl/v2/headers.go b/pkg/services/object/acl/eacl/v2/headers.go deleted file mode 100644 index ecb793df8..000000000 --- a/pkg/services/object/acl/eacl/v2/headers.go +++ /dev/null @@ -1,246 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type Option func(*cfg) - -type cfg struct { - storage ObjectStorage - - msg XHeaderSource - - cnr cid.ID - obj *oid.ID -} - -type ObjectStorage interface { - Head(context.Context, oid.Address) (*objectSDK.Object, error) -} - -type Request interface { - GetMetaHeader() *session.RequestMetaHeader -} - -type Response interface { - GetMetaHeader() *session.ResponseMetaHeader -} - -type headerSource struct { - requestHeaders []eaclSDK.Header - objectHeaders []eaclSDK.Header - - incompleteObjectHeaders bool -} - -func NewMessageHeaderSource(os ObjectStorage, xhs XHeaderSource, cnrID cid.ID, opts ...Option) (eaclSDK.TypedHeaderSource, error) { - cfg := &cfg{ - storage: os, - cnr: cnrID, - msg: xhs, - } - - for i := range opts { - opts[i](cfg) - } - - if cfg.msg == nil { - return nil, errors.New("message is not provided") - } - - var res headerSource - - err := cfg.readObjectHeaders(&res) - if err != nil { - return nil, err - } - - res.requestHeaders = cfg.msg.GetXHeaders() - - return res, nil -} - -func (h headerSource) HeadersOfType(typ eaclSDK.FilterHeaderType) ([]eaclSDK.Header, bool) { - switch typ { - default: - return nil, true - case eaclSDK.HeaderFromRequest: - return h.requestHeaders, true - case eaclSDK.HeaderFromObject: - return h.objectHeaders, 
!h.incompleteObjectHeaders - } -} - -type xHeader session.XHeader - -func (x xHeader) Key() string { - return (*session.XHeader)(&x).GetKey() -} - -func (x xHeader) Value() string { - return (*session.XHeader)(&x).GetValue() -} - -var errMissingOID = errors.New("object ID is missing") - -func (h *cfg) readObjectHeaders(dst *headerSource) error { - switch m := h.msg.(type) { - default: - panic(fmt.Sprintf("unexpected message type %T", h.msg)) - case requestXHeaderSource: - return h.readObjectHeadersFromRequestXHeaderSource(m, dst) - case responseXHeaderSource: - return h.readObjectHeadersResponseXHeaderSource(m, dst) - } -} - -func (h *cfg) readObjectHeadersFromRequestXHeaderSource(m requestXHeaderSource, dst *headerSource) error { - switch req := m.req.(type) { - case - *objectV2.GetRequest, - *objectV2.HeadRequest: - if h.obj == nil { - return errMissingOID - } - - objHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objHeaders - dst.incompleteObjectHeaders = !completed - case - *objectV2.GetRangeRequest, - *objectV2.GetRangeHashRequest, - *objectV2.DeleteRequest: - if h.obj == nil { - return errMissingOID - } - - dst.objectHeaders = addressHeaders(h.cnr, h.obj) - case *objectV2.PutRequest: - if v, ok := req.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.PutSingleRequest: - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(req.GetBody().GetObject()), h.cnr, h.obj) - case *objectV2.SearchRequest: - cnrV2 := req.GetBody().GetContainerID() - var cnr cid.ID - - if cnrV2 != nil { - if err := cnr.ReadFromV2(*cnrV2); err != nil { - return fmt.Errorf("can't parse container ID: %w", err) - } - } - - dst.objectHeaders = []eaclSDK.Header{cidHeader(cnr)} - } - return nil -} - -func (h *cfg) readObjectHeadersResponseXHeaderSource(m responseXHeaderSource, dst *headerSource) error { - switch resp := m.resp.(type) { - default: - objectHeaders, completed := h.localObjectHeaders(h.cnr, h.obj) - - dst.objectHeaders = objectHeaders - dst.incompleteObjectHeaders = !completed - case *objectV2.GetResponse: - if v, ok := resp.GetBody().GetObjectPart().(*objectV2.GetObjectPartInit); ok { - oV2 := new(objectV2.Object) - oV2.SetObjectID(v.GetObjectID()) - oV2.SetHeader(v.GetHeader()) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - case *objectV2.HeadResponse: - oV2 := new(objectV2.Object) - - var hdr *objectV2.Header - - switch v := resp.GetBody().GetHeaderPart().(type) { - case *objectV2.ShortHeader: - hdr = new(objectV2.Header) - - var idV2 refsV2.ContainerID - h.cnr.WriteToV2(&idV2) - - hdr.SetContainerID(&idV2) - hdr.SetVersion(v.GetVersion()) - hdr.SetCreationEpoch(v.GetCreationEpoch()) - hdr.SetOwnerID(v.GetOwnerID()) - hdr.SetObjectType(v.GetObjectType()) - hdr.SetPayloadLength(v.GetPayloadLength()) - case *objectV2.HeaderWithSignature: - hdr = v.GetHeader() - } - - oV2.SetHeader(hdr) - - dst.objectHeaders = headersFromObject(objectSDK.NewFromV2(oV2), h.cnr, h.obj) - } - return nil -} - -func (h *cfg) localObjectHeaders(cnr cid.ID, idObj *oid.ID) ([]eaclSDK.Header, bool) { - if idObj != nil { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(*idObj) - - obj, err := h.storage.Head(context.TODO(), addr) - if err == nil { - return headersFromObject(obj, cnr, idObj), true - } - } - - return addressHeaders(cnr, 
idObj), false -} - -func cidHeader(idCnr cid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectContainerID, - v: idCnr.EncodeToString(), - } -} - -func oidHeader(obj oid.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectID, - v: obj.EncodeToString(), - } -} - -func ownerIDHeader(ownerID user.ID) sysObjHdr { - return sysObjHdr{ - k: acl.FilterObjectOwnerID, - v: ownerID.EncodeToString(), - } -} - -func addressHeaders(cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - hh := make([]eaclSDK.Header, 0, 2) - hh = append(hh, cidHeader(cnr)) - - if oid != nil { - hh = append(hh, oidHeader(*oid)) - } - - return hh -} diff --git a/pkg/services/object/acl/eacl/v2/object.go b/pkg/services/object/acl/eacl/v2/object.go deleted file mode 100644 index 92570a3c5..000000000 --- a/pkg/services/object/acl/eacl/v2/object.go +++ /dev/null @@ -1,92 +0,0 @@ -package v2 - -import ( - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -type sysObjHdr struct { - k, v string -} - -func (s sysObjHdr) Key() string { - return s.k -} - -func (s sysObjHdr) Value() string { - return s.v -} - -func u64Value(v uint64) string { - return strconv.FormatUint(v, 10) -} - -func headersFromObject(obj *objectSDK.Object, cnr cid.ID, oid *oid.ID) []eaclSDK.Header { - var count int - for obj := obj; obj != nil; obj = obj.Parent() { - count += 9 + len(obj.Attributes()) - } - - res := make([]eaclSDK.Header, 0, count) - for ; obj != nil; obj = obj.Parent() { - res = append(res, - cidHeader(cnr), - // creation epoch - sysObjHdr{ - k: acl.FilterObjectCreationEpoch, - v: u64Value(obj.CreationEpoch()), - }, - // payload size - sysObjHdr{ - k: acl.FilterObjectPayloadLength, - v: u64Value(obj.PayloadSize()), - }, - // object version - sysObjHdr{ - k: acl.FilterObjectVersion, - v: obj.Version().String(), - }, - // object type - sysObjHdr{ - k: acl.FilterObjectType, - v: obj.Type().String(), - }, - ) - - if oid != nil { - res = append(res, oidHeader(*oid)) - } - - if idOwner := obj.OwnerID(); !idOwner.IsEmpty() { - res = append(res, ownerIDHeader(idOwner)) - } - - cs, ok := obj.PayloadChecksum() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectPayloadHash, - v: cs.String(), - }) - } - - cs, ok = obj.PayloadHomomorphicHash() - if ok { - res = append(res, sysObjHdr{ - k: acl.FilterObjectHomomorphicHash, - v: cs.String(), - }) - } - - attrs := obj.Attributes() - for i := range attrs { - res = append(res, &attrs[i]) // only pointer attrs can implement eaclSDK.Header interface - } - } - - return res -} diff --git a/pkg/services/object/acl/eacl/v2/opts.go b/pkg/services/object/acl/eacl/v2/opts.go deleted file mode 100644 index d91a21c75..000000000 --- a/pkg/services/object/acl/eacl/v2/opts.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -func WithOID(v *oid.ID) Option { - return func(c *cfg) { - c.obj = v - } -} diff --git a/pkg/services/object/acl/eacl/v2/xheader.go b/pkg/services/object/acl/eacl/v2/xheader.go deleted file mode 100644 index ce380c117..000000000 --- a/pkg/services/object/acl/eacl/v2/xheader.go +++ /dev/null @@ -1,69 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - eaclSDK 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" -) - -type XHeaderSource interface { - GetXHeaders() []eaclSDK.Header -} - -type requestXHeaderSource struct { - req Request -} - -func NewRequestXHeaderSource(req Request) XHeaderSource { - return requestXHeaderSource{req: req} -} - -type responseXHeaderSource struct { - resp Response - - req Request -} - -func NewResponseXHeaderSource(resp Response, req Request) XHeaderSource { - return responseXHeaderSource{resp: resp, req: req} -} - -func (s requestXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - ln += len(meta.GetXHeaders()) - } - - res := make([]eaclSDK.Header, 0, ln) - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - for i := range x { - res = append(res, (xHeader)(x[i])) - } - } - - return res -} - -func (s responseXHeaderSource) GetXHeaders() []eaclSDK.Header { - ln := 0 - xHdrs := make([][]session.XHeader, 0) - - for meta := s.req.GetMetaHeader(); meta != nil; meta = meta.GetOrigin() { - x := meta.GetXHeaders() - - ln += len(x) - - xHdrs = append(xHdrs, x) - } - - res := make([]eaclSDK.Header, 0, ln) - - for i := range xHdrs { - for j := range xHdrs[i] { - res = append(res, xHeader(xHdrs[i][j])) - } - } - - return res -} diff --git a/pkg/services/object/acl/v2/errors.go b/pkg/services/object/acl/v2/errors.go deleted file mode 100644 index cd2de174a..000000000 --- a/pkg/services/object/acl/v2/errors.go +++ /dev/null @@ -1,20 +0,0 @@ -package v2 - -import ( - "fmt" -) - -const invalidRequestMessage = "malformed request" - -func malformedRequestError(reason string) error { - return fmt.Errorf("%s: %s", invalidRequestMessage, reason) -} - -var ( - errEmptyBody = malformedRequestError("empty body") - errEmptyVerificationHeader = malformedRequestError("empty verification header") - errEmptyBodySig = malformedRequestError("empty at body signature") - errInvalidSessionSig = malformedRequestError("invalid session token signature") - errInvalidSessionOwner = malformedRequestError("invalid session token owner") - errInvalidVerb = malformedRequestError("session token verb is invalid") -) diff --git a/pkg/services/object/acl/v2/opts.go b/pkg/services/object/acl/v2/opts.go deleted file mode 100644 index 15fcce884..000000000 --- a/pkg/services/object/acl/v2/opts.go +++ /dev/null @@ -1,12 +0,0 @@ -package v2 - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" -) - -// WithLogger returns option to set logger. -func WithLogger(v *logger.Logger) Option { - return func(c *cfg) { - c.log = v - } -} diff --git a/pkg/services/object/acl/v2/request.go b/pkg/services/object/acl/v2/request.go deleted file mode 100644 index 8bd34ccb3..000000000 --- a/pkg/services/object/acl/v2/request.go +++ /dev/null @@ -1,152 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "fmt" - - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -// RequestInfo groups parsed version-independent (from SDK library) -// request information and raw API request. 
-type RequestInfo struct { - basicACL acl.Basic - requestRole acl.Role - operation acl.Op // put, get, head, etc. - cnrOwner user.ID // container owner - - // cnrNamespace defined to which namespace a container is belonged. - cnrNamespace string - - idCnr cid.ID - - // optional for some request - // e.g. Put, Search - obj *oid.ID - - senderKey []byte - - bearer *bearer.Token // bearer token of request - - srcRequest any -} - -func (r *RequestInfo) SetBasicACL(basicACL acl.Basic) { - r.basicACL = basicACL -} - -func (r *RequestInfo) SetRequestRole(requestRole acl.Role) { - r.requestRole = requestRole -} - -func (r *RequestInfo) SetSenderKey(senderKey []byte) { - r.senderKey = senderKey -} - -// Request returns raw API request. -func (r RequestInfo) Request() any { - return r.srcRequest -} - -// ContainerOwner returns owner if the container. -func (r RequestInfo) ContainerOwner() user.ID { - return r.cnrOwner -} - -func (r RequestInfo) ContainerNamespace() string { - return r.cnrNamespace -} - -// ObjectID return object ID. -func (r RequestInfo) ObjectID() *oid.ID { - return r.obj -} - -// ContainerID return container ID. -func (r RequestInfo) ContainerID() cid.ID { - return r.idCnr -} - -// CleanBearer forces cleaning bearer token information. -func (r *RequestInfo) CleanBearer() { - r.bearer = nil -} - -// Bearer returns bearer token of the request. -func (r RequestInfo) Bearer() *bearer.Token { - return r.bearer -} - -// BasicACL returns basic ACL of the container. -func (r RequestInfo) BasicACL() acl.Basic { - return r.basicACL -} - -// SenderKey returns public key of the request's sender. -func (r RequestInfo) SenderKey() []byte { - return r.senderKey -} - -// Operation returns request's operation. -func (r RequestInfo) Operation() acl.Op { - return r.operation -} - -// RequestRole returns request sender's role. -func (r RequestInfo) RequestRole() acl.Role { - return r.requestRole -} - -// MetaWithToken groups session and bearer tokens, -// verification header and raw API request. -type MetaWithToken struct { - vheader *sessionV2.RequestVerificationHeader - token *sessionSDK.Object - bearer *bearer.Token - src any -} - -// RequestOwner returns ownerID and its public key -// according to internal meta information. 
-func (r MetaWithToken) RequestOwner() (*user.ID, *keys.PublicKey, error) { - if r.vheader == nil { - return nil, nil, errEmptyVerificationHeader - } - - if r.bearer != nil && r.bearer.Impersonate() { - return unmarshalPublicKeyWithOwner(r.bearer.SigningKeyBytes()) - } - - // if session token is presented, use it as truth source - if r.token != nil { - // verify signature of session token - return ownerFromToken(r.token) - } - - // otherwise get original body signature - bodySignature := originalBodySignature(r.vheader) - if bodySignature == nil { - return nil, nil, errEmptyBodySig - } - - return unmarshalPublicKeyWithOwner(bodySignature.GetKey()) -} - -func unmarshalPublicKeyWithOwner(rawKey []byte) (*user.ID, *keys.PublicKey, error) { - key, err := unmarshalPublicKey(rawKey) - if err != nil { - return nil, nil, fmt.Errorf("invalid signature key: %w", err) - } - - var idSender user.ID - user.IDFromKey(&idSender, (ecdsa.PublicKey)(*key)) - - return &idSender, key, nil -} diff --git a/pkg/services/object/acl/v2/request_test.go b/pkg/services/object/acl/v2/request_test.go deleted file mode 100644 index 618af3469..000000000 --- a/pkg/services/object/acl/v2/request_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package v2 - -import ( - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - sigutilV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/util/signature" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/google/uuid" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" -) - -func TestRequestOwner(t *testing.T) { - containerOwner, err := keys.NewPrivateKey() - require.NoError(t, err) - - userPk, err := keys.NewPrivateKey() - require.NoError(t, err) - - var userID user.ID - user.IDFromKey(&userID, userPk.PrivateKey.PublicKey) - - var userSignature refs.Signature - userSignature.SetKey(userPk.PublicKey().Bytes()) - - vh := new(sessionV2.RequestVerificationHeader) - vh.SetBodySignature(&userSignature) - - t.Run("empty verification header", func(t *testing.T) { - req := MetaWithToken{} - checkOwner(t, req, nil, errEmptyVerificationHeader) - }) - t.Run("empty verification header signature", func(t *testing.T) { - req := MetaWithToken{ - vheader: new(sessionV2.RequestVerificationHeader), - } - checkOwner(t, req, nil, errEmptyBodySig) - }) - t.Run("no tokens", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - } - checkOwner(t, req, userPk.PublicKey(), nil) - }) - - t.Run("bearer without impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, false), - } - checkOwner(t, req, userPk.PublicKey(), nil) - }) - t.Run("bearer with impersonate, no session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("bearer with impersonate, with session", func(t *testing.T) { - // To check that bearer token takes priority, use different key to sign session token. 
- pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - req := MetaWithToken{ - vheader: vh, - bearer: newBearer(t, containerOwner, userID, true), - token: newSession(t, pk), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("with session", func(t *testing.T) { - req := MetaWithToken{ - vheader: vh, - token: newSession(t, containerOwner), - } - checkOwner(t, req, containerOwner.PublicKey(), nil) - }) - t.Run("malformed session token", func(t *testing.T) { - // This test is tricky: session token has issuer field and signature, which must correspond to each other. - // SDK prevents constructing such token in the first place, but it is still possible via API. - // Thus, construct v2 token, convert it to SDK one and pass to our function. - pk, err := keys.NewPrivateKey() - require.NoError(t, err) - - var user1 user.ID - user.IDFromKey(&user1, pk.PrivateKey.PublicKey) - - var id refs.OwnerID - id.SetValue(user1.WalletBytes()) - - raw, err := uuid.New().MarshalBinary() - require.NoError(t, err) - - var cidV2 refs.ContainerID - cidtest.ID().WriteToV2(&cidV2) - - sessionCtx := new(sessionV2.ObjectSessionContext) - sessionCtx.SetTarget(&cidV2) - - var body sessionV2.TokenBody - body.SetOwnerID(&id) - body.SetID(raw) - body.SetLifetime(new(sessionV2.TokenLifetime)) - body.SetSessionKey(pk.PublicKey().Bytes()) - body.SetContext(sessionCtx) - - var tokV2 sessionV2.Token - tokV2.SetBody(&body) - require.NoError(t, sigutilV2.SignData(&containerOwner.PrivateKey, smWrapper{Token: &tokV2})) - require.NoError(t, sigutilV2.VerifyData(smWrapper{Token: &tokV2})) - - var tok sessionSDK.Object - require.NoError(t, tok.ReadFromV2(tokV2)) - - req := MetaWithToken{ - vheader: vh, - token: &tok, - } - checkOwner(t, req, nil, errInvalidSessionOwner) - }) -} - -type smWrapper struct { - *sessionV2.Token -} - -func (s smWrapper) ReadSignedData(data []byte) ([]byte, error) { - return s.Token.GetBody().StableMarshal(data), nil -} - -func (s smWrapper) SignedDataSize() int { - return s.Token.GetBody().StableSize() -} - -func newSession(t *testing.T, pk *keys.PrivateKey) *sessionSDK.Object { - var tok sessionSDK.Object - require.NoError(t, tok.Sign(pk.PrivateKey)) - return &tok -} - -func newBearer(t *testing.T, pk *keys.PrivateKey, user user.ID, impersonate bool) *bearer.Token { - var tok bearer.Token - tok.SetImpersonate(impersonate) - tok.ForUser(user) - require.NoError(t, tok.Sign(pk.PrivateKey)) - return &tok -} - -func checkOwner(t *testing.T, req MetaWithToken, expected *keys.PublicKey, expectedErr error) { - _, actual, err := req.RequestOwner() - if expectedErr != nil { - require.ErrorIs(t, err, expectedErr) - return - } - - require.NoError(t, err) - require.Equal(t, expected, actual) -} diff --git a/pkg/services/object/acl/v2/service.go b/pkg/services/object/acl/v2/service.go deleted file mode 100644 index 86daec6cc..000000000 --- a/pkg/services/object/acl/v2/service.go +++ /dev/null @@ -1,779 +0,0 @@ -package v2 - -import ( - "context" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" - objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - apistatus 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "go.uber.org/zap" -) - -// Service checks basic ACL rules. -type Service struct { - *cfg - - c objectCore.SenderClassifier -} - -type putStreamBasicChecker struct { - source *Service - next object.PutObjectStream -} - -type patchStreamBasicChecker struct { - source *Service - next object.PatchObjectStream - nonFirstSend bool -} - -// Option represents Service constructor option. -type Option func(*cfg) - -type cfg struct { - log *logger.Logger - - containers container.Source - - irFetcher InnerRingFetcher - - nm netmap.Source - - next object.ServiceServer -} - -// New is a constructor for object ACL checking service. -func New(next object.ServiceServer, - nm netmap.Source, - irf InnerRingFetcher, - cs container.Source, - opts ...Option, -) Service { - cfg := &cfg{ - log: logger.NewLoggerWrapper(zap.L()), - next: next, - nm: nm, - irFetcher: irf, - containers: cs, - } - - for i := range opts { - opts[i](cfg) - } - - return Service{ - cfg: cfg, - c: objectCore.NewSenderClassifier(cfg.irFetcher, cfg.nm, cfg.log), - } -} - -// wrappedGetObjectStream propagates RequestContext into GetObjectStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedGetObjectStream struct { - object.GetObjectStream - - requestInfo RequestInfo -} - -func (w *wrappedGetObjectStream) Context() context.Context { - return context.WithValue(w.GetObjectStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedGetObjectStreamStream(getObjectStream object.GetObjectStream, reqInfo RequestInfo) object.GetObjectStream { - return &wrappedGetObjectStream{ - GetObjectStream: getObjectStream, - requestInfo: reqInfo, - } -} - -// wrappedRangeStream propagates RequestContext into GetObjectRangeStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. -type wrappedRangeStream struct { - object.GetObjectRangeStream - - requestInfo RequestInfo -} - -func (w *wrappedRangeStream) Context() context.Context { - return context.WithValue(w.GetObjectRangeStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedRangeStream(rangeStream object.GetObjectRangeStream, reqInfo RequestInfo) object.GetObjectRangeStream { - return &wrappedRangeStream{ - GetObjectRangeStream: rangeStream, - requestInfo: reqInfo, - } -} - -// wrappedSearchStream propagates RequestContext into SearchStream's context. -// This allows to retrieve already calculated immutable request-specific values in next handler invocation. 
-type wrappedSearchStream struct { - object.SearchStream - - requestInfo RequestInfo -} - -func (w *wrappedSearchStream) Context() context.Context { - return context.WithValue(w.SearchStream.Context(), object.RequestContextKey, &object.RequestContext{ - Namespace: w.requestInfo.ContainerNamespace(), - ContainerOwner: w.requestInfo.ContainerOwner(), - SenderKey: w.requestInfo.SenderKey(), - Role: w.requestInfo.RequestRole(), - BearerToken: w.requestInfo.Bearer(), - }) -} - -func newWrappedSearchStream(searchStream object.SearchStream, reqInfo RequestInfo) object.SearchStream { - return &wrappedSearchStream{ - SearchStream: searchStream, - requestInfo: reqInfo, - } -} - -// Get implements ServiceServer interface, makes ACL checks and calls -// next Get method in the ServiceServer pipeline. -func (b Service) Get(request *objectV2.GetRequest, stream object.GetObjectStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectGet) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.Get(request, newWrappedGetObjectStreamStream(stream, reqInfo)) -} - -func (b Service) Put(ctx context.Context) (object.PutObjectStream, error) { - streamer, err := b.next.Put(ctx) - - return putStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Patch(ctx context.Context) (object.PatchObjectStream, error) { - streamer, err := b.next.Patch(ctx) - - return &patchStreamBasicChecker{ - source: &b, - next: streamer, - }, err -} - -func (b Service) Head( - ctx context.Context, - request *objectV2.HeadRequest, -) (*objectV2.HeadResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHead) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Head(requestContext(ctx, reqInfo), request) -} - -func (b Service) Search(request *objectV2.SearchRequest, stream object.SearchStream) error { - id, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, id, nil) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := 
MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, id, acl.OpObjectSearch) - if err != nil { - return err - } - - return b.next.Search(request, newWrappedSearchStream(stream, reqInfo)) -} - -func (b Service) Delete( - ctx context.Context, - request *objectV2.DeleteRequest, -) (*objectV2.DeleteResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectDelete) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.Delete(requestContext(ctx, reqInfo), request) -} - -func (b Service) GetRange(request *objectV2.GetRangeRequest, stream object.GetObjectRangeStream) error { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(stream.Context(), req, cnr, acl.OpObjectRange) - if err != nil { - return err - } - - reqInfo.obj = obj - - return b.next.GetRange(request, newWrappedRangeStream(stream, reqInfo)) -} - -func requestContext(ctx context.Context, reqInfo RequestInfo) context.Context { - return context.WithValue(ctx, object.RequestContextKey, &object.RequestContext{ - Namespace: reqInfo.ContainerNamespace(), - ContainerOwner: reqInfo.ContainerOwner(), - SenderKey: reqInfo.SenderKey(), - Role: reqInfo.RequestRole(), - BearerToken: reqInfo.Bearer(), - }) -} - -func (b Service) GetRangeHash( - ctx context.Context, - request *objectV2.GetRangeHashRequest, -) (*objectV2.GetRangeHashResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - obj, err := getObjectIDFromRequestBody(request.GetBody()) - if err != nil { - return nil, err - } - - sTok, err := originalSessionToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - if sTok != nil { - err = assertSessionRelation(*sTok, cnr, obj) - if err != nil { - return nil, err - } - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectHash) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.GetRangeHash(requestContext(ctx, reqInfo), request) -} - -func (b Service) 
PutSingle(ctx context.Context, request *objectV2.PutSingleRequest) (*objectV2.PutSingleResponse, error) { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return nil, err - } - - idV2 := request.GetBody().GetObject().GetHeader().GetOwnerID() - if idV2 == nil { - return nil, errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return nil, fmt.Errorf("invalid object owner: %w", err) - } - - obj, err := getObjectIDFromRefObjectID(request.GetBody().GetObject().GetObjectID()) - if err != nil { - return nil, err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return nil, err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return nil, err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := b.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return nil, err - } - - reqInfo.obj = obj - - return b.next.PutSingle(requestContext(ctx, reqInfo), request) -} - -func (p putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - part := body.GetObjectPart() - if part, ok := part.(*objectV2.PutObjectPartInit); ok { - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - idV2 := part.GetHeader().GetOwnerID() - if idV2 == nil { - return errors.New("missing object owner") - } - - var idOwner user.ID - - err = idOwner.ReadFromV2(*idV2) - if err != nil { - return fmt.Errorf("invalid object owner: %w", err) - } - - objV2 := part.GetObjectID() - var obj *oid.ID - - if objV2 != nil { - obj = new(oid.ID) - - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfo(ctx, req, cnr, acl.OpObjectPut) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func readSessionToken(cnr cid.ID, obj *oid.ID, tokV2 *session.Token) (*sessionSDK.Object, error) { - var sTok *sessionSDK.Object - - if tokV2 != nil { - sTok = new(sessionSDK.Object) - - err := sTok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - if sTok.AssertVerb(sessionSDK.VerbObjectDelete) { - // if session relates to object's removal, we don't check - // relation of the tombstone to the session here since user - // can't predict tomb's ID. 
- err = assertSessionRelation(*sTok, cnr, nil) - } else { - err = assertSessionRelation(*sTok, cnr, obj) - } - - if err != nil { - return nil, err - } - } - - return sTok, nil -} - -func (p putStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PutResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.PatchRequest) error { - body := request.GetBody() - if body == nil { - return errEmptyBody - } - - if !p.nonFirstSend { - p.nonFirstSend = true - - cnr, err := getContainerIDFromRequest(request) - if err != nil { - return err - } - - objV2 := request.GetBody().GetAddress().GetObjectID() - if objV2 == nil { - return errors.New("missing oid") - } - obj := new(oid.ID) - err = obj.ReadFromV2(*objV2) - if err != nil { - return err - } - - var sTok *sessionSDK.Object - sTok, err = readSessionToken(cnr, obj, request.GetMetaHeader().GetSessionToken()) - if err != nil { - return err - } - - bTok, err := originalBearerToken(request.GetMetaHeader()) - if err != nil { - return err - } - - req := MetaWithToken{ - vheader: request.GetVerificationHeader(), - token: sTok, - bearer: bTok, - src: request, - } - - reqInfo, err := p.source.findRequestInfoWithoutACLOperationAssert(ctx, req, cnr) - if err != nil { - return err - } - - reqInfo.obj = obj - - ctx = requestContext(ctx, reqInfo) - } - - return p.next.Send(ctx, request) -} - -func (p patchStreamBasicChecker) CloseAndRecv(ctx context.Context) (*objectV2.PatchResponse, error) { - return p.next.CloseAndRecv(ctx) -} - -func (b Service) findRequestInfo(ctx context.Context, req MetaWithToken, idCnr cid.ID, op acl.Op) (info RequestInfo, err error) { - cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch(ctx) - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - - if !assertVerb(*req.token, op) { - return info, errInvalidVerb - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.operation = op - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} - -// findRequestInfoWithoutACLOperationAssert is findRequestInfo without session token verb assert. 
-func (b Service) findRequestInfoWithoutACLOperationAssert(ctx context.Context, req MetaWithToken, idCnr cid.ID) (info RequestInfo, err error) { - cnr, err := b.containers.Get(ctx, idCnr) // fetch actual container - if err != nil { - return info, err - } - - if req.token != nil { - currentEpoch, err := b.nm.Epoch(ctx) - if err != nil { - return info, errors.New("can't fetch current epoch") - } - if req.token.ExpiredAt(currentEpoch) { - return info, new(apistatus.SessionTokenExpired) - } - if req.token.InvalidAt(currentEpoch) { - return info, fmt.Errorf("%s: token is invalid at %d epoch)", - invalidRequestMessage, currentEpoch) - } - } - - // find request role and key - ownerID, ownerKey, err := req.RequestOwner() - if err != nil { - return info, err - } - res, err := b.c.Classify(ctx, ownerID, ownerKey, idCnr, cnr.Value) - if err != nil { - return info, err - } - - info.basicACL = cnr.Value.BasicACL() - info.requestRole = res.Role - info.cnrOwner = cnr.Value.Owner() - info.idCnr = idCnr - - cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") - if hasNamespace { - info.cnrNamespace = cnrNamespace - } - - // it is assumed that at the moment the key will be valid, - // otherwise the request would not pass validation - info.senderKey = res.Key - - // add bearer token if it is present in request - info.bearer = req.bearer - - info.srcRequest = req.src - - return info, nil -} diff --git a/pkg/services/object/acl/v2/types.go b/pkg/services/object/acl/v2/types.go deleted file mode 100644 index 3cf10eb56..000000000 --- a/pkg/services/object/acl/v2/types.go +++ /dev/null @@ -1,11 +0,0 @@ -package v2 - -import "context" - -// InnerRingFetcher is an interface that must provide -// Inner Ring information. -type InnerRingFetcher interface { - // InnerRingKeys must return list of public keys of - // the actual inner ring. 
- InnerRingKeys(ctx context.Context) ([][]byte, error) -} diff --git a/pkg/services/object/acl/v2/util.go b/pkg/services/object/acl/v2/util.go deleted file mode 100644 index e02f70771..000000000 --- a/pkg/services/object/acl/v2/util.go +++ /dev/null @@ -1,223 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "errors" - "fmt" - - objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" - refsV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/refs" - sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -var errMissingContainerID = errors.New("missing container ID") - -func getContainerIDFromRequest(req any) (cid.ID, error) { - var idV2 *refsV2.ContainerID - var id cid.ID - - switch v := req.(type) { - case *objectV2.GetRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutRequest: - part, ok := v.GetBody().GetObjectPart().(*objectV2.PutObjectPartInit) - if !ok { - return cid.ID{}, errors.New("can't get container ID in chunk") - } - - idV2 = part.GetHeader().GetContainerID() - case *objectV2.HeadRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.SearchRequest: - idV2 = v.GetBody().GetContainerID() - case *objectV2.DeleteRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.GetRangeHashRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - case *objectV2.PutSingleRequest: - idV2 = v.GetBody().GetObject().GetHeader().GetContainerID() - case *objectV2.PatchRequest: - idV2 = v.GetBody().GetAddress().GetContainerID() - default: - return cid.ID{}, errors.New("unknown request type") - } - - if idV2 == nil { - return cid.ID{}, errMissingContainerID - } - - return id, id.ReadFromV2(*idV2) -} - -// originalBearerToken goes down to original request meta header and fetches -// bearer token from there. -func originalBearerToken(header *sessionV2.RequestMetaHeader) (*bearer.Token, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetBearerToken() - if tokV2 == nil { - return nil, nil - } - - var tok bearer.Token - return &tok, tok.ReadFromV2(*tokV2) -} - -// originalSessionToken goes down to original request meta header and fetches -// session token from there. -func originalSessionToken(header *sessionV2.RequestMetaHeader) (*sessionSDK.Object, error) { - for header.GetOrigin() != nil { - header = header.GetOrigin() - } - - tokV2 := header.GetSessionToken() - if tokV2 == nil { - return nil, nil - } - - var tok sessionSDK.Object - - err := tok.ReadFromV2(*tokV2) - if err != nil { - return nil, fmt.Errorf("invalid session token: %w", err) - } - - return &tok, nil -} - -// getObjectIDFromRequestBody decodes oid.ID from the common interface of the -// object reference's holders. Returns an error if object ID is missing in the request. 
-func getObjectIDFromRequestBody(body interface{ GetAddress() *refsV2.Address }) (*oid.ID, error) { - idV2 := body.GetAddress().GetObjectID() - return getObjectIDFromRefObjectID(idV2) -} - -func getObjectIDFromRefObjectID(idV2 *refsV2.ObjectID) (*oid.ID, error) { - if idV2 == nil { - return nil, errors.New("missing object ID") - } - - var id oid.ID - - err := id.ReadFromV2(*idV2) - if err != nil { - return nil, err - } - - return &id, nil -} - -func ownerFromToken(token *sessionSDK.Object) (*user.ID, *keys.PublicKey, error) { - // 1. First check signature of session token. - if !token.VerifySignature() { - return nil, nil, errInvalidSessionSig - } - - // 2. Then check if session token owner issued the session token - // TODO(@cthulhu-rider): #468 implement and use another approach to avoid conversion - var tokV2 sessionV2.Token - token.WriteToV2(&tokV2) - - tokenIssuerKey, err := unmarshalPublicKey(tokV2.GetSignature().GetKey()) - if err != nil { - return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err) - } - - tokenIssuer := token.Issuer() - - if !isOwnerFromKey(tokenIssuer, tokenIssuerKey) { - // TODO: #767 in this case we can issue all owner keys from frostfs.id and check once again - return nil, nil, errInvalidSessionOwner - } - - return &tokenIssuer, tokenIssuerKey, nil -} - -func originalBodySignature(v *sessionV2.RequestVerificationHeader) *refsV2.Signature { - if v == nil { - return nil - } - - for v.GetOrigin() != nil { - v = v.GetOrigin() - } - - return v.GetBodySignature() -} - -func unmarshalPublicKey(bs []byte) (*keys.PublicKey, error) { - return keys.NewPublicKeyFromBytes(bs, elliptic.P256()) -} - -func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool { - if key == nil { - return false - } - - var id2 user.ID - user.IDFromKey(&id2, (ecdsa.PublicKey)(*key)) - - return id2.Equals(id) -} - -// assertVerb checks that token verb corresponds to op. -func assertVerb(tok sessionSDK.Object, op acl.Op) bool { - switch op { - case acl.OpObjectPut: - return tok.AssertVerb(sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete, sessionSDK.VerbObjectPatch) - case acl.OpObjectDelete: - return tok.AssertVerb(sessionSDK.VerbObjectDelete) - case acl.OpObjectGet: - return tok.AssertVerb(sessionSDK.VerbObjectGet) - case acl.OpObjectHead: - return tok.AssertVerb( - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectPatch, - ) - case acl.OpObjectSearch: - return tok.AssertVerb(sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete) - case acl.OpObjectRange: - return tok.AssertVerb(sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash, sessionSDK.VerbObjectPatch) - case acl.OpObjectHash: - return tok.AssertVerb(sessionSDK.VerbObjectRangeHash) - } - - return false -} - -// assertSessionRelation checks if given token describing the FrostFS session -// relates to the given container and optional object. Missing object -// means that the context isn't bound to any FrostFS object in the container. -// Returns no error iff relation is correct. Criteria: -// -// session is bound to the given container -// object is not specified or session is bound to this object -// -// Session MUST be bound to the particular container, otherwise behavior is undefined. 
-func assertSessionRelation(tok sessionSDK.Object, cnr cid.ID, obj *oid.ID) error { - if !tok.AssertContainer(cnr) { - return errors.New("requested container is not related to the session") - } - - if obj != nil && !tok.AssertObject(*obj) { - return errors.New("requested object is not related to the session") - } - - return nil -} diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go deleted file mode 100644 index 40fce8877..000000000 --- a/pkg/services/object/acl/v2/util_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package v2 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "slices" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" - bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" - aclsdk "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session" - sessiontest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session/test" - "github.com/stretchr/testify/require" -) - -func TestOriginalTokens(t *testing.T) { - sToken := sessiontest.ObjectSigned() - bToken := bearertest.Token() - - pk, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - require.NoError(t, bToken.Sign(*pk)) - - var bTokenV2 acl.BearerToken - bToken.WriteToV2(&bTokenV2) - // This line is needed because SDK uses some custom format for - // reserved filters, so `cid.ID` is not converted to string immediately. - require.NoError(t, bToken.ReadFromV2(bTokenV2)) - - var sTokenV2 session.Token - sToken.WriteToV2(&sTokenV2) - - for i := range 10 { - metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) - res, err := originalSessionToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, sToken, res, i) - - bTok, err := originalBearerToken(metaHeaders) - require.NoError(t, err) - require.Equal(t, &bToken, bTok, i) - } -} - -func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token) *session.RequestMetaHeader { - metaHeader := new(session.RequestMetaHeader) - metaHeader.SetBearerToken(b) - metaHeader.SetSessionToken(s) - - for range depth { - link := metaHeader - metaHeader = new(session.RequestMetaHeader) - metaHeader.SetOrigin(link) - } - - return metaHeader -} - -func TestIsVerbCompatible(t *testing.T) { - // Source: https://nspcc.ru/upload/frostfs-spec-latest.pdf#page=28 - table := map[aclsdk.Op][]sessionSDK.ObjectVerb{ - aclsdk.OpObjectPut: {sessionSDK.VerbObjectPut, sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectDelete: {sessionSDK.VerbObjectDelete}, - aclsdk.OpObjectGet: {sessionSDK.VerbObjectGet}, - aclsdk.OpObjectHead: { - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - }, - aclsdk.OpObjectRange: {sessionSDK.VerbObjectRange, sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectHash: {sessionSDK.VerbObjectRangeHash}, - aclsdk.OpObjectSearch: {sessionSDK.VerbObjectSearch, sessionSDK.VerbObjectDelete}, - } - - verbs := []sessionSDK.ObjectVerb{ - sessionSDK.VerbObjectPut, - sessionSDK.VerbObjectDelete, - sessionSDK.VerbObjectHead, - sessionSDK.VerbObjectRange, - sessionSDK.VerbObjectRangeHash, - sessionSDK.VerbObjectGet, - sessionSDK.VerbObjectSearch, - } - - var tok sessionSDK.Object - - for op, list := range table { 
- for _, verb := range verbs { - contains := slices.Contains(list, verb) - - tok.ForVerb(verb) - - require.Equal(t, contains, assertVerb(tok, op), - "%v in token, %s executing", verb, op) - } - } -} - -func TestAssertSessionRelation(t *testing.T) { - var tok sessionSDK.Object - cnr := cidtest.ID() - cnrOther := cidtest.ID() - obj := oidtest.ID() - objOther := oidtest.ID() - - // make sure ids differ, otherwise test won't work correctly - require.False(t, cnrOther.Equals(cnr)) - require.False(t, objOther.Equals(obj)) - - // bind session to the container (required) - tok.BindContainer(cnr) - - // test container-global session - require.NoError(t, assertSessionRelation(tok, cnr, nil)) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnrOther, nil)) - require.Error(t, assertSessionRelation(tok, cnrOther, &obj)) - - // limit the session to the particular object - tok.LimitByObjects(obj) - - // test fixed object session (here obj arg must be non-nil everywhere) - require.NoError(t, assertSessionRelation(tok, cnr, &obj)) - require.Error(t, assertSessionRelation(tok, cnr, &objOther)) -} diff --git a/pkg/services/object/request_context.go b/pkg/services/object/request_context.go deleted file mode 100644 index eb4041f80..000000000 --- a/pkg/services/object/request_context.go +++ /dev/null @@ -1,24 +0,0 @@ -package object - -import ( - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" -) - -type RequestContextKeyT struct{} - -var RequestContextKey = RequestContextKeyT{} - -// RequestContext is a context passed between middleware handlers. -type RequestContext struct { - Namespace string - - SenderKey []byte - - ContainerOwner user.ID - - Role acl.Role - - BearerToken *bearer.Token -} From 3bb1fb744a6b09e796b6e33c80910e90ab69f67f Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 15:56:11 +0300 Subject: [PATCH 462/591] [#1689] morph/client: Reuse auto-generated wrappers for NNS Make code simpler, remove unused methods. 
Change-Id: I18807f2c14b5a96e533e5e3fc153e23c742c66c1 Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/client.go | 4 ++ pkg/morph/client/nns.go | 96 ++++++-------------------------------- 2 files changed, 19 insertions(+), 81 deletions(-) diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index a7c3c6d8d..1c12130b7 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -9,6 +9,7 @@ import ( "sync/atomic" "time" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" morphmetrics "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/metrics" @@ -62,6 +63,8 @@ type Client struct { rolemgmt *rolemgmt.Contract // neo-go Designation contract wrapper nnsHash util.Uint160 // NNS contract hash + nnsReader *nnsClient.ContractReader // NNS contract wrapper + acc *wallet.Account // neo account accAddr util.Uint160 // account's address @@ -576,6 +579,7 @@ func (c *Client) setActor(act *actor.Actor) { c.rpcActor = act c.gasToken = nep17.New(act, gas.Hash) c.rolemgmt = rolemgmt.New(act) + c.nnsReader = nnsClient.NewReader(act, c.nnsHash) } func (c *Client) GetActor() *actor.Actor { diff --git a/pkg/morph/client/nns.go b/pkg/morph/client/nns.go index b03967fdd..bc00eb889 100644 --- a/pkg/morph/client/nns.go +++ b/pkg/morph/client/nns.go @@ -8,14 +8,12 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nnsClient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/smartcontract" "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/stackitem" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" ) const ( @@ -37,12 +35,8 @@ const ( NNSPolicyContractName = "policy.frostfs" ) -var ( - // ErrNNSRecordNotFound means that there is no such record in NNS contract. - ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") - - errEmptyResultStack = errors.New("returned result stack is empty") -) +// ErrNNSRecordNotFound means that there is no such record in NNS contract. +var ErrNNSRecordNotFound = errors.New("record has not been found in NNS contract") // NNSAlphabetContractName returns contract name of the alphabet contract in NNS // based on alphabet index. @@ -61,67 +55,36 @@ func (c *Client) NNSContractAddress(name string) (sh util.Uint160, err error) { return util.Uint160{}, ErrConnectionLost } - nnsHash := c.NNSHash() - sh, err = nnsResolve(c.client, nnsHash, name) + sh, err = nnsResolve(c.nnsReader, name) if err != nil { return sh, fmt.Errorf("NNS.resolve: %w", err) } return sh, nil } -// NNSHash returns NNS contract hash. 
-func (c *Client) NNSHash() util.Uint160 { - return c.nnsHash -} - -func nnsResolveItem(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (stackitem.Item, error) { - found, err := exists(c, nnsHash, domain) +func nnsResolveItem(r *nnsClient.ContractReader, domain string) ([]stackitem.Item, error) { + available, err := r.IsAvailable(domain) if err != nil { return nil, fmt.Errorf("check presence in NNS contract for %s: %w", domain, err) } - if !found { + if available { return nil, ErrNNSRecordNotFound } - result, err := c.InvokeFunction(nnsHash, "resolve", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - { - Type: smartcontract.IntegerType, - Value: big.NewInt(int64(nns.TXT)), - }, - }, nil) - if err != nil { - return nil, err - } - if result.State != vmstate.Halt.String() { - return nil, fmt.Errorf("invocation failed: %s", result.FaultException) - } - if len(result.Stack) == 0 { - return nil, errEmptyResultStack - } - return result.Stack[0], nil + return r.Resolve(domain, big.NewInt(int64(nns.TXT))) } -func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (util.Uint160, error) { - res, err := nnsResolveItem(c, nnsHash, domain) +func nnsResolve(r *nnsClient.ContractReader, domain string) (util.Uint160, error) { + arr, err := nnsResolveItem(r, domain) if err != nil { return util.Uint160{}, err } - // Parse the result of resolving NNS record. - // It works with multiple formats (corresponding to multiple NNS versions). - // If array of hashes is provided, it returns only the first one. - if arr, ok := res.Value().([]stackitem.Item); ok { - if len(arr) == 0 { - return util.Uint160{}, errors.New("NNS record is missing") - } - res = arr[0] + if len(arr) == 0 { + return util.Uint160{}, errors.New("NNS record is missing") } - bs, err := res.TryBytes() + bs, err := arr[0].TryBytes() if err != nil { return util.Uint160{}, fmt.Errorf("malformed response: %w", err) } @@ -141,33 +104,6 @@ func nnsResolve(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (uti return util.Uint160{}, errors.New("no valid hashes are found") } -func exists(c *rpcclient.WSClient, nnsHash util.Uint160, domain string) (bool, error) { - result, err := c.InvokeFunction(nnsHash, "isAvailable", []smartcontract.Parameter{ - { - Type: smartcontract.StringType, - Value: domain, - }, - }, nil) - if err != nil { - return false, err - } - - if len(result.Stack) == 0 { - return false, errEmptyResultStack - } - - res := result.Stack[0] - - available, err := res.TryBool() - if err != nil { - return false, fmt.Errorf("malformed response: %w", err) - } - - // not available means that it is taken - // and, therefore, exists - return !available, nil -} - // SetGroupSignerScope makes the default signer scope include all FrostFS contracts. // Should be called for side-chain client only. 
func (c *Client) SetGroupSignerScope() error { @@ -211,14 +147,12 @@ func (c *Client) contractGroupKey() (*keys.PublicKey, error) { return gKey, nil } - nnsHash := c.NNSHash() - item, err := nnsResolveItem(c.client, nnsHash, NNSGroupKeyName) + arr, err := nnsResolveItem(c.nnsReader, NNSGroupKeyName) if err != nil { return nil, err } - arr, ok := item.Value().([]stackitem.Item) - if !ok || len(arr) == 0 { + if len(arr) == 0 { return nil, errors.New("NNS record is missing") } From 0a9d139e20fed1d6386f5913f2da9dc749d85d4e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 16:03:10 +0300 Subject: [PATCH 463/591] [#1689] morph/client: Remove notary hash field from `notaryInfo` Notary contract hash is constant. Change-Id: I7935580acbced5c9d567875ea75daa57cc259a3c Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/notary.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index dbd58a53a..4e20a3639 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -38,8 +38,7 @@ type ( alphabetSource AlphabetKeys // source of alphabet node keys to prepare witness - notary util.Uint160 - proxy util.Uint160 + proxy util.Uint160 } notaryCfg struct { @@ -102,7 +101,6 @@ func (c *Client) EnableNotarySupport(opts ...NotaryOption) error { txValidTime: cfg.txValidTime, roundTime: cfg.roundTime, alphabetSource: cfg.alphabetSource, - notary: notary.Hash, } c.notary = notaryCfg @@ -188,7 +186,7 @@ func (c *Client) DepositEndlessNotary(ctx context.Context, amount fixedn.Fixed8) func (c *Client) depositNotary(ctx context.Context, amount fixedn.Fixed8, till int64) (util.Uint256, uint32, error) { txHash, vub, err := c.gasToken.Transfer( c.accAddr, - c.notary.notary, + notary.Hash, big.NewInt(int64(amount)), []any{c.acc.PrivateKey().GetScriptHash(), till}) if err != nil { From 0c664fa804bbecab361fdec2287fa42b051c82d4 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 24 Mar 2025 11:52:39 +0300 Subject: [PATCH 464/591] [#1689] qos: Fix metric description Change-Id: I460fdd3713e765d57ef3ff2945b9b3776f46c164 Signed-off-by: Dmitrii Stepanov --- internal/metrics/qos.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/metrics/qos.go b/internal/metrics/qos.go index 17fb67a27..be6878142 100644 --- a/internal/metrics/qos.go +++ b/internal/metrics/qos.go @@ -15,7 +15,7 @@ func newQoSMetrics() *QoSMetrics { Namespace: namespace, Subsystem: qosSubsystem, Name: "operations_total", - Help: "Count of pending, in progree, completed and failed due of resource exhausted error operations for each shard", + Help: "Count of pending, in progress, completed and failed due of resource exhausted error operations for each shard", }, []string{shardIDLabel, operationLabel, ioTagLabel, typeLabel}), } } From 163e2e9f83a79def2d7bfda6b2b6609cfa02c1b2 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Mon, 24 Mar 2025 17:04:24 +0300 Subject: [PATCH 465/591] ci: Cache pre-commit installations on Jenkins Agent This change introduces a custom helper from our shared library [0] that defines ad-hoc container environment to execute CI steps in. Pre-commit installation will now be cached on Jenkins Agent: builds will tolerate network hiccups better and we will also save some run time (although on non-critical path of a parallel process). 
[0]: https://git.frostfs.info/TrueCloudLab/jenkins/pulls/8 Change-Id: I93b01f169c457aa35f4d8bc5b90f31b31e2bd8b2 Signed-off-by: Vitaliy Potyarkin --- .ci/Jenkinsfile | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index e21ce61c5..166fb9286 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -68,12 +68,14 @@ async { } task('pre-commit') { - sh ''' - apt update - apt install -y --no-install-recommends pre-commit - ''' // TODO: Make an OCI image for pre-commit + golang? Unpack golang tarball with a library function? - withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { - sh 'pre-commit run --color=always --hook-stage=manual --all-files' + dockerfile(""" + FROM ${golangDefault} + RUN apt update && \ + apt install -y --no-install-recommends pre-commit + """) { + withEnv(['SKIP=make-lint,go-staticcheck-repo-mod,go-unit-tests,gofumpt']) { + sh 'pre-commit run --color=always --hook-stage=manual --all-files' + } } } } From 5470b205fd461d95c968c55ed122334c7e7f32cd Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 25 Mar 2025 10:15:51 +0300 Subject: [PATCH 466/591] [#1619] gc: Fix metric `frostfs_node.garbage_collector.marking_duration_seconds` Change-Id: I957f930d1babf179d0fb6de624a90f4fe9977862 Signed-off-by: Anton Nikiforov --- pkg/local_object_storage/shard/gc.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 32a377cd5..84fb6039e 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -527,7 +527,8 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { return } - release, err := s.opsLimiter.ReadRequest(ctx) + var release qos.ReleaseFunc + release, err = s.opsLimiter.ReadRequest(ctx) if err != nil { log.Error(ctx, logs.ShardIteratorOverGraveyardFailed, zap.Error(err)) s.m.RUnlock() From 9358938222bc44bd4f4638a78b74cd331cdba808 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 10 Mar 2025 11:42:55 +0300 Subject: [PATCH 467/591] [#1633] go.mod: Bump frostfs-sdk-go Change-Id: I50c1a0d5b88e307402a5b1b2883bb9b9a357a2c7 Signed-off-by: Anton Nikiforov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 753bfbd29..887f8bb2b 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b diff --git a/go.sum b/go.sum index 7f9417954..982fddf23 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8= git.frostfs.info/TrueCloudLab/frostfs-qos 
v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9 h1:svCl6NDAPZ/KuQPjdVKo74RkCIANesxUPM45zQZDhSw= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250306092416-69b0711d12d9/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 h1:n7Pl8V7O1yS07J/fqdbzZjVe/mQW42a7eS0QHfgrzJw= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 3bbee1b554a1407e1b8c30ef0244cd4599c4299f Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 24 Mar 2025 12:32:41 +0300 Subject: [PATCH 468/591] [#1619] logger: Allow to set options for `zap.Logger` via `logger.Prm` Change-Id: I8eed951c25d1ecf18b0aea62c6825be65a450085 Signed-off-by: Anton Nikiforov --- cmd/frostfs-node/config.go | 16 ++++++++++------ pkg/util/logger/logger.go | 20 +++++++++++++------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index c3c687763..431316258 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -108,6 +108,7 @@ type applicationConfiguration struct { level string destination string timestamp bool + options []zap.Option } ObjectCfg struct { @@ -232,6 +233,14 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { a.LoggerCfg.level = loggerconfig.Level(c) a.LoggerCfg.destination = loggerconfig.Destination(c) a.LoggerCfg.timestamp = loggerconfig.Timestamp(c) + var opts []zap.Option + if loggerconfig.ToLokiConfig(c).Enabled { + opts = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(c)) + return lokiCore + })} + } + a.LoggerCfg.options = opts // Object @@ -718,12 +727,6 @@ func initCfg(appCfg *config.Config) *cfg { logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) - if loggerconfig.ToLokiConfig(appCfg).Enabled { - log.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { - lokiCore := lokicore.New(core, loggerconfig.ToLokiConfig(appCfg)) - return lokiCore - })) - } c.internals = initInternals(appCfg, log) @@ -1090,6 +1093,7 @@ func (c *cfg) loggerPrm() (logger.Prm, error) { return logger.Prm{}, errors.New("incorrect log destination format: " + c.LoggerCfg.destination) } prm.PrependTimestamp = c.LoggerCfg.timestamp + prm.Options = c.LoggerCfg.options return prm, nil } diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 952a6f2dc..10c7e8dc9 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -36,6 +36,9 @@ type Prm struct { // PrependTimestamp specifies whether to prepend a timestamp in the log PrependTimestamp bool + + // Options for zap.Logger + Options []zap.Option } const ( @@ -103,10 +106,12 @@ func newConsoleLogger(prm Prm) (*Logger, error) { c.EncoderConfig.TimeKey = "" } - lZap, err := c.Build( + opts := []zap.Option{ zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1), - ) + } + opts = append(opts, 
prm.Options...) + lZap, err := c.Build(opts...) if err != nil { return nil, err } @@ -150,7 +155,12 @@ func newJournaldLogger(prm Prm) (*Logger, error) { c.Sampling.Thereafter, samplerOpts..., ) - lZap := zap.New(samplingCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), zap.AddCallerSkip(1)) + opts := []zap.Option{ + zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)), + zap.AddCallerSkip(1), + } + opts = append(opts, prm.Options...) + lZap := zap.New(samplingCore, opts...) l := &Logger{z: lZap, lvl: lvl} @@ -161,10 +171,6 @@ func (l *Logger) Reload(prm Prm) { l.lvl.SetLevel(prm.level) } -func (l *Logger) WithOptions(options ...zap.Option) { - l.z = l.z.WithOptions(options...) -} - func (l *Logger) With(fields ...zap.Field) *Logger { return &Logger{z: l.z.With(fields...)} } From 632bd8e38dd34e2438dd39c53111605dd94ef8b1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 25 Mar 2025 12:15:46 +0300 Subject: [PATCH 469/591] [#1696] qos: Fix internal tag adjust If request has no tag, but request's public key is netmap node's key or one of allowed internal tag keys from config, then request must use internal IO tag. Change-Id: Iff93b626941a81b088d8999b3f2947f9501dcdf8 Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/qos.go | 39 ++++-- cmd/frostfs-node/qos_test.go | 226 ++++++++++++++++++++++++++++++ internal/logs/logs.go | 2 +- pkg/core/object/fmt_test.go | 42 ++---- pkg/util/testing/netmap_source.go | 36 +++++ 5 files changed, 298 insertions(+), 47 deletions(-) create mode 100644 cmd/frostfs-node/qos_test.go create mode 100644 pkg/util/testing/netmap_source.go diff --git a/cmd/frostfs-node/qos.go b/cmd/frostfs-node/qos.go index 9663fc6ae..6394b668b 100644 --- a/cmd/frostfs-node/qos.go +++ b/cmd/frostfs-node/qos.go @@ -43,6 +43,9 @@ func initQoSService(c *cfg) { func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublicKey []byte) context.Context { rawTag, defined := qosTagging.IOTagFromContext(ctx) if !defined { + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return qosTagging.ContextWithIOTag(ctx, qos.IOTagInternal.String()) + } return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } ioTag, err := qos.FromRawString(rawTag) @@ -73,20 +76,8 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) case qos.IOTagInternal: - for _, pk := range s.allowedInternalPubs { - if bytes.Equal(pk, requestSignPublicKey) { - return ctx - } - } - nm, err := s.netmapSource.GetNetMap(ctx, 0) - if err != nil { - s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) - return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) - } - for _, node := range nm.Nodes() { - if bytes.Equal(node.PublicKey(), requestSignPublicKey) { - return ctx - } + if s.isInternalIOTagPublicKey(ctx, requestSignPublicKey) { + return ctx } s.logger.Debug(ctx, logs.FailedToValidateIncomingIOTag) return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) @@ -95,3 +86,23 @@ func (s *cfgQoSService) AdjustIncomingTag(ctx context.Context, requestSignPublic return qosTagging.ContextWithIOTag(ctx, qos.IOTagClient.String()) } } + +func (s *cfgQoSService) isInternalIOTagPublicKey(ctx context.Context, publicKey []byte) bool { + for _, pk := range s.allowedInternalPubs { + if bytes.Equal(pk, publicKey) { + return true + } + } + nm, err := s.netmapSource.GetNetMap(ctx, 0) + if err != nil { + 
s.logger.Debug(ctx, logs.FailedToGetNetmapToAdjustIOTag, zap.Error(err)) + return false + } + for _, node := range nm.Nodes() { + if bytes.Equal(node.PublicKey(), publicKey) { + return true + } + } + + return false +} diff --git a/cmd/frostfs-node/qos_test.go b/cmd/frostfs-node/qos_test.go new file mode 100644 index 000000000..971f9eebf --- /dev/null +++ b/cmd/frostfs-node/qos_test.go @@ -0,0 +1,226 @@ +package main + +import ( + "context" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +func TestQoSService_Client(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag client defined", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagClient.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with unknown key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed critical key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed critical key", func(t *testing.T) { + ctx := 
tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with unknown key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Request) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func TestQoSService_Internal(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("IO tag internal defined, signed with allowed internal key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) + t.Run("no IO tag defined, signed with allowed internal key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.Internal) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagInternal.String(), tag) + }) +} + +func TestQoSService_Critical(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + t.Run("IO tag critical defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) + t.Run("IO tag critical defined, signed with allowed critical key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.Critical) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagCritical.String(), tag) + }) +} + +func TestQoSService_NetmapGetError(t *testing.T) { + t.Parallel() + s, pk := testQoSServicePrepare(t) + s.netmapSource = &utilTesting.TestNetmapSource{} + t.Run("IO tag internal defined, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagInternal.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("IO tag critical defined, signed with netmap key", 
func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("no IO tag defined, signed with netmap key", func(t *testing.T) { + ctx := s.AdjustIncomingTag(context.Background(), pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) + t.Run("unknown IO tag, signed with netmap key", func(t *testing.T) { + ctx := tagging.ContextWithIOTag(context.Background(), "some IO tag we don't know") + ctx = s.AdjustIncomingTag(ctx, pk.NetmapNode) + tag, ok := tagging.IOTagFromContext(ctx) + require.True(t, ok) + require.Equal(t, qos.IOTagClient.String(), tag) + }) +} + +func testQoSServicePrepare(t *testing.T) (*cfgQoSService, *testQoSServicePublicKeys) { + nmSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + reqSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedCritSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + allowedIntSigner, err := keys.NewPrivateKey() + require.NoError(t, err) + + var node netmap.NodeInfo + node.SetPublicKey(nmSigner.PublicKey().Bytes()) + nm := &netmap.NetMap{} + nm.SetEpoch(100) + nm.SetNodes([]netmap.NodeInfo{node}) + + return &cfgQoSService{ + logger: test.NewLogger(t), + netmapSource: &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ + 100: nm, + }, + CurrentEpoch: 100, + }, + allowedCriticalPubs: [][]byte{ + allowedCritSigner.PublicKey().Bytes(), + }, + allowedInternalPubs: [][]byte{ + allowedIntSigner.PublicKey().Bytes(), + }, + }, + &testQoSServicePublicKeys{ + NetmapNode: nmSigner.PublicKey().Bytes(), + Request: reqSigner.PublicKey().Bytes(), + Internal: allowedIntSigner.PublicKey().Bytes(), + Critical: allowedCritSigner.PublicKey().Bytes(), + } +} + +type testQoSServicePublicKeys struct { + NetmapNode []byte + Request []byte + Internal []byte + Critical []byte +} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 3503c922e..5b42b25ba 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -512,7 +512,7 @@ const ( FailedToUpdateMultinetConfiguration = "failed to update multinet configuration" FailedToParseIncomingIOTag = "failed to parse incoming IO tag" NotSupportedIncomingIOTagReplacedWithClient = "incoming IO tag is not supported, replaced with `client`" - FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag, replaced with `client`" + FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag" FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" ) diff --git a/pkg/core/object/fmt_test.go b/pkg/core/object/fmt_test.go index 239a9f389..dc336eb34 100644 --- a/pkg/core/object/fmt_test.go +++ b/pkg/core/object/fmt_test.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + utilTesting "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/testing" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -410,11 +411,11 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) 
{ }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -483,12 +484,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -559,12 +560,12 @@ func TestFormatValidator_ValidateTokenIssuer(t *testing.T) { }, ), WithNetmapSource( - &testNetmapSource{ - netmaps: map[uint64]*netmap.NetMap{ + &utilTesting.TestNetmapSource{ + Netmaps: map[uint64]*netmap.NetMap{ curEpoch: currentEpochNM, curEpoch - 1: previousEpochNM, }, - currentEpoch: curEpoch, + CurrentEpoch: curEpoch, }, ), WithLogger(logger.NewLoggerWrapper(zaptest.NewLogger(t))), @@ -596,26 +597,3 @@ func (s *testContainerSource) Get(ctx context.Context, cnrID cid.ID) (*container func (s *testContainerSource) DeletionInfo(context.Context, cid.ID) (*container.DelInfo, error) { return nil, nil } - -type testNetmapSource struct { - netmaps map[uint64]*netmap.NetMap - currentEpoch uint64 -} - -func (s *testNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { - if diff >= s.currentEpoch { - return nil, fmt.Errorf("invalid diff") - } - return s.GetNetMapByEpoch(ctx, s.currentEpoch-diff) -} - -func (s *testNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmap.NetMap, error) { - if nm, found := s.netmaps[epoch]; found { - return nm, nil - } - return nil, fmt.Errorf("netmap not found") -} - -func (s *testNetmapSource) Epoch(ctx context.Context) (uint64, error) { - return s.currentEpoch, nil -} diff --git a/pkg/util/testing/netmap_source.go b/pkg/util/testing/netmap_source.go new file mode 100644 index 000000000..7373e538f --- /dev/null +++ b/pkg/util/testing/netmap_source.go @@ -0,0 +1,36 @@ +package testing + +import ( + "context" + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +var ( + errInvalidDiff = errors.New("invalid diff") + errNetmapNotFound = errors.New("netmap not found") +) + +type TestNetmapSource struct { + Netmaps map[uint64]*netmap.NetMap + CurrentEpoch uint64 +} + +func (s *TestNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmap.NetMap, error) { + if diff >= s.CurrentEpoch { + return nil, errInvalidDiff + } + return s.GetNetMapByEpoch(ctx, s.CurrentEpoch-diff) +} + +func (s *TestNetmapSource) GetNetMapByEpoch(_ context.Context, epoch uint64) (*netmap.NetMap, error) { + if nm, found := s.Netmaps[epoch]; found { + return nm, nil + } + return nil, errNetmapNotFound +} + +func (s *TestNetmapSource) Epoch(context.Context) (uint64, error) { + return s.CurrentEpoch, nil +} From 0a38571a10df69da4200db8ea6c952f587c10c4d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:20:40 +0300 Subject: [PATCH 470/591] [#1689] adm: Simplify getCandidateRegisterPrice() After all the refactoring, there is no more need to have custom branch for the local client. 
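For context, a minimal sketch of the unified read path this commit relies on. The `invoker.New`, `neo.NewReader`, and `GetRegisterPrice` calls are exactly the ones the diff below adds to `registerCandidateRange`; the wrapper function name and its `Client` parameter are illustrative only:

```
// Any Client that can back a read-only invoker (the remote RPC client or the
// local one) goes through the same NEO native-contract reader, so the old
// type switch on *rpcclient.Client is unnecessary.
func fetchRegisterPrice(c helper.Client) (int64, error) {
	inv := invoker.New(c, nil)       // read-only invoker over the client
	reader := neo.NewReader(inv)     // NEO native contract reader
	return reader.GetRegisterPrice()
}
```
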
Change-Id: I274305b0c390578fb4583759135d3e7ce58873dc Signed-off-by: Evgenii Stratonikov --- .../morph/initialize/initialize_register.go | 31 ++----------------- 1 file changed, 3 insertions(+), 28 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 4c6607f9a..841acdc83 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -1,7 +1,6 @@ package initialize import ( - "errors" "fmt" "math/big" @@ -11,7 +10,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" @@ -30,7 +28,9 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - regPrice, err := getCandidateRegisterPrice(c) + inv := invoker.New(c.Client, nil) + reader := neo.NewReader(inv) + regPrice, err := reader.GetRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) } @@ -144,28 +144,3 @@ func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (boo bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } - -var errGetPriceInvalid = errors.New("`getRegisterPrice`: invalid response") - -func getCandidateRegisterPrice(c *helper.InitializeContext) (int64, error) { - switch c.Client.(type) { - case *rpcclient.Client: - inv := invoker.New(c.Client, nil) - reader := neo.NewReader(inv) - return reader.GetRegisterPrice() - default: - neoHash := neo.Hash - res, err := helper.InvokeFunction(c.Client, neoHash, "getRegisterPrice", nil, nil) - if err != nil { - return 0, err - } - if len(res.Stack) == 0 { - return 0, errGetPriceInvalid - } - bi, err := res.Stack[0].TryInteger() - if err != nil || !bi.IsInt64() { - return 0, errGetPriceInvalid - } - return bi.Int64(), nil - } -} From c2c05e222863945fdf1fb954135043cc98c5acef Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:21:50 +0300 Subject: [PATCH 471/591] [#1689] adm: Reuse ReadOnlyInvoker in registerCandidateRange() Change-Id: I544d10340825494b45a62700fa247404c18f746a Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/initialize/initialize_register.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 841acdc83..3efa40c50 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -11,7 +11,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/transaction" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" @@ -28,8 +27,7 @@ const ( ) func registerCandidateRange(c *helper.InitializeContext, start, end int) error { - inv := invoker.New(c.Client, nil) - reader := 
neo.NewReader(inv) + reader := neo.NewReader(c.ReadOnlyInvoker) regPrice, err := reader.GetRegisterPrice() if err != nil { return fmt.Errorf("can't fetch registration price: %w", err) From bce2f7bef0c31f251daeaec377b6900ec9c01c9d Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:24:33 +0300 Subject: [PATCH 472/591] [#1689] adm: Reuse `neo.NewReader` helper in transferNEOFinished() Change-Id: I27980ed87436958cb4d27278e30e05da021d1506 Signed-off-by: Evgenii Stratonikov --- .../modules/morph/initialize/initialize_register.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 3efa40c50..46e6621d2 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -12,7 +12,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/neo" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/nep17" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/util" @@ -114,7 +113,7 @@ func registerCandidates(c *helper.InitializeContext) error { func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { neoHash := neo.Hash - ok, err := transferNEOFinished(c, neoHash) + ok, err := transferNEOFinished(c) if ok || err != nil { return err } @@ -137,8 +136,8 @@ func transferNEOToAlphabetContracts(c *helper.InitializeContext) error { return c.AwaitTx() } -func transferNEOFinished(c *helper.InitializeContext, neoHash util.Uint160) (bool, error) { - r := nep17.NewReader(c.ReadOnlyInvoker, neoHash) +func transferNEOFinished(c *helper.InitializeContext) (bool, error) { + r := neo.NewReader(c.ReadOnlyInvoker) bal, err := r.BalanceOf(c.CommitteeAcc.Contract.ScriptHash()) return bal.Cmp(big.NewInt(native.NEOTotalSupply)) == -1, err } From bd8ab2d84a59a1c251820a71e63d5e741fd9450e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:27:49 +0300 Subject: [PATCH 473/591] [#1689] adm: Remove useless switch in NNSIsAvailable() After all the refactorings, there is no need to have custom behaviour for local client. 
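A minimal sketch of the single code path that replaces the switch; the `invoker.New`, `nns2.NewReader`, and `IsAvailable` calls are the ones from the diff below, and the premise that the local client can back the same invoker comes from the commit message above:

```
// Single path for every Client implementation, no per-type branching:
inv := invoker.New(c, nil)              // read-only invoker over the client
reader := nns2.NewReader(inv, nnsHash)  // NNS contract reader over the invoker
return reader.IsAvailable(name)
```
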
Change-Id: I99e297cdeffff979524b3f89d3580ab5780e7681 Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/initialize.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 961ceba53..48f7a62a4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -13,7 +13,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" @@ -187,19 +186,9 @@ func NNSResolveKey(inv *invoker.Invoker, nnsHash util.Uint160, domain string) (* } func NNSIsAvailable(c Client, nnsHash util.Uint160, name string) (bool, error) { - switch c.(type) { - case *rpcclient.Client: - inv := invoker.New(c, nil) - reader := nns2.NewReader(inv, nnsHash) - return reader.IsAvailable(name) - default: - b, err := unwrap.Bool(InvokeFunction(c, nnsHash, "isAvailable", []any{name}, nil)) - if err != nil { - return false, fmt.Errorf("`isAvailable`: invalid response: %w", err) - } - - return b, nil - } + inv := invoker.New(c, nil) + reader := nns2.NewReader(inv, nnsHash) + return reader.IsAvailable(name) } func CheckNotaryEnabled(c Client) error { From 60446bb66816e5d92bccf3a61022b708b6506297 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:31:10 +0300 Subject: [PATCH 474/591] [#1689] adm/helper: Use proper nns bindings import The one in `neo-go` is for another contract. Change-Id: Ia1ac2da5e419b48801afdb26df72892d77344e0d Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/helper/initialize.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go index 48f7a62a4..50b5c1ec7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize.go @@ -6,6 +6,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" + nns2 "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" @@ -14,7 +15,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/nspcc-dev/neo-go/pkg/encoding/address" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" - nns2 "github.com/nspcc-dev/neo-go/pkg/rpcclient/nns" "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger" "github.com/nspcc-dev/neo-go/pkg/util" From d00c606feed8ad776fe6df65b601b81790e7dfbe Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 21 Mar 2025 08:12:20 +0300 Subject: [PATCH 475/591] [#652] adm: Group independent stages in batches Each stage waits until transaction persists. This is needed to ensure the next stage will see the result of the previous one. 
However, some of the stages do not depend one on another, so we may execute them in parallel. `AwaitDisabled` flag is used to localize this batching on the code level. We could've removed `AwaitTx()` from respective stages, but it seems more error prone. Close #652. Change-Id: Ib9c6f6cd5e0db0f31aa1cda8e127b1fad5166336 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 4 +++- .../internal/modules/morph/initialize/initialize.go | 8 +++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go index 3f3a66cb6..d6ca012ce 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go @@ -40,6 +40,8 @@ type ClientContext struct { CommitteeAct *actor.Actor // committee actor with the Global witness scope ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer SentTxs []HashVUBPair + + AwaitDisabled bool } func NewRemoteClient(v *viper.Viper) (Client, error) { @@ -120,7 +122,7 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command, } func (c *ClientContext) AwaitTx(cmd *cobra.Command) error { - if len(c.SentTxs) == 0 { + if len(c.SentTxs) == 0 || c.AwaitDisabled { return nil } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go index cdaf7d3bc..4d39dc662 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go @@ -39,6 +39,7 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error { return err } + initCtx.AwaitDisabled = true cmd.Println("Stage 4.1: Transfer GAS to proxy contract.") if err := transferGASToProxy(initCtx); err != nil { return err @@ -55,5 +56,10 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error { } cmd.Println("Stage 7: set addresses in NNS.") - return setNNS(initCtx) + if err := setNNS(initCtx); err != nil { + return err + } + + initCtx.AwaitDisabled = false + return initCtx.AwaitTx() } From a2053870e23b9d58529cd3cebb7898bff2abae15 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 16:36:29 +0300 Subject: [PATCH 476/591] [#1692] metabase: Use bucket cache in ListWithCursor() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No changes in speed, but unified approach: ``` goos: linux goarch: amd64 pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz │ master │ new │ │ sec/op │ sec/op vs base │ ListWithCursor/1_item-8 6.067µ ± 8% 5.731µ ± 10% ~ (p=0.052 n=10) ListWithCursor/10_items-8 25.40µ ± 11% 26.12µ ± 13% ~ (p=0.971 n=10) ListWithCursor/100_items-8 210.7µ ± 9% 203.2µ ± 6% ~ (p=0.280 n=10) geomean 31.90µ 31.22µ -2.16% │ master │ new │ │ B/op │ B/op vs base │ ListWithCursor/1_item-8 3.287Ki ± 0% 3.287Ki ± 0% ~ (p=1.000 n=10) ¹ ListWithCursor/10_items-8 15.63Ki ± 0% 15.62Ki ± 0% ~ (p=0.328 n=10) ListWithCursor/100_items-8 138.1Ki ± 0% 138.1Ki ± 0% ~ (p=0.340 n=10) geomean 19.21Ki 19.21Ki -0.00% ¹ all samples are equal │ master │ new │ │ allocs/op │ allocs/op vs base │ ListWithCursor/1_item-8 109.0 ± 0% 109.0 ± 0% ~ (p=1.000 n=10) ¹ ListWithCursor/10_items-8 380.0 ± 0% 380.0 ± 0% ~ (p=1.000 n=10) ¹ ListWithCursor/100_items-8 3.082k ± 
0% 3.082k ± 0% ~ (p=1.000 n=10) ¹ geomean 503.5 503.5 +0.00% ¹ all samples are equal ``` Change-Id: Ic11673427615053656b2a60068a6d4dbd27af2cb Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/list.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index 0b6cdf702..a1b3f4e2d 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -139,8 +139,7 @@ func (db *DB) listWithCursor(tx *bbolt.Tx, result []objectcore.Info, count int, var containerID cid.ID var offset []byte - graveyardBkt := tx.Bucket(graveyardBucketName) - garbageBkt := tx.Bucket(garbageBucketName) + bc := newBucketCache() rawAddr := make([]byte, cidSize, addressKeySize) @@ -169,7 +168,7 @@ loop: bkt := tx.Bucket(name) if bkt != nil { copy(rawAddr, cidRaw) - result, offset, cursor, err = selectNFromBucket(bkt, objType, graveyardBkt, garbageBkt, rawAddr, containerID, + result, offset, cursor, err = selectNFromBucket(bc, bkt, objType, rawAddr, containerID, result, count, cursor, threshold, currEpoch) if err != nil { return nil, nil, err @@ -204,9 +203,10 @@ loop: // selectNFromBucket similar to selectAllFromBucket but uses cursor to find // object to start selecting from. Ignores inhumed objects. -func selectNFromBucket(bkt *bbolt.Bucket, // main bucket +func selectNFromBucket( + bc *bucketCache, + bkt *bbolt.Bucket, // main bucket objType objectSDK.Type, // type of the objects stored in the main bucket - graveyardBkt, garbageBkt *bbolt.Bucket, // cached graveyard buckets cidRaw []byte, // container ID prefix, optimization cnt cid.ID, // container ID to []objectcore.Info, // listing result @@ -241,6 +241,8 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } offset = k + graveyardBkt := getGraveyardBucket(bc, bkt.Tx()) + garbageBkt := getGarbageBucket(bc, bkt.Tx()) if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 { continue } @@ -251,7 +253,7 @@ func selectNFromBucket(bkt *bbolt.Bucket, // main bucket } expEpoch, hasExpEpoch := hasExpirationEpoch(&o) - if hasExpEpoch && expEpoch < currEpoch && !objectLocked(bkt.Tx(), cnt, obj) { + if hasExpEpoch && expEpoch < currEpoch && !objectLockedWithCache(bc, bkt.Tx(), cnt, obj) { continue } From d144abc9771fdc8780435e09e6abba50fca81620 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Thu, 20 Mar 2025 17:55:31 +0300 Subject: [PATCH 477/591] [#1692] metabase: Remove useless `count` variable It is always equal to `len(to)`. 
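Spelling out the equivalence for readers of the diff below (the identifiers mirror the loop in `selectNFromBucket`; this is an explanatory sketch, not repository code):

```
// count was initialized to len(to) and incremented exactly when an element was
// appended to `to`, so count == len(to) held on every loop iteration. The old
// guard `count >= limit` is therefore the same check as:
if len(to) >= limit {
	break
}
```
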
Change-Id: Id7a4c26e9711216b78f96e6b2511efa0773e3471 Signed-off-by: Evgenii Stratonikov --- pkg/local_object_storage/metabase/list.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go index a1b3f4e2d..2a0bd7f6a 100644 --- a/pkg/local_object_storage/metabase/list.go +++ b/pkg/local_object_storage/metabase/list.go @@ -219,7 +219,6 @@ func selectNFromBucket( cursor = new(Cursor) } - count := len(to) c := bkt.Cursor() k, v := c.First() @@ -231,7 +230,7 @@ func selectNFromBucket( } for ; k != nil; k, v = c.Next() { - if count >= limit { + if len(to) >= limit { break } @@ -275,7 +274,6 @@ func selectNFromBucket( a.SetContainer(cnt) a.SetObject(obj) to = append(to, objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}) - count++ } return to, offset, cursor, nil From d432bebef45be28487db419c7a005f455a08a0ac Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 28 Mar 2025 13:16:35 +0300 Subject: [PATCH 478/591] [#1689] go.mod: Bump frostfs-qos version Change-Id: Iaa28da1a1e7b2f4ab7fd8ed661939eb38f4c7782 Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 887f8bb2b..2dbd1e231 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 982fddf23..6969fdc53 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275 h1:WqWxCnCl2ekfjWja/CpGeF2rf4h0x199xhdnsm/j+E8= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250320142439-32079ad7c275/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 h1:NhqfqNcATndYwx413BaaYXxVJbkeu2vQOtVyxXw5xCQ= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 h1:n7Pl8V7O1yS07J/fqdbzZjVe/mQW42a7eS0QHfgrzJw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= From 2254c8aff5716a0de0c4367c268223ab2b0390a0 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 31 Mar 2025 
10:53:25 +0300 Subject: [PATCH 479/591] [#1689] go.mod: Bump SDK version Change-Id: Ic946aa68c3d6da9e7d54363f8e9141c6547707d6 Signed-off-by: Airat Arifullin --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2dbd1e231..9ae49a90c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b diff --git a/go.sum b/go.sum index 6969fdc53..63925fef2 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 h1:NhqfqNcATndYwx413BaaYXxVJbkeu2vQOtVyxXw5xCQ= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592 h1:n7Pl8V7O1yS07J/fqdbzZjVe/mQW42a7eS0QHfgrzJw= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250307150202-749b4e9ab592/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 h1:zM2l316J55h9p30snl6vHBI/h0xmnuqZjnxIjRDtJZw= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 30d4692c3eab07bba7cc472c06f8cb48b0d52263 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Fri, 28 Mar 2025 19:32:26 +0300 Subject: [PATCH 480/591] [#1640] go.mod: Bump version for `frostfs-locode-db` Change-Id: Ic45ae77d6209c0097575fc8f89b076b22d50d149 Signed-off-by: Anton Nikiforov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9ae49a90c..76662ff5e 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( code.gitea.io/sdk/gitea v0.17.1 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 - git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d + git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 diff --git a/go.sum b/go.sum index 63925fef2..42078073b 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ 
git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1 h1:k1Qw8dWUQczfo0eVXlhrq9 git.frostfs.info/TrueCloudLab/frostfs-contract v0.21.1/go.mod h1:5fSm/l5xSjGWqsPUffSdboiGFUHa7y/1S0fvxzQowN8= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0= -git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88FkUgRj0QaHkJEbcWfgzvY= +git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 h1:NhqfqNcATndYwx413BaaYXxVJbkeu2vQOtVyxXw5xCQ= From 12a0537a7a9f5af0b4c2bd20f1d5b9f1891897d4 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Tue, 25 Mar 2025 17:15:21 +0300 Subject: [PATCH 481/591] [#1689] ci: Add commit checker to Jenkinsfile - Commit checker image is built from dco-go: https://git.frostfs.info/TrueCloudLab/dco-go/pulls/14 - 'pull_request_target' branch is defined in Jenkins job: https://git.frostfs.info/TrueCloudLab/jenkins/pulls/10 https://git.frostfs.info/TrueCloudLab/jenkins/pulls/11 Change-Id: Ib86c5749f9e084d736b868240c4b47014b02ba8d Signed-off-by: Vitaliy Potyarkin --- .ci/Jenkinsfile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 166fb9286..4ddd36406 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -78,6 +78,10 @@ async { } } } -} -// TODO: dco check + task('dco') { + container('git.frostfs.info/truecloudlab/commit-check:master') { + sh 'FROM=pull_request_target commit-check' + } + } +} From 115aae7c34bd85f11a866d62ebe453c393f55c70 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Sat, 22 Mar 2025 01:54:06 +0300 Subject: [PATCH 482/591] [#1656] qos: Add tests for MaxActiveRPCLimiter Interceptors Change-Id: Ib65890ae5aec34c34e15d4ec1f05952f74f1ad26 Signed-off-by: Ekaterina Lebedeva --- internal/qos/grpc_test.go | 121 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 internal/qos/grpc_test.go diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go new file mode 100644 index 000000000..d6e2a689c --- /dev/null +++ b/internal/qos/grpc_test.go @@ -0,0 +1,121 @@ +package qos_test + +import ( + "context" + "errors" + "testing" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-qos/limiting" + "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +const ( + okKey = "ok" +) + +var ( + errTest = errors.New("mock") + errResExhausted *apistatus.ResourceExhausted +) + +type mockGRPCServerStream struct { + grpc.ServerStream + + ctx context.Context +} + +func (m *mockGRPCServerStream) Context() context.Context { + return m.ctx +} + 
+type limiter struct { + acquired bool + released bool +} + +func (l *limiter) Acquire(key string) (limiting.ReleaseFunc, bool) { + l.acquired = true + if key != okKey { + return nil, false + } + return func() { l.released = true }, true +} + +func unaryMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterUnaryServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(ctx context.Context, req any) (any, error) { + return nil, errTest + } + _, err := interceptor(ctx, nil, &grpc.UnaryServerInfo{FullMethod: methodName}, handler) + return err +} + +func streamMaxActiveRPCLimiter(ctx context.Context, lim *limiter, methodName string) error { + interceptor := qos.NewMaxActiveRPCLimiterStreamServerInterceptor(func() limiting.Limiter { return lim }) + handler := func(srv any, stream grpc.ServerStream) error { + return errTest + } + err := interceptor(nil, &mockGRPCServerStream{ctx: ctx}, &grpc.StreamServerInfo{ + FullMethod: methodName, + }, handler) + return err +} + +func Test_MaxActiveRPCLimiter(t *testing.T) { + // UnaryServerInterceptor + t.Run("unary fail", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := unaryMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("unary pass", func(t *testing.T) { + var lim limiter + + err := unaryMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) + // StreamServerInterceptor + t.Run("stream fail", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, "") + require.ErrorAs(t, err, &errResExhausted) + require.True(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass critical", func(t *testing.T) { + var lim limiter + ctx := tagging.ContextWithIOTag(context.Background(), qos.IOTagCritical.String()) + + err := streamMaxActiveRPCLimiter(ctx, &lim, "") + require.ErrorIs(t, err, errTest) + require.False(t, lim.acquired) + require.False(t, lim.released) + }) + t.Run("stream pass", func(t *testing.T) { + var lim limiter + + err := streamMaxActiveRPCLimiter(context.Background(), &lim, okKey) + require.ErrorIs(t, err, errTest) + require.True(t, lim.acquired) + require.True(t, lim.released) + }) +} From dcb2b23a7d863d2e07c11fc22ebd7ceb83467486 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Sat, 22 Mar 2025 16:36:55 +0300 Subject: [PATCH 483/591] [#1656] qos: Add test for `SetCriticalIOTag` Interceptor Change-Id: I4a55fcb84e6f65408a1c0120ac917e49e23354a1 Signed-off-by: Ekaterina Lebedeva --- internal/qos/grpc_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go index d6e2a689c..d4030c39e 100644 --- a/internal/qos/grpc_test.go +++ b/internal/qos/grpc_test.go @@ -19,6 +19,7 @@ const ( var ( errTest = errors.New("mock") + errWrongTag = errors.New("wrong tag") errResExhausted *apistatus.ResourceExhausted ) @@ -119,3 +120,18 @@ func Test_MaxActiveRPCLimiter(t *testing.T) { require.True(t, lim.released) 
}) } + +func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { + interceptor := qos.NewSetCriticalIOTagUnaryServerInterceptor() + called := false + handler := func(ctx context.Context, req any) (any, error) { + called = true + if tag, ok := tagging.IOTagFromContext(ctx); ok && tag == qos.IOTagCritical.String() { + return nil, nil + } + return nil, errWrongTag + } + _, err := interceptor(context.Background(), nil, nil, handler) + require.NoError(t, err) + require.True(t, called) +} From 5a13830a94ab866b8425c3a97115a29a60007650 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 31 Mar 2025 14:14:30 +0300 Subject: [PATCH 484/591] [#1699] mod: Bump frostfs-qos version Change-Id: Ie5e708c0ca653596c6e3346aa286618868a5aee8 Signed-off-by: Dmitrii Stepanov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 76662ff5e..23be9c822 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 + git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 diff --git a/go.sum b/go.sum index 42078073b..e5d476ffe 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 h1:AovQs7bea0fLnYfldCZB88 git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 h1:fluzML8BIIabd07LyPSjc0JAV2qymWkPiFaLrXdALLA= git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167 h1:NhqfqNcATndYwx413BaaYXxVJbkeu2vQOtVyxXw5xCQ= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250324133647-57d895c32167/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= +git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 h1:zM2l316J55h9p30snl6vHBI/h0xmnuqZjnxIjRDtJZw= git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= From a5bae6c5af6252a6d340a957742c4095daeb22be Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 31 Mar 2025 14:14:58 +0300 Subject: [PATCH 485/591] [#1699] qos: Allow to prohibit operations for IO tag Change-Id: I2bee26885244e241d224860978b6de3526527e96 Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config/engine/config_test.go | 7 ++++--- cmd/frostfs-node/config/engine/shard/limits/config.go | 8 ++++++++ config/example/node.env | 1 + config/example/node.json | 3 ++- config/example/node.yaml | 1 + docs/storage-node-configuration.md | 1 + internal/qos/limiter.go | 
10 ++++++++-- 7 files changed, 25 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index eaf2a294e..22f26268d 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -168,9 +168,10 @@ func TestEngineSection(t *testing.T) { LimitOps: toPtr(25000), }, { - Tag: "policer", - Weight: toPtr(5), - LimitOps: toPtr(25000), + Tag: "policer", + Weight: toPtr(5), + LimitOps: toPtr(25000), + Prohibited: true, }, }) require.ElementsMatch(t, writeLimits.Tags, diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go index b9b5c4382..8444d6aa8 100644 --- a/cmd/frostfs-node/config/engine/shard/limits/config.go +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -84,6 +84,7 @@ type IOTagConfig struct { Weight *float64 LimitOps *float64 ReservedOps *float64 + Prohibited bool } func tags(c *config.Config) []IOTagConfig { @@ -119,6 +120,13 @@ func tags(c *config.Config) []IOTagConfig { tagConfig.ReservedOps = &r } + v = c.Value(strconv.Itoa(i) + ".prohibited") + if v != nil { + r, err := cast.ToBoolE(v) + panicOnErr(err) + tagConfig.Prohibited = r + } + result = append(result, tagConfig) } } diff --git a/config/example/node.env b/config/example/node.env index 010b6840c..b7c798ad8 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -180,6 +180,7 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_3_LIMIT_OPS=25000 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 diff --git a/config/example/node.json b/config/example/node.json index b26c35d2c..2f4413e4d 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -252,7 +252,8 @@ { "tag": "policer", "weight": 5, - "limit_ops": 25000 + "limit_ops": 25000, + "prohibited": true } ] }, diff --git a/config/example/node.yaml b/config/example/node.yaml index 58b687d5c..a07795da5 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -249,6 +249,7 @@ storage: - tag: policer weight: 5 limit_ops: 25000 + prohibited: true write: max_running_ops: 1000 max_waiting_ops: 100 diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 51f0a9669..3944f663f 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -359,6 +359,7 @@ limits: | `tag.weight` | `float` | 0 (no weight) | Weight for queries with the specified tag. Weights must be specified for all tags or not specified for any one. | | `tag.limit_ops` | `float` | 0 (no limit) | Operations per second rate limit for queries with the specified tag. | | `tag.reserved_ops` | `float` | 0 (no reserve) | Reserved operations per second rate for queries with the specified tag. | +| `tag.prohibited` | `bool` | false | If true, operations with this specified tag will be prohibited. 
| # `node` section diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 98d254fd0..c73481c2c 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -90,6 +90,7 @@ func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.T if l.ReservedOps != nil && *l.ReservedOps != 0 { v.ReservedIOPS = l.ReservedOps } + v.Prohibited = l.Prohibited result[l.Tag] = v } return result @@ -164,8 +165,7 @@ func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (R rel, err := s.RequestArrival(ctx, tag) stat.inProgress.Add(1) if err != nil { - if errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || - errors.Is(err, errSemaphoreLimitExceeded) { + if isResourceExhaustedErr(err) { stat.resourceExhausted.Add(1) return nil, &apistatus.ResourceExhausted{} } @@ -234,3 +234,9 @@ func exportMetrics(metrics Metrics, stats map[string]*stat, shardID, operation s metrics.SetOperationTagCounters(shardID, operation, tag, pending, inProgress, completed, resExh) } } + +func isResourceExhaustedErr(err error) bool { + return errors.Is(err, scheduling.ErrMClockSchedulerRequestLimitExceeded) || + errors.Is(err, errSemaphoreLimitExceeded) || + errors.Is(err, scheduling.ErrTagRequestsProhibited) +} From 6e1576cfdb02883a6bbc4f2800ce98f9cd06c71a Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Sat, 22 Mar 2025 16:40:31 +0300 Subject: [PATCH 486/591] [#1656] qos: Add tests for `AdjustOutgoingIOTag` Interceptors Change-Id: If534e756b26cf7f202039d48ecdf554b4283728b Signed-off-by: Ekaterina Lebedeva --- internal/qos/grpc_test.go | 82 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/internal/qos/grpc_test.go b/internal/qos/grpc_test.go index d4030c39e..7d0826754 100644 --- a/internal/qos/grpc_test.go +++ b/internal/qos/grpc_test.go @@ -3,6 +3,7 @@ package qos_test import ( "context" "errors" + "fmt" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" @@ -20,7 +21,9 @@ const ( var ( errTest = errors.New("mock") errWrongTag = errors.New("wrong tag") + errNoTag = errors.New("failed to get tag from context") errResExhausted *apistatus.ResourceExhausted + tags = []qos.IOTag{qos.IOTagBackground, qos.IOTagWritecache, qos.IOTagPolicer, qos.IOTagTreeSync} ) type mockGRPCServerStream struct { @@ -135,3 +138,82 @@ func TestSetCriticalIOTagUnaryServerInterceptor_Pass(t *testing.T) { require.NoError(t, err) require.True(t, called) } + +func TestAdjustOutgoingIOTagUnaryClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagUnaryClientInterceptor() + + // check context with no value + called := false + invoker := func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil + } + require.NoError(t, interceptor(context.Background(), "", nil, nil, nil, invoker, nil)) + require.True(t, called) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + invoker = func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, opts ...grpc.CallOption) error { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return errNoTag + } + if raw != targetTag { + return errWrongTag + } + return nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) + 
} + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + require.NoError(t, interceptor(ctx, "", nil, nil, nil, invoker, nil)) +} + +func TestAdjustOutgoingIOTagStreamClientInterceptor(t *testing.T) { + interceptor := qos.NewAdjustOutgoingIOTagStreamClientInterceptor() + + // check context with no value + called := false + streamer := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + called = true + if _, ok := tagging.IOTagFromContext(ctx); ok { + return nil, fmt.Errorf("%v: expected no IO tags", errWrongTag) + } + return nil, nil + } + _, err := interceptor(context.Background(), nil, nil, "", streamer, nil) + require.True(t, called) + require.NoError(t, err) + + // check context for internal tag + targetTag := qos.IOTagInternal.String() + streamer = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) { + raw, ok := tagging.IOTagFromContext(ctx) + if !ok { + return nil, errNoTag + } + if raw != targetTag { + return nil, errWrongTag + } + return nil, nil + } + for _, tag := range tags { + ctx := tagging.ContextWithIOTag(context.Background(), tag.String()) + _, err := interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) + } + + // check context for client tag + ctx := tagging.ContextWithIOTag(context.Background(), "") + targetTag = qos.IOTagClient.String() + _, err = interceptor(ctx, nil, nil, "", streamer, nil) + require.NoError(t, err) +} From bc6cc9ae2a2631b8a10d27d588c67d88e7b7041c Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Apr 2025 10:55:33 +0300 Subject: [PATCH 487/591] [#1700] engine: Print stacks on test request limiter Change-Id: I4952769ca431d1049955823b41b99b0984b385fc Signed-off-by: Dmitrii Stepanov --- .../engine/engine_test.go | 71 ++++++++++++++++--- 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 6ef3846ee..711a76100 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -2,8 +2,11 @@ package engine import ( "context" + "fmt" "path/filepath" - "sync/atomic" + "runtime/debug" + "strings" + "sync" "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" @@ -157,26 +160,74 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes var _ qos.Limiter = (*testQoSLimiter)(nil) type testQoSLimiter struct { - t testing.TB - read atomic.Int64 - write atomic.Int64 + t testing.TB + quard sync.Mutex + id int64 + readStacks map[int64][]byte + writeStacks map[int64][]byte } func (t *testQoSLimiter) SetMetrics(qos.Metrics) {} func (t *testQoSLimiter) Close() { - require.Equal(t.t, int64(0), t.read.Load(), "read requests count after limiter close must be 0") - require.Equal(t.t, int64(0), t.write.Load(), "write requests count after limiter close must be 0") + t.quard.Lock() + defer t.quard.Unlock() + + var sb strings.Builder + var seqN int + for _, stack := range t.readStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack))) + } + for _, stack := range t.writeStacks { + seqN++ + sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack))) + } + require.True(t.t, seqN == 0, sb.String()) } 
func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) { - t.read.Add(1) - return func() { t.read.Add(-1) }, nil + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.readStacks == nil { + t.readStacks = make(map[int64][]byte) + } + t.readStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.readStacks, id) + }, nil } func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) { - t.write.Add(1) - return func() { t.write.Add(-1) }, nil + t.quard.Lock() + defer t.quard.Unlock() + + stack := debug.Stack() + + t.id++ + id := t.id + + if t.writeStacks == nil { + t.writeStacks = make(map[int64][]byte) + } + t.writeStacks[id] = stack + + return func() { + t.quard.Lock() + defer t.quard.Unlock() + + delete(t.writeStacks, id) + }, nil } func (t *testQoSLimiter) SetParentID(string) {} From 27899598dc2617b859bdcd99d28fc7810329632a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Apr 2025 12:37:35 +0300 Subject: [PATCH 488/591] [#1700] gc: Drop Event interface There is only one event: new epoch. Change-Id: I982f3650f7bc753ff2782393625452f0f8cdcc35 Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/shards.go | 4 +- pkg/local_object_storage/shard/control.go | 24 +++-- pkg/local_object_storage/shard/gc.go | 109 +++++++--------------- pkg/local_object_storage/shard/gc_test.go | 4 +- 4 files changed, 47 insertions(+), 94 deletions(-) diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index a38c85151..dfc3b1a35 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -318,8 +318,6 @@ func (e *StorageEngine) SetShardMode(ctx context.Context, id *shard.ID, m mode.M // HandleNewEpoch notifies every shard about NewEpoch event. 
func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { - ev := shard.EventNewEpoch(epoch) - e.mtx.RLock() defer e.mtx.RUnlock() @@ -327,7 +325,7 @@ func (e *StorageEngine) HandleNewEpoch(ctx context.Context, epoch uint64) { select { case <-ctx.Done(): return - case sh.NotificationChannel() <- ev: + case sh.NotificationChannel() <- epoch: default: e.log.Debug(ctx, logs.ShardEventProcessingInProgress, zap.Uint64("epoch", epoch), zap.Stringer("shard", sh.ID())) diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 6dee2da3f..19b13a8ab 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -108,19 +108,17 @@ func (s *Shard) Init(ctx context.Context) error { s.updateMetrics(ctx) s.gc = &gc{ - gcCfg: &s.gcCfg, - remover: s.removeGarbage, - stopChannel: make(chan struct{}), - eventChan: make(chan Event), - mEventHandler: map[eventType]*eventHandlers{ - eventNewEpoch: { - cancelFunc: func() {}, - handlers: []eventHandler{ - s.collectExpiredLocks, - s.collectExpiredObjects, - s.collectExpiredTombstones, - s.collectExpiredMetrics, - }, + gcCfg: &s.gcCfg, + remover: s.removeGarbage, + stopChannel: make(chan struct{}), + newEpochChan: make(chan uint64), + newEpochHandlers: &newEpochHandlers{ + cancelFunc: func() {}, + handlers: []newEpochHandler{ + s.collectExpiredLocks, + s.collectExpiredObjects, + s.collectExpiredTombstones, + s.collectExpiredMetrics, }, }, } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 84fb6039e..82e76f1a7 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -33,41 +33,14 @@ type TombstoneSource interface { IsTombstoneAvailable(ctx context.Context, addr oid.Address, epoch uint64) bool } -// Event represents class of external events. -type Event interface { - typ() eventType -} +type newEpochHandler func(context.Context, uint64) -type eventType int - -const ( - _ eventType = iota - eventNewEpoch -) - -type newEpoch struct { - epoch uint64 -} - -func (e newEpoch) typ() eventType { - return eventNewEpoch -} - -// EventNewEpoch returns new epoch event. -func EventNewEpoch(e uint64) Event { - return newEpoch{ - epoch: e, - } -} - -type eventHandler func(context.Context, Event) - -type eventHandlers struct { +type newEpochHandlers struct { prevGroup sync.WaitGroup cancelFunc context.CancelFunc - handlers []eventHandler + handlers []newEpochHandler } type gcRunResult struct { @@ -109,10 +82,10 @@ type gc struct { remover func(context.Context) gcRunResult - // eventChan is used only for listening for the new epoch event. + // newEpochChan is used only for listening for the new epoch event. // It is ok to keep opened, we are listening for context done when writing in it. 
- eventChan chan Event - mEventHandler map[eventType]*eventHandlers + newEpochChan chan uint64 + newEpochHandlers *newEpochHandlers } type gcCfg struct { @@ -142,15 +115,7 @@ func defaultGCCfg() gcCfg { } func (gc *gc) init(ctx context.Context) { - sz := 0 - - for _, v := range gc.mEventHandler { - sz += len(v.handlers) - } - - if sz > 0 { - gc.workerPool = gc.workerPoolInit(sz) - } + gc.workerPool = gc.workerPoolInit(len(gc.newEpochHandlers.handlers)) ctx = tagging.ContextWithIOTag(ctx, qos.IOTagBackground.String()) gc.wg.Add(2) go gc.tickRemover(ctx) @@ -168,7 +133,7 @@ func (gc *gc) listenEvents(ctx context.Context) { case <-ctx.Done(): gc.log.Warn(ctx, logs.ShardStopEventListenerByContext) return - case event, ok := <-gc.eventChan: + case event, ok := <-gc.newEpochChan: if !ok { gc.log.Warn(ctx, logs.ShardStopEventListenerByClosedEventChannel) return @@ -179,38 +144,33 @@ func (gc *gc) listenEvents(ctx context.Context) { } } -func (gc *gc) handleEvent(ctx context.Context, event Event) { - v, ok := gc.mEventHandler[event.typ()] - if !ok { - return - } - - v.cancelFunc() - v.prevGroup.Wait() +func (gc *gc) handleEvent(ctx context.Context, epoch uint64) { + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() var runCtx context.Context - runCtx, v.cancelFunc = context.WithCancel(ctx) + runCtx, gc.newEpochHandlers.cancelFunc = context.WithCancel(ctx) - v.prevGroup.Add(len(v.handlers)) + gc.newEpochHandlers.prevGroup.Add(len(gc.newEpochHandlers.handlers)) - for i := range v.handlers { + for i := range gc.newEpochHandlers.handlers { select { case <-ctx.Done(): return default: } - h := v.handlers[i] + h := gc.newEpochHandlers.handlers[i] err := gc.workerPool.Submit(func() { - defer v.prevGroup.Done() - h(runCtx, event) + defer gc.newEpochHandlers.prevGroup.Done() + h(runCtx, epoch) }) if err != nil { gc.log.Warn(ctx, logs.ShardCouldNotSubmitGCJobToWorkerPool, zap.Error(err), ) - v.prevGroup.Done() + gc.newEpochHandlers.prevGroup.Done() } } } @@ -362,7 +322,7 @@ func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { return } -func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { +func (s *Shard) collectExpiredObjects(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -370,8 +330,8 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeRegular) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredObjectsCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -380,7 +340,7 @@ func (s *Shard) collectExpiredObjects(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() != objectSDK.TypeTombstone && o.Type() != objectSDK.TypeLock { batch = append(batch, o.Address()) @@ -486,7 +446,7 @@ func (s *Shard) inhumeGC(ctx context.Context, addrs []oid.Address) (meta.InhumeR return s.metaBase.Inhume(ctx, inhumePrm) } -func (s *Shard) 
collectExpiredTombstones(ctx context.Context, e Event) { +func (s *Shard) collectExpiredTombstones(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -494,7 +454,6 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeTombstone) }() - epoch := e.(newEpoch).epoch log := s.log.With(zap.Uint64("epoch", epoch)) log.Debug(ctx, logs.ShardStartedExpiredTombstonesHandling) @@ -566,7 +525,7 @@ func (s *Shard) collectExpiredTombstones(ctx context.Context, e Event) { } } -func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { +func (s *Shard) collectExpiredLocks(ctx context.Context, epoch uint64) { var err error startedAt := time.Now() @@ -574,8 +533,8 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { s.gc.metrics.AddExpiredObjectCollectionDuration(time.Since(startedAt), err == nil, objectTypeLock) }() - s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", e.(newEpoch).epoch)) - defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", e.(newEpoch).epoch)) + s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksStarted, zap.Uint64("epoch", epoch)) + defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredLocksCompleted, zap.Uint64("epoch", epoch)) workersCount, batchSize := s.getExpiredObjectsParameters() @@ -585,14 +544,14 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { errGroup.Go(func() error { batch := make([]oid.Address, 0, batchSize) - expErr := s.getExpiredObjects(egCtx, e.(newEpoch).epoch, func(o *meta.ExpiredObject) { + expErr := s.getExpiredObjects(egCtx, epoch, func(o *meta.ExpiredObject) { if o.Type() == objectSDK.TypeLock { batch = append(batch, o.Address()) if len(batch) == batchSize { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) batch = make([]oid.Address, 0, batchSize) @@ -606,7 +565,7 @@ func (s *Shard) collectExpiredLocks(ctx context.Context, e Event) { if len(batch) > 0 { expired := batch errGroup.Go(func() error { - s.expiredLocksCallback(egCtx, e.(newEpoch).epoch, expired) + s.expiredLocksCallback(egCtx, epoch, expired) return egCtx.Err() }) } @@ -785,17 +744,15 @@ func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { } } -// NotificationChannel returns channel for shard events. -func (s *Shard) NotificationChannel() chan<- Event { - return s.gc.eventChan +// NotificationChannel returns channel for new epoch events. 
+func (s *Shard) NotificationChannel() chan<- uint64 { + return s.gc.newEpochChan } -func (s *Shard) collectExpiredMetrics(ctx context.Context, e Event) { +func (s *Shard) collectExpiredMetrics(ctx context.Context, epoch uint64) { ctx, span := tracing.StartSpanFromContext(ctx, "shard.collectExpiredMetrics") defer span.End() - epoch := e.(newEpoch).epoch - s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsStarted, zap.Uint64("epoch", epoch)) defer s.log.Debug(ctx, logs.ShardGCCollectingExpiredMetricsCompleted, zap.Uint64("epoch", epoch)) diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index e3670b441..f512a488a 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -69,7 +69,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) { require.NoError(t, err) epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) var getPrm GetPrm getPrm.SetAddress(objectCore.AddressOf(obj)) @@ -165,7 +165,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) { require.True(t, errors.As(err, &splitInfoError), "split info must be provided") epoch.Value = 105 - sh.gc.handleEvent(context.Background(), EventNewEpoch(epoch.Value)) + sh.gc.handleEvent(context.Background(), epoch.Value) _, err = sh.Get(context.Background(), getPrm) require.True(t, client.IsErrObjectNotFound(err) || IsErrObjectExpired(err), "expired complex object must be deleted on epoch after lock expires") From f62d81e26a6b4ca44fbb15fd7dd4e1343bdbed71 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Apr 2025 12:50:21 +0300 Subject: [PATCH 489/591] [#1700] gc: Take mode mutex in locks handlers Change-Id: I4408eae3aed936f85427b6246dcf727bd6813a0d Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/gc.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 82e76f1a7..3b9ad690b 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -664,7 +664,10 @@ func (s *Shard) HandleExpiredTombstones(ctx context.Context, tss []meta.Tombston // HandleExpiredLocks unlocks all objects which were locked by lockers. // If successful, marks lockers themselves as garbage. func (s *Shard) HandleExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } @@ -727,7 +730,10 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc // HandleDeletedLocks unlocks all objects which were locked by lockers. 
func (s *Shard) HandleDeletedLocks(ctx context.Context, lockers []oid.Address) { - if s.GetMode().NoMetabase() { + s.m.RLock() + defer s.m.RUnlock() + + if s.info.Mode.NoMetabase() { return } From 19ca9072237254f1b32b18c19e75586d8bb1c88a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:16:11 +0300 Subject: [PATCH 490/591] [#1689] treesvc: Untie `createConnection()` from `Service` struct Change-Id: I6212de4b81afe8c2516981a7bb2fea099c7df773 Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/sync.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 1c3521344..92ff97c1c 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -301,7 +301,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return false } - cc, err := s.createConnection(a) + cc, err := createConnection(a) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) return false @@ -339,7 +339,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) { +func createConnection(a network.Address) (*grpc.ClientConn, error) { return grpc.NewClient(a.URIAddr(), grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), From b112a92408fac6a2da5509d5c5c9a86b044ea908 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:20:20 +0300 Subject: [PATCH 491/591] [#1689] treesvc: Create request after client is initialized Make it easier to follow. Change-Id: I40c4db77f015bb45cb25f16ce24e68188fc14380 Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/cache.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index e2be2f4a2..a74fdc5dd 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -117,13 +117,6 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, c.key); err != nil { - return nil, err - } - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) if err != nil { return nil, err @@ -131,6 +124,14 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) defer cancel() + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, c.key); err != nil { + return nil, err + } + // perform some request to check connection if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { _ = cc.Close() From a11b54ca150a9ae8900698c95fb3fc26eed44bb3 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:31:48 +0300 Subject: [PATCH 492/591] [#1689] treesvc: Unify gRPC client creation for cache and sync They connect to the same endpoints, the only difference is that connection for synchronization is limited in lifetime and is closed after the sync is finished. This is probably not intentional, as synchronization was implemented before cache was introduced. However, reusing dialTreeService() in sync.go has possible perfomance implications, so is avoided for now. 
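For illustration, the shape of the change is just option merging; `dial` below is a hypothetical stand-in for the shared helper, not the node's code — the cache appends its context dialer, while the sync path passes nothing extra and closes the connection when done.

```go
package tree

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial is a minimal sketch of the pattern: the helper owns the dial options
// common to both call sites, callers append only what differs.
func dial(addr string, extra ...grpc.DialOption) (*grpc.ClientConn, error) {
	common := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.WaitForReady(true)),
	}
	return grpc.NewClient(addr, append(common, extra...)...)
}
```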
Change-Id: I2e37befd783b4d873ff833969f932deded1195be Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/cache.go | 29 +---------------------------- pkg/services/tree/sync.go | 8 +++++--- 2 files changed, 6 insertions(+), 31 deletions(-) diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index a74fdc5dd..d250f577a 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -9,15 +9,10 @@ import ( "time" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" - metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc" - tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" "github.com/hashicorp/golang-lru/v2/simplelru" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" ) type clientCache struct { @@ -95,29 +90,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (* return nil, err } - opts := []grpc.DialOption{ - grpc.WithChainUnaryInterceptor( - qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), - metrics.NewUnaryClientInterceptor(), - tracing.NewUnaryClientInterceptor(), - tagging.NewUnaryClientInterceptor(), - ), - grpc.WithChainStreamInterceptor( - qos.NewAdjustOutgoingIOTagStreamClientInterceptor(), - metrics.NewStreamClientInterceptor(), - tracing.NewStreamClientInterceptor(), - tagging.NewStreamClientInterceptor(), - ), - grpc.WithContextDialer(c.ds.GrpcContextDialer()), - grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), - grpc.WithDisableServiceConfig(), - } - - if !netAddr.IsTLSEnabled() { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) + cc, err := createConnection(netAddr, grpc.WithContextDialer(c.ds.GrpcContextDialer())) if err != nil { return nil, err } diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 92ff97c1c..7107b2bad 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -339,8 +339,8 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } -func createConnection(a network.Address) (*grpc.ClientConn, error) { - return grpc.NewClient(a.URIAddr(), +func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + defaultOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), metrics.NewUnaryClientInterceptor(), @@ -356,7 +356,9 @@ func createConnection(a network.Address) (*grpc.ClientConn, error) { grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDisableServiceConfig(), - ) + } + + return grpc.NewClient(a.URIAddr(), append(defaultOpts, opts...)...) } // ErrAlreadySyncing is returned when a service synchronization has already From 3cd808023285c226a086d7756db837f5b0559339 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:35:41 +0300 Subject: [PATCH 493/591] [#1689] treesvc: Fix dial options for TLS connections There are two problems with the current approach: 1. For TLS connections we need different transport credentials. 2. grpc.NewClient() considers scheme from `URIAddr()` as a scheme for a resolver. `grpcs://` scheme doesn't exist, though, so the default one is picked. 
The default resolver (`dns://`) is in turn unable to parse the https://github.com/grpc/grpc-go/blob/5edab9e55414068e74320716117a2659c5d2174e/internal/resolver/dns/dns_resolver.go#L405 The error is `grpcs://192.168.198.248:8081:443: too many colons in address`. Both problems don't exist in the SDK code, take it from there. Change-Id: Ia1212050f539162a560796685efdc3f9cfbf80a0 Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/sync.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 7107b2bad..1a084a47f 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -3,6 +3,7 @@ package tree import ( "context" "crypto/sha256" + "crypto/tls" "errors" "fmt" "io" @@ -22,12 +23,14 @@ import ( tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -340,6 +343,16 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, } func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + host, isTLS, err := client.ParseURI(a.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + defaultOpts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( qos.NewAdjustOutgoingIOTagUnaryClientInterceptor(), @@ -353,12 +366,12 @@ func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientC tracing_grpc.NewStreamClientInterceptor(), tagging.NewStreamClientInterceptor(), ), - grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithTransportCredentials(creds), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDisableServiceConfig(), } - return grpc.NewClient(a.URIAddr(), append(defaultOpts, opts...)...) + return grpc.NewClient(host, append(defaultOpts, opts...)...) } // ErrAlreadySyncing is returned when a service synchronization has already From b27f7d1d175b7d132497087497270b4bf8ecff64 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:43:20 +0300 Subject: [PATCH 494/591] [#1689] treesvc: Use context dialer in synchronizeTree() This dialer supports source-based routing and is already used in cache. 
Change-Id: Ic7852edd2faea4e5d8667221e6f681cc82bb143a Signed-off-by: Evgenii Stratonikov --- pkg/services/tree/sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 1a084a47f..8abdafaa7 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -304,7 +304,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return false } - cc, err := createConnection(a) + cc, err := createConnection(a, grpc.WithContextDialer(s.ds.GrpcContextDialer())) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) return false From bd1c18e117f7a4eb7d23e1f173fbd802252374d4 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Tue, 1 Apr 2025 13:51:53 +0300 Subject: [PATCH 495/591] [#1689] cli/tree: Copy dial options from the service code There should be no `grpcs://` prefix in address and credentials should be picked. Change-Id: I58cdc98b079eac2c7db7dc088f4f131794a91b9f Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/tree/client.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go index 421b96ccd..d71a94b98 100644 --- a/cmd/frostfs-cli/modules/tree/client.go +++ b/cmd/frostfs-cli/modules/tree/client.go @@ -2,17 +2,19 @@ package tree import ( "context" + "crypto/tls" "fmt" - "strings" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/spf13/cobra" "github.com/spf13/viper" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -31,6 +33,16 @@ func _client() (tree.TreeServiceClient, error) { return nil, err } + host, isTLS, err := client.ParseURI(netAddr.URIAddr()) + if err != nil { + return nil, err + } + + creds := insecure.NewCredentials() + if isTLS { + creds = credentials.NewTLS(&tls.Config{}) + } + opts := []grpc.DialOption{ grpc.WithChainUnaryInterceptor( tracing.NewUnaryClientInterceptor(), @@ -40,13 +52,10 @@ func _client() (tree.TreeServiceClient, error) { ), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), grpc.WithDisableServiceConfig(), + grpc.WithTransportCredentials(creds), } - if !strings.HasPrefix(netAddr.URIAddr(), "grpcs:") { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } - - cc, err := grpc.NewClient(netAddr.URIAddr(), opts...) + cc, err := grpc.NewClient(host, opts...) return tree.NewTreeServiceClient(cc), err } From e142d25fac6c5a3377e598639f36d7286a779a80 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 1 Apr 2025 12:56:55 +0300 Subject: [PATCH 496/591] [#1700] gc: Wait for handlers on GC stopping First wait for goroutine handles epoch events to not to get data race on `gc.newEpochHandlers.cancelFunc`. Then cancel handlers and wait for them. 
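Condensed, the resulting stop order looks like this (a sketch using the field names from this file, omitting the rest of stop()):

```go
gc.wg.Wait()                         // 1. event-listener goroutine exits first;
                                     //    it is the goroutine that reassigns cancelFunc
gc.newEpochHandlers.cancelFunc()     // 2. cancelling can no longer race with that write
gc.newEpochHandlers.prevGroup.Wait() // 3. wait for epoch handlers already in flight
```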
Change-Id: I71f11f8526961f8356f582a95b10eb8340c0aedd Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/gc.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 3b9ad690b..9b327f6f1 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -227,6 +227,9 @@ func (gc *gc) stop(ctx context.Context) { gc.log.Info(ctx, logs.ShardWaitingForGCWorkersToStop) gc.wg.Wait() + + gc.newEpochHandlers.cancelFunc() + gc.newEpochHandlers.prevGroup.Wait() } // iterates over metabase and deletes objects From b924ecb850d7812395e1ff9427857403e1e1f663 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 31 Mar 2025 10:53:40 +0300 Subject: [PATCH 497/591] [#1689] object: Make patch streamer use `ApplyHeaderPatch` Change-Id: I4fb94936621544f70ef4e08815c42efaa5ba846f Signed-off-by: Airat Arifullin --- pkg/services/object/patch/streamer.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go index 642b9f9fa..ff13b1d3e 100644 --- a/pkg/services/object/patch/streamer.go +++ b/pkg/services/object/patch/streamer.go @@ -195,7 +195,12 @@ func (s *Streamer) Send(ctx context.Context, req *objectV2.PatchRequest) error { patch.FromV2(req.GetBody()) if !s.nonFirstSend { - err := s.patcher.ApplyAttributesPatch(ctx, patch.NewAttributes, patch.ReplaceAttributes) + err := s.patcher.ApplyHeaderPatch(ctx, + patcher.ApplyHeaderPatchPrm{ + NewSplitHeader: patch.NewSplitHeader, + NewAttributes: patch.NewAttributes, + ReplaceAttributes: patch.ReplaceAttributes, + }) if err != nil { return fmt.Errorf("patch attributes: %w", err) } From 11493d587bd31de7e8d0c52eb671a7abe934294e Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 2 Apr 2025 11:07:15 +0300 Subject: [PATCH 498/591] [#579] config: Change config example to be compatible with YAML 1.2 standard In accordance with the YAML 1.2 specification, octal numbers must begin with the 0o prefix. 
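A quick illustration of why the prefix matters (Go, not part of the change): a strict YAML 1.2 reader that does not recognize the legacy `0644` form may return decimal 644, which is a different permission mask than the intended octal value.

```go
package main

import "fmt"

func main() {
	intended := 0o644 // rw-r--r--, 420 in decimal
	misread := 644    // what "0644" becomes if parsed as a plain decimal
	fmt.Printf("0o644 = %d (%#o), 644 = %d (%#o)\n", intended, intended, misread, misread)
}
```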
Change-Id: Icb2e83a4aa75c1eb91decd0b7c9b146aaa9fb3e2 Signed-off-by: Alexander Chuprov --- config/example/node.yaml | 8 ++++---- docs/storage-node-configuration.md | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config/example/node.yaml b/config/example/node.yaml index a07795da5..a448ba7ce 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -148,7 +148,7 @@ storage: flush_worker_count: 30 # number of write-cache flusher threads metabase: - perm: 0644 # permissions for metabase files(directories: +x for current user and group) + perm: 0o644 # permissions for metabase files(directories: +x for current user and group) max_batch_size: 200 max_batch_delay: 20ms @@ -161,13 +161,13 @@ storage: blobstor: - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 1 # max depth of object tree storage in key-value DB width: 4 # max width of object tree storage in key-value DB opened_cache_capacity: 50 # maximum number of opened database files opened_cache_ttl: 5m # ttl for opened database file opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's - - perm: 0644 # permissions for blobstor files(directories: +x for current user and group) + - perm: 0o644 # permissions for blobstor files(directories: +x for current user and group) depth: 5 # max depth of object tree storage in FS gc: @@ -291,7 +291,7 @@ storage: pilorama: path: tmp/1/blob/pilorama.db no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted. - perm: 0644 # permission to use for the database file and intermediate directories + perm: 0o644 # permission to use for the database file and intermediate directories tracing: enabled: true diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 3944f663f..b5c8aadfe 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -209,7 +209,7 @@ blobstor: width: 4 - type: fstree path: /path/to/blobstor/blobovnicza - perm: 0644 + perm: 0o644 size: 4194304 depth: 1 width: 4 @@ -269,7 +269,7 @@ gc: ```yaml metabase: path: /path/to/meta.db - perm: 0644 + perm: 0o644 max_batch_size: 200 max_batch_delay: 20ms ``` From 2a6cdbdb72a3acb07948bf1e0124df98fc964925 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 31 Mar 2025 10:53:56 +0300 Subject: [PATCH 499/591] [#1689] cli: Add `split-header` option for object patch command * Make `split-header` option read binary- or JSON-encoded split-header; * Use `PatchHeader` instead of `PatchAttributes`. 
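The decoding fallback behind the new flag can be sketched generically; the `dualDecoder` interface below is assumed for illustration only, the real code works on the split header type directly.

```go
// dualDecoder captures the two encodings the flag accepts.
type dualDecoder interface {
	Unmarshal([]byte) error     // binary (protobuf) form
	UnmarshalJSON([]byte) error // JSON form
}

// decodeBinaryOrJSON tries the binary encoding first and falls back to JSON.
func decodeBinaryOrJSON(data []byte, dst dualDecoder) error {
	if err := dst.Unmarshal(data); err == nil {
		return nil
	}
	return dst.UnmarshalJSON(data)
}
```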
Change-Id: I50ae1bd93d4695657249dacbea981199a39e1a35 Signed-off-by: Airat Arifullin --- cmd/frostfs-cli/internal/client/client.go | 8 +++++++- cmd/frostfs-cli/modules/object/patch.go | 25 +++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index 3f235f070..299d0a830 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -858,6 +858,8 @@ type PatchObjectPrm struct { ReplaceAttribute bool + NewSplitHeader *objectSDK.SplitHeader + PayloadPatches []PayloadPatch } @@ -888,7 +890,11 @@ func Patch(ctx context.Context, prm PatchObjectPrm) (*PatchRes, error) { return nil, fmt.Errorf("init payload reading: %w", err) } - if patcher.PatchAttributes(ctx, prm.NewAttributes, prm.ReplaceAttribute) { + if patcher.PatchHeader(ctx, client.PatchHeaderPrm{ + NewSplitHeader: prm.NewSplitHeader, + NewAttributes: prm.NewAttributes, + ReplaceAttributes: prm.ReplaceAttribute, + }) { for _, pp := range prm.PayloadPatches { payloadFile, err := os.OpenFile(pp.PayloadPath, os.O_RDONLY, os.ModePerm) if err != nil { diff --git a/cmd/frostfs-cli/modules/object/patch.go b/cmd/frostfs-cli/modules/object/patch.go index d98182679..ebbde76a2 100644 --- a/cmd/frostfs-cli/modules/object/patch.go +++ b/cmd/frostfs-cli/modules/object/patch.go @@ -2,6 +2,7 @@ package object import ( "fmt" + "os" "strconv" "strings" @@ -9,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key" commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -20,6 +22,7 @@ const ( replaceAttrsFlagName = "replace-attrs" rangeFlagName = "range" payloadFlagName = "payload" + splitHeaderFlagName = "split-header" ) var objectPatchCmd = &cobra.Command{ @@ -50,6 +53,7 @@ func initObjectPatchCmd() { flags.Bool(replaceAttrsFlagName, false, "Replace object attributes by new ones.") flags.StringSlice(rangeFlagName, []string{}, "Range to which patch payload is applied. 
Format: offset:length") flags.StringSlice(payloadFlagName, []string{}, "Path to file with patch payload.") + flags.String(splitHeaderFlagName, "", "Path to binary or JSON-encoded split header") } func patch(cmd *cobra.Command, _ []string) { @@ -84,6 +88,8 @@ func patch(cmd *cobra.Command, _ []string) { prm.NewAttributes = newAttrs prm.ReplaceAttribute = replaceAttrs + prm.NewSplitHeader = parseSplitHeaderBinaryOrJSON(cmd) + for i := range ranges { prm.PayloadPatches = append(prm.PayloadPatches, internalclient.PayloadPatch{ Range: ranges[i], @@ -147,3 +153,22 @@ func patchPayloadPaths(cmd *cobra.Command) []string { v, _ := cmd.Flags().GetStringSlice(payloadFlagName) return v } + +func parseSplitHeaderBinaryOrJSON(cmd *cobra.Command) *objectSDK.SplitHeader { + path, _ := cmd.Flags().GetString(splitHeaderFlagName) + if path == "" { + return nil + } + + data, err := os.ReadFile(path) + commonCmd.ExitOnErr(cmd, "read file error: %w", err) + + splitHdrV2 := new(objectV2.SplitHeader) + err = splitHdrV2.Unmarshal(data) + if err != nil { + err = splitHdrV2.UnmarshalJSON(data) + commonCmd.ExitOnErr(cmd, "unmarshal error: %w", err) + } + + return objectSDK.NewSplitHeaderFromV2(splitHdrV2) +} From 634de975094b8f83b5b9eeff15b7fd2bfdd6f1fc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 3 Apr 2025 10:09:05 +0300 Subject: [PATCH 500/591] [#1704] metabase: Do not ignore errors by Delete Change-Id: Ie7b89071a007f53f55879ff9e7e0c25d24ad5dbf Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/metabase/delete.go | 59 ++++++++++++--------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index d338e228f..cf1d563e9 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -376,11 +376,12 @@ func parentLength(tx *bbolt.Tx, addr oid.Address) int { return len(lst) } -func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) { +func delUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error { bkt := tx.Bucket(item.name) if bkt != nil { - _ = bkt.Delete(item.key) // ignore error, best effort there + return bkt.Delete(item.key) } + return nil } func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -405,19 +406,16 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error { // if list empty, remove the key from bucket if len(lst) == 0 { - _ = bkt.Delete(item.key) // ignore error, best effort there - - return nil + return bkt.Delete(item.key) } // if list is not empty, then update it encodedLst, err := encodeList(lst) if err != nil { - return nil // ignore error, best effort there + return err } - _ = bkt.Put(item.key, encodedLst) // ignore error, best effort there - return nil + return bkt.Put(item.key, encodedLst) } func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error { @@ -480,35 +478,47 @@ func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error return ErrUnknownObjectType } - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: bucketName, key: objKey, - }) + }); err != nil { + return err + } } else { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: parentBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } } - delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id index + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from storage id 
index name: smallBucketName(cnr, bucketName), key: objKey, - }) - delUniqueIndexItem(tx, namedBucketItem{ // remove from root index + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ // remove from root index name: rootBucketName(cnr, bucketName), key: objKey, - }) + }); err != nil { + return err + } if expEpoch, ok := hasExpirationEpoch(obj); ok { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: expEpochToObjectBucketName, key: expirationEpochKey(expEpoch, cnr, addr.Object()), - }) - delUniqueIndexItem(tx, namedBucketItem{ + }); err != nil { + return err + } + if err := delUniqueIndexItem(tx, namedBucketItem{ name: objectToExpirationEpochBucketName(cnr, make([]byte, bucketKeySize)), key: objKey, - }) + }); err != nil { + return err + } } return nil @@ -535,10 +545,12 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. // also drop EC parent root info if current EC chunk is the last one if !hasAnyChunks { - delUniqueIndexItem(tx, namedBucketItem{ + if err := delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(ech.Parent(), make([]byte, objectKeySize)), - }) + }); err != nil { + return err + } } if ech.ParentSplitParentID() == nil { @@ -572,11 +584,10 @@ func deleteECRelatedInfo(tx *bbolt.Tx, garbageBKT *bbolt.Bucket, obj *objectSDK. } // drop split info - delUniqueIndexItem(tx, namedBucketItem{ + return delUniqueIndexItem(tx, namedBucketItem{ name: rootBucketName(cnr, make([]byte, bucketKeySize)), key: objectKey(*ech.ParentSplitParentID(), make([]byte, objectKeySize)), }) - return nil } func hasAnyECChunks(tx *bbolt.Tx, ech *objectSDK.ECHeader, cnr cid.ID) bool { From 50dccff7c1e16eb242bfe192ddc3b93d91c03427 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 10 Mar 2025 11:43:40 +0300 Subject: [PATCH 501/591] [#1633] morph/netmap: Merge node info in netmap with candidates list Applicable for both cases: when node uses local cache for netmap and when it disabled. 
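The core merge idea, stripped of the caching and change-detection details below (a sketch only; `mergeByKey` is illustrative and uses the already-imported `netmapSDK` alias):

```go
// mergeByKey overwrites cached node state with state from the freshly polled
// candidate list, matching entries by public key.
func mergeByKey(snapshot, candidates []netmapSDK.NodeInfo) {
	byKey := make(map[string]netmapSDK.NodeInfo, len(candidates))
	for _, c := range candidates {
		byKey[string(c.PublicKey())] = c
	}
	for i := range snapshot {
		if c, ok := byKey[string(snapshot[i].PublicKey())]; ok {
			snapshot[i].SetStatus(c.Status())
			snapshot[i].SetExternalAddresses(c.ExternalAddresses()...)
		}
	}
}
```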
Change-Id: I3050f537e20312a4b39e944aca763b77bd1e74c4 Signed-off-by: Anton Nikiforov --- cmd/frostfs-node/cache.go | 292 +++++++++++++++++++----- cmd/frostfs-node/cache_test.go | 74 ++++++ cmd/frostfs-node/config/morph/config.go | 17 ++ cmd/frostfs-node/morph.go | 5 +- cmd/frostfs-node/netmap_source.go | 55 +++++ config/example/node.yaml | 3 + docs/storage-node-configuration.md | 18 +- internal/logs/logs.go | 1 + 8 files changed, 397 insertions(+), 68 deletions(-) create mode 100644 cmd/frostfs-node/netmap_source.go diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 0fe56d2b0..38cee5837 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -1,20 +1,27 @@ package main import ( + "bytes" + "cmp" "context" + "slices" "sync" + "sync/atomic" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - lru "github.com/hashicorp/golang-lru/v2" "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/hashicorp/golang-lru/v2/simplelru" + "go.uber.org/zap" ) type netValueReader[K any, V any] func(ctx context.Context, cid K) (V, error) @@ -110,55 +117,6 @@ func (c *ttlNetCache[K, V]) remove(key K) { hit = c.cache.Remove(key) } -// entity that provides LRU cache interface. -type lruNetCache struct { - cache *lru.Cache[uint64, *netmapSDK.NetMap] - - netRdr netValueReader[uint64, *netmapSDK.NetMap] - - metrics cacheMetrics -} - -// newNetworkLRUCache returns wrapper over netValueReader with LRU cache. -func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap], metrics cacheMetrics) *lruNetCache { - cache, err := lru.New[uint64, *netmapSDK.NetMap](sz) - fatalOnErr(err) - - return &lruNetCache{ - cache: cache, - netRdr: netRdr, - metrics: metrics, - } -} - -// reads value by the key. -// -// updates the value from the network on cache miss. -// -// returned value should not be modified. -func (c *lruNetCache) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - hit := false - startedAt := time.Now() - defer func() { - c.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) - }() - - val, ok := c.cache.Get(key) - if ok { - hit = true - return val, nil - } - - val, err := c.netRdr(ctx, key) - if err != nil { - return nil, err - } - - c.cache.Add(key, val) - - return val, nil -} - // wrapper over TTL cache of values read from the network // that implements container storage. 
type ttlContainerStorage struct { @@ -200,20 +158,236 @@ func (s ttlContainerStorage) DeletionInfo(ctx context.Context, cnr cid.ID) (*con type lruNetmapSource struct { netState netmap.State - cache *lruNetCache + client rawSource + cache *simplelru.LRU[uint64, *atomic.Pointer[netmapSDK.NetMap]] + mtx sync.RWMutex + metrics cacheMetrics + log *logger.Logger + candidates atomic.Pointer[[]netmapSDK.NodeInfo] } -func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source { +type rawSource interface { + GetCandidates(ctx context.Context) ([]netmapSDK.NodeInfo, error) + GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) +} + +func newCachedNetmapStorage(ctx context.Context, log *logger.Logger, + netState netmap.State, client rawSource, wg *sync.WaitGroup, d time.Duration, +) netmap.Source { const netmapCacheSize = 10 - lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { - return v.GetNetMapByEpoch(ctx, key) - }, metrics.NewCacheMetrics("netmap")) + cache, err := simplelru.NewLRU[uint64, *atomic.Pointer[netmapSDK.NetMap]](netmapCacheSize, nil) + fatalOnErr(err) - return &lruNetmapSource{ - netState: s, - cache: lruNetmapCache, + src := &lruNetmapSource{ + netState: netState, + client: client, + cache: cache, + log: log, + metrics: metrics.NewCacheMetrics("netmap"), } + + wg.Add(1) + go func() { + defer wg.Done() + src.updateCandidates(ctx, d) + }() + + return src +} + +// updateCandidates routine to merge netmap in cache with candidates list. +func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) { + timer := time.NewTimer(d) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + newCandidates, err := s.client.GetCandidates(ctx) + if err != nil { + s.log.Debug(ctx, logs.FailedToUpdateNetmapCandidates, zap.Error(err)) + timer.Reset(d) + break + } + if len(newCandidates) == 0 { + s.candidates.Store(&newCandidates) + timer.Reset(d) + break + } + slices.SortFunc(newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + return cmp.Compare(n1.Hash(), n2.Hash()) + }) + + // Check once state changed + v := s.candidates.Load() + if v == nil { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + timer.Reset(d) + break + } + ret := slices.CompareFunc(*v, newCandidates, func(n1 netmapSDK.NodeInfo, n2 netmapSDK.NodeInfo) int { + if !bytes.Equal(n1.PublicKey(), n2.PublicKey()) || + uint32(n1.Status()) != uint32(n2.Status()) || + slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { + return 1 + } + var ne1 []string + n1.IterateNetworkEndpoints(func(s string) bool { + ne1 = append(ne1, s) + return false + }) + var ne2 []string + n2.IterateNetworkEndpoints(func(s string) bool { + ne2 = append(ne2, s) + return false + }) + return slices.Compare(ne1, ne2) + }) + if ret != 0 { + s.candidates.Store(&newCandidates) + s.mergeCacheWithCandidates(newCandidates) + } + timer.Reset(d) + } + } +} + +func (s *lruNetmapSource) mergeCacheWithCandidates(candidates []netmapSDK.NodeInfo) { + s.mtx.Lock() + tmp := s.cache.Values() + s.mtx.Unlock() + for _, pointer := range tmp { + nm := pointer.Load() + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + nm = nm.Clone() + mergeNetmapWithCandidates(updates, nm) + pointer.Store(nm) + } + } +} + +// reads value by the key. +// +// updates the value from the network on cache miss. +// +// returned value should not be modified. 
+func (s *lruNetmapSource) get(ctx context.Context, key uint64) (*netmapSDK.NetMap, error) { + hit := false + startedAt := time.Now() + defer func() { + s.metrics.AddMethodDuration("Get", time.Since(startedAt), hit) + }() + + s.mtx.RLock() + val, ok := s.cache.Get(key) + s.mtx.RUnlock() + if ok { + hit = true + return val.Load(), nil + } + + s.mtx.Lock() + defer s.mtx.Unlock() + + val, ok = s.cache.Get(key) + if ok { + hit = true + return val.Load(), nil + } + + nm, err := s.client.GetNetMapByEpoch(ctx, key) + if err != nil { + return nil, err + } + v := s.candidates.Load() + if v != nil { + updates := getNetMapNodesToUpdate(nm, *v) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + } + + p := atomic.Pointer[netmapSDK.NetMap]{} + p.Store(nm) + s.cache.Add(key, &p) + + return nm, nil +} + +// mergeNetmapWithCandidates updates nodes state in the provided netmap with state in the list of candidates. +func mergeNetmapWithCandidates(updates []nodeToUpdate, nm *netmapSDK.NetMap) { + for _, v := range updates { + if v.status != netmapSDK.UnspecifiedState { + nm.Nodes()[v.netmapIndex].SetStatus(v.status) + } + if v.externalAddresses != nil { + nm.Nodes()[v.netmapIndex].SetExternalAddresses(v.externalAddresses...) + } + if v.endpoints != nil { + nm.Nodes()[v.netmapIndex].SetNetworkEndpoints(v.endpoints...) + } + } +} + +type nodeToUpdate struct { + netmapIndex int + status netmapSDK.NodeState + externalAddresses []string + endpoints []string +} + +// getNetMapNodesToUpdate checks for the changes between provided netmap and the list of candidates. +func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInfo) []nodeToUpdate { + var res []nodeToUpdate + for i := range nm.Nodes() { + for _, cnd := range candidates { + if bytes.Equal(nm.Nodes()[i].PublicKey(), cnd.PublicKey()) { + var tmp nodeToUpdate + var update bool + + if cnd.Status() != nm.Nodes()[i].Status() && + (cnd.Status() == netmapSDK.Online || cnd.Status() == netmapSDK.Maintenance) { + update = true + tmp.status = cnd.Status() + } + + externalAddresses := cnd.ExternalAddresses() + if externalAddresses != nil && + slices.Compare(externalAddresses, nm.Nodes()[i].ExternalAddresses()) != 0 { + update = true + tmp.externalAddresses = externalAddresses + } + + nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) + nm.Nodes()[i].IterateNetworkEndpoints(func(s string) bool { + nodeEndpoints = append(nodeEndpoints, s) + return false + }) + candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) + cnd.IterateNetworkEndpoints(func(s string) bool { + candidateEndpoints = append(candidateEndpoints, s) + return false + }) + if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { + update = true + tmp.endpoints = candidateEndpoints + } + + if update { + tmp.netmapIndex = i + res = append(res, tmp) + } + + break + } + } + } + return res } func (s *lruNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { @@ -225,7 +399,7 @@ func (s *lruNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (* } func (s *lruNetmapSource) getNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { - val, err := s.cache.get(ctx, epoch) + val, err := s.get(ctx, epoch) if err != nil { return nil, err } diff --git a/cmd/frostfs-node/cache_test.go b/cmd/frostfs-node/cache_test.go index b1601aa67..24286826f 100644 --- a/cmd/frostfs-node/cache_test.go +++ b/cmd/frostfs-node/cache_test.go @@ -3,9 +3,11 @@ package main import ( "context" 
"errors" + "sync" "testing" "time" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/stretchr/testify/require" ) @@ -59,3 +61,75 @@ func testNetValueReader(_ context.Context, key string) (time.Time, error) { type noopCacheMetricts struct{} func (m *noopCacheMetricts) AddMethodDuration(method string, d time.Duration, hit bool) {} + +type rawSrc struct{} + +func (r *rawSrc) GetCandidates(_ context.Context) ([]netmapSDK.NodeInfo, error) { + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Online) + node0.SetExternalAddresses("1", "0") + node0.SetNetworkEndpoints("1", "0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Online) + node1.SetExternalAddresses("1", "0") + node1.SetNetworkEndpoints("1", "0") + + return []netmapSDK.NodeInfo{node0, node1}, nil +} + +func (r *rawSrc) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm := netmapSDK.NetMap{} + nm.SetEpoch(1) + + node0 := netmapSDK.NodeInfo{} + node0.SetPublicKey([]byte{byte(1)}) + node0.SetStatus(netmapSDK.Maintenance) + node0.SetExternalAddresses("0") + node0.SetNetworkEndpoints("0") + + node1 := netmapSDK.NodeInfo{} + node1.SetPublicKey([]byte{byte(1)}) + node1.SetStatus(netmapSDK.Maintenance) + node1.SetExternalAddresses("0") + node1.SetNetworkEndpoints("0") + + nm.SetNodes([]netmapSDK.NodeInfo{node0, node1}) + + return &nm, nil +} + +type st struct{} + +func (s *st) CurrentEpoch() uint64 { + return 1 +} + +func TestNetmapStorage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + wg := sync.WaitGroup{} + cache := newCachedNetmapStorage(ctx, nil, &st{}, &rawSrc{}, &wg, time.Millisecond*50) + + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + require.True(t, nm.Nodes()[0].Status() == netmapSDK.Maintenance) + require.True(t, len(nm.Nodes()[0].ExternalAddresses()) == 1) + require.True(t, nm.Nodes()[0].NumberOfNetworkEndpoints() == 1) + + require.Eventually(t, func() bool { + nm, err := cache.GetNetMapByEpoch(ctx, 1) + require.NoError(t, err) + for _, node := range nm.Nodes() { + if !(node.Status() == netmapSDK.Online && len(node.ExternalAddresses()) == 2 && + node.NumberOfNetworkEndpoints() == 2) { + return false + } + } + return true + }, time.Second*5, time.Millisecond*10) + + cancel() + wg.Wait() +} diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go index d089870ea..a9f774d18 100644 --- a/cmd/frostfs-node/config/morph/config.go +++ b/cmd/frostfs-node/config/morph/config.go @@ -33,6 +33,9 @@ const ( // ContainerCacheSizeDefault represents the default size for the container cache. ContainerCacheSizeDefault = 100 + + // PollCandidatesTimeoutDefault is a default poll timeout for netmap candidates. + PollCandidatesTimeoutDefault = 20 * time.Second ) var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section") @@ -154,3 +157,17 @@ func FrostfsIDCacheSize(c *config.Config) uint32 { } return config.Uint32Safe(c.Sub(subsection), "frostfsid_cache_size") } + +// NetmapCandidatesPollInterval returns the value of "netmap.candidates.poll_interval" config parameter +// from "morph" section. +// +// Returns PollCandidatesTimeoutDefault if the value is not positive duration. +func NetmapCandidatesPollInterval(c *config.Config) time.Duration { + v := config.DurationSafe(c.Sub(subsection). 
+ Sub("netmap").Sub("candidates"), "poll_interval") + if v > 0 { + return v + } + + return PollCandidatesTimeoutDefault +} diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 657e22389..d3c0f7b81 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -60,10 +60,11 @@ func (c *cfg) initMorphComponents(ctx context.Context) { } if c.cfgMorph.cacheTTL < 0 { - netmapSource = wrap + netmapSource = newRawNetmapStorage(wrap) } else { // use RPC node as source of netmap (with caching) - netmapSource = newCachedNetmapStorage(c.cfgNetmap.state, wrap) + netmapSource = newCachedNetmapStorage(ctx, c.log, c.cfgNetmap.state, wrap, &c.wg, + morphconfig.NetmapCandidatesPollInterval(c.appCfg)) } c.netMapSource = netmapSource diff --git a/cmd/frostfs-node/netmap_source.go b/cmd/frostfs-node/netmap_source.go new file mode 100644 index 000000000..e6be9cdf5 --- /dev/null +++ b/cmd/frostfs-node/netmap_source.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) + +type rawNetmapSource struct { + client *netmapClient.Client +} + +func newRawNetmapStorage(client *netmapClient.Client) netmap.Source { + return &rawNetmapSource{ + client: client, + } +} + +func (s *rawNetmapSource) GetNetMap(ctx context.Context, diff uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMap(ctx, diff) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) GetNetMapByEpoch(ctx context.Context, epoch uint64) (*netmapSDK.NetMap, error) { + nm, err := s.client.GetNetMapByEpoch(ctx, epoch) + if err != nil { + return nil, err + } + candidates, err := s.client.GetCandidates(ctx) + if err != nil { + return nil, err + } + updates := getNetMapNodesToUpdate(nm, candidates) + if len(updates) > 0 { + mergeNetmapWithCandidates(updates, nm) + } + return nm, nil +} + +func (s *rawNetmapSource) Epoch(ctx context.Context) (uint64, error) { + return s.client.Epoch(ctx) +} diff --git a/config/example/node.yaml b/config/example/node.yaml index a448ba7ce..0b6c7b12c 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -95,6 +95,9 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 ape_chain_cache_size: 100000 + netmap: + candidates: + poll_interval: 20s apiclient: dial_timeout: 15s # timeout for FrostFS API client connection diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index b5c8aadfe..5fe011ece 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -148,15 +148,19 @@ morph: - address: wss://rpc2.morph.frostfs.info:40341/ws priority: 2 switch_interval: 2m + netmap: + candidates: + poll_interval: 20s ``` -| Parameter | Type | Default value | Description | -| ---------------------- | --------------------------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. 
| -| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. | -| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. | -| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. | -| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. | +| Parameter | Type | Default value | Description | +|-----------------------------------|-----------------------------------------------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `dial_timeout` | `duration` | `5s` | Timeout for dialing connections to N3 RPCs. | +| `cache_ttl` | `duration` | Morph block time | Sidechain cache TTL value (min interval between similar calls).
Negative value disables caching.
Cached entities: containers, container lists, eACL tables. |
+| `rpc_endpoint` | list of [endpoint descriptions](#rpc_endpoint-subsection) | | Array of endpoint descriptions. |
+| `switch_interval` | `duration` | `2m` | Time interval between the attempts to connect to the highest priority RPC node if the connection is not established yet. |
+| `ape_chain_cache_size` | `int` | `10000` | Size of the morph cache for APE chains. |
+| `netmap.candidates.poll_interval` | `duration` | `20s` | Interval at which netmap candidates are polled and merged into the locally cached netmap. |
## `rpc_endpoint` subsection
| Parameter | Type | Default value | Description |
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 5b42b25ba..3a3ceb150 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -515,4 +515,5 @@ const (
FailedToGetNetmapToAdjustIOTag = "failed to get netmap to adjust IO tag"
FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`"
WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object"
+ FailedToUpdateNetmapCandidates = "update netmap candidates failed"
)
From 272128e61fd20c53aa0a3b02da8bdc77f7779e26 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 4 Apr 2025 13:32:11 +0300
Subject: [PATCH 502/591] Revert "[#652] adm: Group independent stages in batches"
This reverts commit d00c606feed8ad776fe6df65b601b81790e7dfbe.
There are internal dependencies inside the last stage: first we register the NNS root, and only then can we add records. Revert for now; we will reapply the change after more testing.
Change-Id: I760632b5628caf04849d4a64c714cf286051f357
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/modules/morph/helper/n3client.go | 4 +---
.../internal/modules/morph/initialize/initialize.go | 8 +-------
2 files changed, 2 insertions(+), 10 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
index d6ca012ce..3f3a66cb6 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/n3client.go
@@ -40,8 +40,6 @@ type ClientContext struct {
CommitteeAct *actor.Actor // committee actor with the Global witness scope
ReadOnlyInvoker *invoker.Invoker // R/O contract invoker, does not contain any signer
SentTxs []HashVUBPair
-
- AwaitDisabled bool
}
func NewRemoteClient(v *viper.Viper) (Client, error) {
@@ -122,7 +120,7 @@ func (c *ClientContext) SendTx(tx *transaction.Transaction, cmd *cobra.Command,
}
func (c *ClientContext) AwaitTx(cmd *cobra.Command) error {
- if len(c.SentTxs) == 0 || c.AwaitDisabled {
+ if len(c.SentTxs) == 0 {
return nil
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go
index 4d39dc662..cdaf7d3bc 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize.go
@@ -39,7 +39,6 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
return err
}
- initCtx.AwaitDisabled = true
cmd.Println("Stage 4.1: Transfer GAS to proxy contract.")
if err := transferGASToProxy(initCtx); err != nil {
return err
@@ -56,10 +55,5 @@ func initializeSideChainCmd(cmd *cobra.Command, _ []string) error {
}
cmd.Println("Stage 7: set addresses in NNS.")
- if err := setNNS(initCtx); err != nil {
- return err
- }
-
- initCtx.AwaitDisabled = false
- return 
initCtx.AwaitTx() + return setNNS(initCtx) } From 2938498b52369bc3abaac3ab1f8e790bba414e8e Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Apr 2025 15:23:41 +0300 Subject: [PATCH 503/591] [#1689] adm: Fix NNS root availability check After https://git.frostfs.info/TrueCloudLab/frostfs-contract/pulls/117 we allow checking for root domain availability directly. Before this commit, NNSRootRegistered() has always returned true, so the actual root registration happened as a side-effect of the following code, because NNS registers all parent domains, if they are missing. Change-Id: Icf98f130e77d31b4af7b69697989183c1c8f6a56 Signed-off-by: Evgenii Stratonikov --- .../internal/modules/morph/helper/initialize_ctx.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index 8e5615baa..27052697f 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -21,6 +21,7 @@ import ( "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" "github.com/nspcc-dev/neo-go/pkg/rpcclient/management" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/unwrap" "github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag" "github.com/nspcc-dev/neo-go/pkg/smartcontract/context" "github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest" @@ -28,7 +29,6 @@ import ( "github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/vm/emit" "github.com/nspcc-dev/neo-go/pkg/vm/opcode" - "github.com/nspcc-dev/neo-go/pkg/vm/vmstate" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -524,12 +524,8 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U } func (c *InitializeContext) NNSRootRegistered(nnsHash util.Uint160, zone string) (bool, error) { - res, err := c.CommitteeAct.Call(nnsHash, "isAvailable", "name."+zone) - if err != nil { - return false, err - } - - return res.State == vmstate.Halt.String(), nil + avail, err := unwrap.Bool(c.CommitteeAct.Call(nnsHash, "isAvailable", zone)) + return !avail, err } func (c *InitializeContext) IsUpdated(ctrHash util.Uint160, cs *ContractState) bool { From 5350632e01f2b01f8068dbfec2b9829ae8721519 Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Fri, 4 Apr 2025 12:48:22 +0300 Subject: [PATCH 504/591] [#1705] engine/test: Increase evacuation timeout This test was flaky in CI probably because of runner load fluctuations. Let's increase the timeout and see if the flakiness goes away. 
(close #1705) Change-Id: I76f96e3d6f4adb3d5de0e27b8ee6b47685236277 Signed-off-by: Vitaliy Potyarkin --- pkg/local_object_storage/engine/evacuate_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index bd5222b78..359e49481 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -208,7 +208,7 @@ func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationS st, err = e.GetEvacuationState(context.Background()) require.NoError(t, err) return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 3*time.Second, 10*time.Millisecond) + }, 6*time.Second, 10*time.Millisecond) return st } From fbc623f34e5d50d3ca16f1b63ba0ac5fbf011a0b Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 2 Apr 2025 10:47:42 +0300 Subject: [PATCH 505/591] [#1701] go.mod: Bump `policy-engine` version Change-Id: I7aa359bf235034d6459275d366a276d9930fa227 Signed-off-by: Airat Arifullin --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 23be9c822..9ed5e4187 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 diff --git a/go.sum b/go.sum index e5d476ffe..9727d8786 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/96 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= From 979d4bb2aec9ce74a45b83f00defa532e5d764db Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Wed, 2 Apr 2025 11:15:40 +0300 Subject: [PATCH 506/591] [#1701] tree: Form `$Tree:ID` resource property for APE * Make `verifyClient`, `checkAPE` receive `treeID` from request body; * Make `newAPERequest` set `$Tree:ID` property * Add unit-test to check if a rule 
for `$Tree:ID` works Close #1701 Change-Id: I834fed366e8adfd4b5c07bf50aac09af6239991b Signed-off-by: Airat Arifullin --- pkg/services/tree/ape.go | 12 +++++--- pkg/services/tree/ape_test.go | 43 +++++++++++++++++++++++++++-- pkg/services/tree/service.go | 12 ++++---- pkg/services/tree/signature.go | 4 +-- pkg/services/tree/signature_test.go | 36 ++++++++++++------------ 5 files changed, 76 insertions(+), 31 deletions(-) diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go index c4b03cbe6..58757ff6d 100644 --- a/pkg/services/tree/ape.go +++ b/pkg/services/tree/ape.go @@ -22,7 +22,7 @@ import ( ) func (s *Service) newAPERequest(ctx context.Context, namespace string, - cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) (aperequest.Request, error) { schemaMethod, err := converter.SchemaMethodFromACLOperation(operation) if err != nil { @@ -53,15 +53,19 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string, resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString()) } + resProps := map[string]string{ + nativeschema.ProperyKeyTreeID: treeID, + } + return aperequest.NewRequest( schemaMethod, - aperequest.NewResource(resourceName, make(map[string]string)), + aperequest.NewResource(resourceName, resProps), reqProps, ), nil } func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, - container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, + container *core.Container, cid cid.ID, treeID string, operation acl.Op, role acl.Role, publicKey *keys.PublicKey, ) error { namespace := "" cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns") @@ -69,7 +73,7 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token, namespace = cntNamespace } - request, err := s.newAPERequest(ctx, namespace, cid, operation, role, publicKey) + request, err := s.newAPERequest(ctx, namespace, cid, treeID, operation, role, publicKey) if err != nil { return fmt.Errorf("failed to create ape request: %w", err) } diff --git a/pkg/services/tree/ape_test.go b/pkg/services/tree/ape_test.go index 0afc7660a..7b209fd47 100644 --- a/pkg/services/tree/ape_test.go +++ b/pkg/services/tree/ape_test.go @@ -107,6 +107,45 @@ func TestCheckAPE(t *testing.T) { cid := cid.ID{} _ = cid.DecodeString(containerID) + t.Run("treeID rule", func(t *testing.T) { + los := inmemory.NewInmemoryLocalStorage() + mcs := inmemory.NewInmemoryMorphRuleChainStorage() + fid := newFrostfsIDProviderMock(t) + s := Service{ + cfg: cfg{ + frostfsidSubjectProvider: fid, + }, + apeChecker: checkercore.New(los, mcs, fid, &stMock{}), + } + + mcs.AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(containerID), &chain.Chain{ + Rules: []chain.Rule{ + { + Status: chain.QuotaLimitReached, + Actions: chain.Actions{Names: []string{nativeschema.MethodGetObject}}, + Resources: chain.Resources{ + Names: []string{fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, containerID)}, + }, + Condition: []chain.Condition{ + { + Op: chain.CondStringEquals, + Kind: chain.KindResource, + Key: nativeschema.ProperyKeyTreeID, + Value: versionTreeID, + }, + }, + }, + }, + MatchType: chain.MatchTypeFirstMatch, + }) + + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectGet, acl.RoleOwner, senderPrivateKey.PublicKey()) + + var chErr *checkercore.ChainRouterError + 
require.ErrorAs(t, err, &chErr) + require.Equal(t, chain.QuotaLimitReached, chErr.Status()) + }) + t.Run("put non-tombstone rule won't affect tree remove", func(t *testing.T) { los := inmemory.NewInmemoryLocalStorage() mcs := inmemory.NewInmemoryMorphRuleChainStorage() @@ -152,7 +191,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectDelete, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) @@ -201,7 +240,7 @@ func TestCheckAPE(t *testing.T) { MatchType: chain.MatchTypeFirstMatch, }) - err := s.checkAPE(context.Background(), nil, rootCnr, cid, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) + err := s.checkAPE(context.Background(), nil, rootCnr, cid, versionTreeID, acl.OpObjectPut, acl.RoleOwner, senderPrivateKey.PublicKey()) require.NoError(t, err) }) } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index b9bb96bab..85bb03a28 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -117,7 +117,7 @@ func (s *Service) Add(ctx context.Context, req *AddRequest) (*AddResponse, error return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -161,7 +161,7 @@ func (s *Service) AddByPath(ctx context.Context, req *AddByPathRequest) (*AddByP return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -217,7 +217,7 @@ func (s *Service) Remove(ctx context.Context, req *RemoveRequest) (*RemoveRespon return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectDelete) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectDelete) if err != nil { return nil, err } @@ -262,7 +262,7 @@ func (s *Service) Move(ctx context.Context, req *MoveRequest) (*MoveResponse, er return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectPut) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectPut) if err != nil { return nil, err } @@ -306,7 +306,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest) return nil, err } - err := s.verifyClient(ctx, req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(ctx, req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return nil, err } @@ -377,7 +377,7 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS return err } - err := s.verifyClient(srv.Context(), req, cid, b.GetBearerToken(), acl.OpObjectGet) + err := s.verifyClient(srv.Context(), req, cid, req.GetBody().GetTreeId(), b.GetBearerToken(), acl.OpObjectGet) if err != nil { return err } diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index d15438e81..89056056d 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -38,7 +38,7 @@ var ( // Operation must be one of: // - 1. ObjectPut; // - 2. ObjectGet. 
-func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, rawBearer []byte, op acl.Op) error { +func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, treeID string, rawBearer []byte, op acl.Op) error { err := verifyMessage(req) if err != nil { return err @@ -64,7 +64,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID, return fmt.Errorf("can't get request role: %w", err) } - if err = s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey); err != nil { + if err = s.checkAPE(ctx, bt, cnr, cid, treeID, op, role, pubKey); err != nil { return apeErr(err) } return nil diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 97f8a727a..947de8e58 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -31,6 +31,8 @@ import ( "github.com/stretchr/testify/require" ) +const versionTreeID = "version" + type dummyNetmapSource struct { netmap.Source } @@ -168,26 +170,26 @@ func TestMessageSign(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRW) t.Run("missing signature, no panic", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) t.Run("invalid CID", func(t *testing.T) { - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) cnr.Value.SetBasicACL(acl.Private) t.Run("extension disabled", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid2, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid2, versionTreeID, nil, op)) }) t.Run("invalid key", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, nil, op)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, nil, op)) }) t.Run("bearer", func(t *testing.T) { @@ -200,7 +202,7 @@ func TestMessageSign(t *testing.T) { t.Run("invalid bearer", func(t *testing.T) { req.Body.BearerToken = []byte{0xFF} require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer CID", func(t *testing.T) { @@ -209,7 +211,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer owner", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -217,7 +219,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, 
&privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("invalid bearer signature", func(t *testing.T) { bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -229,7 +231,7 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bv2.StableMarshal(nil) require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) t.Run("impersonate", func(t *testing.T) { @@ -241,8 +243,8 @@ func TestMessageSign(t *testing.T) { req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) @@ -252,18 +254,18 @@ func TestMessageSign(t *testing.T) { t.Run("put and get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("only get", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[2].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) t.Run("none", func(t *testing.T) { require.NoError(t, SignMessage(req, &privs[3].PrivateKey)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.Error(t, s.verifyClient(context.Background(), req, cid1, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) }) } From 52367dc9b259b6044f0fc7fda09e4339b7170f3d Mon Sep 17 00:00:00 2001 From: Evgenii 
Stratonikov Date: Fri, 4 Apr 2025 20:07:23 +0300 Subject: [PATCH 507/591] [#1689] go.mod: Update sdk-go Change-Id: I72052fe11e66e4c77f4aef6cb2c0f038aa7b0d1f Signed-off-by: Evgenii Stratonikov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9ed5e4187..5ed4a90be 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 diff --git a/go.sum b/go.sum index 9727d8786..38aba9bde 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945 h1:zM2l316J55h9p30snl6vHBI/h0xmnuqZjnxIjRDtJZw= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250326101739-4d36a49d3945/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d h1:ZLKDupw362Ciing7kdIZhDYGMyo2QZyJ6sS/8X9QWJ0= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d/go.mod h1:2PWt5GwJTnhjHp+mankcfCeAJBMn7puxPm+RS+lliVk= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= From 3d771aa21c8fb961c53816566db77b3b0274dc40 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 5 Apr 2025 08:56:22 +0300 Subject: [PATCH 508/591] [#1689] morph: Remove `frostfsError` type It has no custom methods defined, only adds `frostfs error: ` prefix to the error message. The utility of this prefix is debatable, failed invocations already have `invocation failed` prefix. Change-Id: If25ebb3679497f3f10acde43b596c81d52351907 Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/client.go | 18 ++---------------- pkg/morph/client/notary.go | 8 +++----- pkg/morph/client/util.go | 2 +- pkg/morph/client/waiter.go | 2 +- 4 files changed, 7 insertions(+), 23 deletions(-) diff --git a/pkg/morph/client/client.go b/pkg/morph/client/client.go index 1c12130b7..aab058d27 100644 --- a/pkg/morph/client/client.go +++ b/pkg/morph/client/client.go @@ -151,20 +151,6 @@ func (e *notHaltStateError) Error() string { ) } -// implementation of error interface for FrostFS-specific errors. 
-type frostfsError struct { - err error -} - -func (e frostfsError) Error() string { - return fmt.Sprintf("frostfs error: %v", e.err) -} - -// wraps FrostFS-specific error into frostfsError. Arg must not be nil. -func wrapFrostFSError(err error) error { - return frostfsError{err} -} - // Invoke invokes contract method by sending transaction into blockchain. // Returns valid until block value. // Supported args types: int64, string, util.Uint160, []byte and bool. @@ -228,7 +214,7 @@ func (c *Client) TestInvokeIterator(cb func(stackitem.Item) error, batchSize int if err != nil { return err } else if val.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return ¬HaltStateError{state: val.State, exception: val.FaultException} } arr, sid, r, err := unwrap.ArrayAndSessionIterator(val, err) @@ -292,7 +278,7 @@ func (c *Client) TestInvoke(contract util.Uint160, method string, args ...any) ( } if val.State != HaltState { - return nil, wrapFrostFSError(¬HaltStateError{state: val.State, exception: val.FaultException}) + return nil, ¬HaltStateError{state: val.State, exception: val.FaultException} } success = true diff --git a/pkg/morph/client/notary.go b/pkg/morph/client/notary.go index 4e20a3639..448702613 100644 --- a/pkg/morph/client/notary.go +++ b/pkg/morph/client/notary.go @@ -461,7 +461,7 @@ func (c *Client) notaryInvoke(ctx context.Context, committee, invokedByAlpha boo mainH, fbH, untilActual, err := nAct.Notarize(nAct.MakeTunedCall(contract, method, nil, func(r *result.Invoke, t *transaction.Transaction) error { if r.State != vmstate.Halt.String() { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.ValidUntilBlock = until @@ -608,8 +608,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB multisigAccount = wallet.NewAccountFromPrivateKey(c.acc.PrivateKey()) err := multisigAccount.ConvertMultisig(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("convert account to inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("convert account to inner ring multisig wallet: %w", err) } } else { // alphabet multisig redeem script is @@ -617,8 +616,7 @@ func (c *Client) notaryMultisigAccount(ir []*keys.PublicKey, committee, invokedB // inner ring multiaddress witness multisigAccount, err = notary.FakeMultisigAccount(m, ir) if err != nil { - // wrap error as FrostFS-specific since the call is not related to any client - return nil, wrapFrostFSError(fmt.Errorf("make inner ring multisig wallet: %w", err)) + return nil, fmt.Errorf("make inner ring multisig wallet: %w", err) } } diff --git a/pkg/morph/client/util.go b/pkg/morph/client/util.go index f68d39beb..f7b6705a8 100644 --- a/pkg/morph/client/util.go +++ b/pkg/morph/client/util.go @@ -98,7 +98,7 @@ func StringFromStackItem(param stackitem.Item) (string, error) { func addFeeCheckerModifier(add int64) func(r *result.Invoke, t *transaction.Transaction) error { return func(r *result.Invoke, t *transaction.Transaction) error { if r.State != HaltState { - return wrapFrostFSError(¬HaltStateError{state: r.State, exception: r.FaultException}) + return ¬HaltStateError{state: r.State, exception: r.FaultException} } t.SystemFee += add diff --git a/pkg/morph/client/waiter.go b/pkg/morph/client/waiter.go index 5b9d2cbe0..87fcf84b8 100644 --- 
a/pkg/morph/client/waiter.go +++ b/pkg/morph/client/waiter.go @@ -47,5 +47,5 @@ func (c *Client) WaitTxHalt(ctx context.Context, vub uint32, h util.Uint256) err if res.VMState.HasFlag(vmstate.Halt) { return nil } - return wrapFrostFSError(¬HaltStateError{state: res.VMState.String(), exception: res.FaultException}) + return ¬HaltStateError{state: res.VMState.String(), exception: res.FaultException} } From d933609084a23595e36bf48e5c226664d315bc23 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 5 Apr 2025 09:07:03 +0300 Subject: [PATCH 509/591] [#1689] client/netmap: Refactor Client.config() There are problems with that code: - explicit casts, - `ErrConfigNotFound` which is not a part of a public API, - hand-rolled assertions, even though neo-go already has everything we need. So, remove the error, use `stackitem/Item.Try*()` methods for conversions. Note, that readUint64Config() returns an error if the parameter is missing. This is likely an error, but this behaviour is preserved in this PR: `TryInteger()` returns error when applied to `Null`. By contract, `TryBool()` returns false for `Null`, so this PR introduces no functional changes. Refs https://github.com/nspcc-dev/neo-go/blob/82c7a50b8a308698e1440a716d34232094f9f55f/pkg/vm/stackitem/item.go#L418 Change-Id: I445d28a7c6b5abb9a2bb97b57c0cc42d617e16f7 Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/netmap/config.go | 46 +++++++------------------------ 1 file changed, 10 insertions(+), 36 deletions(-) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index fcdb70e3f..78063b857 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -2,7 +2,6 @@ package netmap import ( "context" - "errors" "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" @@ -106,29 +105,27 @@ func (c *Client) MaintenanceModeAllowed(ctx context.Context) (bool, error) { } func (c *Client) readUInt64Config(ctx context.Context, key string) (uint64, error) { - v, err := c.config(ctx, []byte(key), IntegerAssert) + v, err := c.config(ctx, []byte(key)) if err != nil { return 0, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // IntegerAssert is guaranteed to return int64 if the error is nil. - return uint64(v.(int64)), nil + bi, err := v.TryInteger() + if err != nil { + return 0, err + } + return bi.Uint64(), nil } // reads boolean value by the given key from the FrostFS network configuration // stored in the Sidechain. Returns false if key is not presented. func (c *Client) readBoolConfig(ctx context.Context, key string) (bool, error) { - v, err := c.config(ctx, []byte(key), BoolAssert) + v, err := c.config(ctx, []byte(key)) if err != nil { - if errors.Is(err, ErrConfigNotFound) { - return false, nil - } - return false, fmt.Errorf("read netconfig value '%s': %w", key, err) } - // BoolAssert is guaranteed to return bool if the error is nil. - return v.(bool), nil + return v.TryBool() } // SetConfigPrm groups parameters of SetConfig operation. @@ -277,15 +274,11 @@ func bytesToBool(val []byte) bool { return false } -// ErrConfigNotFound is returned when the requested key was not found -// in the network config (returned value is `Null`). -var ErrConfigNotFound = errors.New("config value not found") - // config performs the test invoke of get config value // method of FrostFS Netmap contract. // // Returns ErrConfigNotFound if config key is not found in the contract. 
-func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.Item) (any, error)) (any, error) { +func (c *Client) config(ctx context.Context, key []byte) (stackitem.Item, error) { prm := client.TestInvokePrm{} prm.SetMethod(configMethod) prm.SetArgs(key) @@ -301,26 +294,7 @@ func (c *Client) config(ctx context.Context, key []byte, assert func(stackitem.I configMethod, ln) } - if _, ok := items[0].(stackitem.Null); ok { - return nil, ErrConfigNotFound - } - - return assert(items[0]) -} - -// IntegerAssert converts stack item to int64. -func IntegerAssert(item stackitem.Item) (any, error) { - return client.IntFromStackItem(item) -} - -// StringAssert converts stack item to string. -func StringAssert(item stackitem.Item) (any, error) { - return client.StringFromStackItem(item) -} - -// BoolAssert converts stack item to bool. -func BoolAssert(item stackitem.Item) (any, error) { - return client.BoolFromStackItem(item) + return items[0], nil } // iterateRecords iterates over all config records and passes them to f. From c4f941a5f5217024ed8ba3cebda22d3518bd18f7 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Sat, 5 Apr 2025 09:16:25 +0300 Subject: [PATCH 510/591] [#1689] client/netmap: Remove useless error-handling No functional changes. Change-Id: I3a53c992c3ce5e8c6db252abb09aa40626142a97 Signed-off-by: Evgenii Stratonikov --- pkg/morph/client/netmap/config.go | 42 +++++-------------------------- 1 file changed, 6 insertions(+), 36 deletions(-) diff --git a/pkg/morph/client/netmap/config.go b/pkg/morph/client/netmap/config.go index 78063b857..3f6aed506 100644 --- a/pkg/morph/client/netmap/config.go +++ b/pkg/morph/client/netmap/config.go @@ -25,44 +25,24 @@ const ( // MaxObjectSize receives max object size configuration // value through the Netmap contract call. func (c *Client) MaxObjectSize(ctx context.Context) (uint64, error) { - objectSize, err := c.readUInt64Config(ctx, MaxObjectSizeConfig) - if err != nil { - return 0, err - } - - return objectSize, nil + return c.readUInt64Config(ctx, MaxObjectSizeConfig) } // EpochDuration returns number of sidechain blocks per one FrostFS epoch. func (c *Client) EpochDuration(ctx context.Context) (uint64, error) { - epochDuration, err := c.readUInt64Config(ctx, EpochDurationConfig) - if err != nil { - return 0, err - } - - return epochDuration, nil + return c.readUInt64Config(ctx, EpochDurationConfig) } // ContainerFee returns fee paid by container owner to each alphabet node // for container registration. func (c *Client) ContainerFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, ContainerFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, ContainerFeeConfig) } // ContainerAliasFee returns additional fee paid by container owner to each // alphabet node for container nice name registration. func (c *Client) ContainerAliasFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, ContainerAliasFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, ContainerAliasFeeConfig) } // HomomorphicHashDisabled returns global configuration value of homomorphic hashing @@ -76,23 +56,13 @@ func (c *Client) HomomorphicHashDisabled(ctx context.Context) (bool, error) { // InnerRingCandidateFee returns global configuration value of fee paid by // node to be in inner ring candidates list. 
func (c *Client) InnerRingCandidateFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, IrCandidateFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, IrCandidateFeeConfig) } // WithdrawFee returns global configuration value of fee paid by user to // withdraw assets from FrostFS contract. func (c *Client) WithdrawFee(ctx context.Context) (uint64, error) { - fee, err := c.readUInt64Config(ctx, WithdrawFeeConfig) - if err != nil { - return 0, err - } - - return fee, nil + return c.readUInt64Config(ctx, WithdrawFeeConfig) } // MaintenanceModeAllowed reads admission of "maintenance" state from the From 0c5d74729c62d535959965952fd88fe3fbdeffb2 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 13:43:30 +0300 Subject: [PATCH 511/591] [#1679] node: Fix 'revive' warning Change-Id: I74ff6332b10f17a329c5d108d01d43002e92aafd Signed-off-by: Alexander Chuprov --- pkg/util/ape/parser.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/util/ape/parser.go b/pkg/util/ape/parser.go index a34a17f6f..6f114d45b 100644 --- a/pkg/util/ape/parser.go +++ b/pkg/util/ape/parser.go @@ -174,11 +174,11 @@ func parseStatus(lexeme string) (apechain.Status, error) { case "deny": if !found { return apechain.AccessDenied, nil - } else if strings.EqualFold(expression, "QuotaLimitReached") { - return apechain.QuotaLimitReached, nil - } else { - return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) } + if strings.EqualFold(expression, "QuotaLimitReached") { + return apechain.QuotaLimitReached, nil + } + return 0, fmt.Errorf("%w: %s", errUnknownStatusDetail, expression) case "allow": if found { return 0, errUnknownStatusDetail From b4b053cecd4c85361eee345b4b628e98412d270e Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 13:46:49 +0300 Subject: [PATCH 512/591] [#1679] node: Fix 'gocognit' warning Change-Id: I6e2a278af51869c05c306c2910ba85130e39532e Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/engine/lock.go | 29 ++++++++++++++++--------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 5d43e59df..18fff9cad 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -84,17 +84,11 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo var siErr *objectSDK.SplitInfoError var eiErr *objectSDK.ECInfoError if errors.As(err, &eiErr) { - eclocked := []oid.ID{locked} - for _, chunk := range eiErr.ECInfo().Chunks { - var objID oid.ID - err = objID.ReadFromV2(chunk.ID) - if err != nil { - e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), - zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) - return false - } - eclocked = append(eclocked, objID) + eclocked, ok := e.checkECLocked(ctx, sh, idCnr, locker, locked, eiErr) + if !ok { + return false } + err = sh.Lock(ctx, idCnr, locker, eclocked) if err != nil { e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), @@ -137,3 +131,18 @@ func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, lo }) return } + +func (e *StorageEngine) checkECLocked(ctx context.Context, sh hashedShard, idCnr cid.ID, locker, locked oid.ID, eiErr *objectSDK.ECInfoError) ([]oid.ID, bool) { + eclocked := []oid.ID{locked} + for _, 
chunk := range eiErr.ECInfo().Chunks { + var objID oid.ID + err := objID.ReadFromV2(chunk.ID) + if err != nil { + e.reportShardError(ctx, sh, "could not lock object in shard", err, zap.Stringer("container_id", idCnr), + zap.Stringer("locker_id", locker), zap.Stringer("locked_id", locked)) + return nil, false + } + eclocked = append(eclocked, objID) + } + return eclocked, true +} From 9b5c1da40f2eac063ce38ca05d9211deba541234 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 14:21:44 +0300 Subject: [PATCH 513/591] [#1679] linter: Bump 'golangci-lint' to v1.64.8 - Removed deprecated config option 'linters.govet.check-shadowing', replaced with enabling the 'shadow' linter. - Removed usage of deprecated 'tenv' linter, replaced by 'usetesting'. Change-Id: Ib1bd1ec83b0fd55a47e405b290bc2bc967b9389c Signed-off-by: Alexander Chuprov --- .golangci.yml | 7 ++----- Makefile | 4 ++-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f21a46248..18de49425 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -27,9 +27,6 @@ linters-settings: - standard - default custom-order: true - govet: - # report about shadowed variables - check-shadowing: false staticcheck: checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. funlen: @@ -60,8 +57,8 @@ linters-settings: linters: enable: # mandatory linters - - govet - revive + - predeclared # some default golangci-lint linters - errcheck @@ -94,8 +91,8 @@ linters: - testifylint - protogetter - intrange - - tenv - unconvert - unparam + - usetesting disable-all: true fast: false diff --git a/Makefile b/Makefile index 321365f0d..5b55c9eec 100755 --- a/Makefile +++ b/Makefile @@ -9,8 +9,8 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.23 -LINT_VERSION ?= 1.62.2 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.8 +LINT_VERSION ?= 1.64.8 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) PROTOC_OS_VERSION=osx-x86_64 From 923f0acf8f03a485c665f9b610bd5f96b5c0200d Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 31 Mar 2025 05:12:22 +0300 Subject: [PATCH 514/591] [#1564] cli: Fix output of `object nodes` command The object nodes command misleadingly reported the number of "found data objects" as if it matched the actual expected amount, which could be incorrect for EC objects. Updated the output wording to explicitly distinguish between currently available data objects and total objects per the EC schema. 
Change-Id: Ib36b89db58ae66d8978baf5a16b59435db9a068d Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-cli/modules/object/nodes.go | 59 ++++++++++++++++++------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index bc34b370d..734b557a4 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -48,6 +48,12 @@ type ecHeader struct { parent oid.ID } +type objectCounter struct { + sync.Mutex + total uint32 + isECcounted bool +} + type objectPlacement struct { requiredNodes []netmapSDK.NodeInfo confirmedNodes []netmapSDK.NodeInfo @@ -56,6 +62,7 @@ type objectPlacement struct { type objectNodesResult struct { errors []error placements map[oid.ID]objectPlacement + total uint32 } type ObjNodesDataObject struct { @@ -106,18 +113,18 @@ func objectNodes(cmd *cobra.Command, _ []string) { pk := key.GetOrGenerate(cmd) cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC) - objects := getPhyObjects(cmd, cnrID, objID, cli, pk) + objects, count := getPhyObjects(cmd, cnrID, objID, cli, pk) placementPolicy, netmap := getPlacementPolicyAndNetmap(cmd, cnrID, cli) result := getRequiredPlacement(cmd, objects, placementPolicy, netmap) - getActualPlacement(cmd, netmap, pk, objects, result) + getActualPlacement(cmd, netmap, pk, objects, count, result) printPlacement(cmd, objID, objects, result) } -func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) []phyObject { +func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) ([]phyObject, int) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -145,7 +152,7 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C parent: res.Header().ECHeader().Parent(), } } - return []phyObject{obj} + return []phyObject{obj}, 1 } var errSplitInfo *objectSDK.SplitInfoError @@ -155,29 +162,34 @@ func getPhyObjects(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C var ecInfoError *objectSDK.ECInfoError if errors.As(err, &ecInfoError) { - return getECObjectChunks(cmd, cnrID, objID, ecInfoError) + return getECObjectChunks(cmd, cnrID, objID, ecInfoError), 1 } commonCmd.ExitOnErr(cmd, "failed to get object info: %w", err) - return nil + return nil, 0 } -func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []phyObject { - members := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) - return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead) +func getComplexObjectParts(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]phyObject, int) { + members, total := getCompexObjectMembers(cmd, cnrID, objID, cli, prmHead, errSplitInfo) + return flattenComplexMembersIfECContainer(cmd, cnrID, members, prmHead), total } -func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) []oid.ID { +func getCompexObjectMembers(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, prmHead internalclient.HeadObjectPrm, errSplitInfo *objectSDK.SplitInfoError) ([]oid.ID, int) { + var total int splitInfo := 
errSplitInfo.SplitInfo() if members, ok := tryGetSplitMembersByLinkingObject(cmd, splitInfo, prmHead, cnrID); ok { - return members + if total = len(members); total > 0 { + total-- // linking object is not data object + } + return members, total } if members, ok := tryGetSplitMembersBySplitID(cmd, splitInfo, cli, cnrID); ok { - return members + return members, len(members) } - return tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + members := tryRestoreChainInReverse(cmd, splitInfo, prmHead, cli, cnrID, objID) + return members, len(members) } func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, members []oid.ID, prmHead internalclient.HeadObjectPrm) []phyObject { @@ -383,8 +395,11 @@ func getECRequiredPlacementInternal(cmd *cobra.Command, object phyObject, placem } } -func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, result *objectNodesResult) { +func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.PrivateKey, objects []phyObject, count int, result *objectNodesResult) { resultMtx := &sync.Mutex{} + counter := &objectCounter{ + total: uint32(count), + } candidates := getNodesToCheckObjectExistance(cmd, netmap, result) @@ -401,7 +416,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. for _, object := range objects { eg.Go(func() error { - stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk) + stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk, counter) resultMtx.Lock() defer resultMtx.Unlock() if err == nil && stored { @@ -420,6 +435,7 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa. 
} commonCmd.ExitOnErr(cmd, "failed to get actual placement: %w", eg.Wait()) + result.total = counter.total } func getNodesToCheckObjectExistance(cmd *cobra.Command, netmap *netmapSDK.NetMap, result *objectNodesResult) []netmapSDK.NodeInfo { @@ -478,7 +494,7 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N return cli, nil } -func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey) (bool, error) { +func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.Client, pk *ecdsa.PrivateKey, counter *objectCounter) (bool, error) { var addrObj oid.Address addrObj.SetContainer(cnrID) addrObj.SetObject(objID) @@ -493,6 +509,14 @@ func isObjectStoredOnNode(ctx context.Context, cmd *cobra.Command, cnrID cid.ID, res, err := internalclient.HeadObject(ctx, prmHead) if err == nil && res != nil { + if res.Header().ECHeader() != nil { + counter.Lock() + defer counter.Unlock() + if !counter.isECcounted { + counter.total *= res.Header().ECHeader().Total() + } + counter.isECcounted = true + } return true, nil } var notFound *apistatus.ObjectNotFound @@ -512,7 +536,8 @@ func printPlacement(cmd *cobra.Command, objID oid.ID, objects []phyObject, resul } func printObjectNodesAsText(cmd *cobra.Command, objID oid.ID, objects []phyObject, result *objectNodesResult) { - fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects:\n", objID.EncodeToString(), len(objects)) + fmt.Fprintf(cmd.OutOrStdout(), "Object %s stores payload in %d data objects\n", objID.EncodeToString(), result.total) + fmt.Fprintf(cmd.OutOrStdout(), "Found %d:\n", len(objects)) for _, object := range objects { fmt.Fprintf(cmd.OutOrStdout(), "- %s\n", object.objectID) From 6f7b6b65f3b8cc3f372d900ee41242f0624b5b67 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 16:54:58 +0300 Subject: [PATCH 515/591] [#1689] linter: Fix staticcheck warning: 'embedded field can be simplified' Change-Id: I8f454f7d09973cdea096495c3949b88cdd01102e Signed-off-by: Alexander Chuprov --- .../modules/morph/contract/dump_hashes.go | 2 +- cmd/frostfs-lens/internal/tui/input.go | 8 ++-- cmd/frostfs-lens/internal/tui/ui.go | 2 +- cmd/frostfs-node/apemanager.go | 2 +- cmd/frostfs-node/container.go | 6 +-- cmd/frostfs-node/metrics.go | 28 ++++++------- cmd/frostfs-node/object.go | 6 +-- cmd/frostfs-node/pprof.go | 26 ++++++------ cmd/frostfs-node/tree.go | 4 +- pkg/local_object_storage/blobstor/blobstor.go | 2 +- pkg/local_object_storage/engine/container.go | 4 +- pkg/local_object_storage/engine/control.go | 2 +- pkg/local_object_storage/engine/inhume.go | 4 +- pkg/local_object_storage/engine/shards.go | 2 +- pkg/local_object_storage/pilorama/boltdb.go | 6 +-- pkg/local_object_storage/pilorama/forest.go | 4 +- pkg/local_object_storage/pilorama/inmemory.go | 10 ++--- pkg/local_object_storage/shard/control.go | 4 +- pkg/local_object_storage/shard/gc.go | 4 +- pkg/local_object_storage/shard/id.go | 2 +- pkg/local_object_storage/shard/shard.go | 42 +++++++++---------- pkg/services/control/server/evacuate_async.go | 2 +- pkg/services/object/common/writer/common.go | 6 +-- pkg/services/object/get/assemble.go | 2 +- pkg/services/object/get/get.go | 2 +- pkg/services/object/get/v2/streamer.go | 6 +-- pkg/services/object/patch/service.go | 2 +- pkg/services/object/put/single.go | 22 +++++----- pkg/services/policer/ec.go | 14 +++---- .../session/storage/persistent/storage.go | 2 +- 
pkg/services/tree/cache.go | 6 +-- pkg/services/tree/replicator.go | 2 +- pkg/services/tree/service.go | 2 +- pkg/services/tree/sync.go | 4 +- 34 files changed, 121 insertions(+), 121 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index fb7e4ff62..7630a226e 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -242,7 +242,7 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) - bw.BinWriter.WriteBytes(script) + bw.WriteBytes(script) emit.Instruction(bw.BinWriter, opcode.ENDTRY, []byte{2 + 1}) emit.Opcodes(bw.BinWriter, opcode.PUSH0) } diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go index 90729c119..471514e5d 100644 --- a/cmd/frostfs-lens/internal/tui/input.go +++ b/cmd/frostfs-lens/internal/tui/input.go @@ -53,17 +53,17 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo f.historyPointer++ // Stop iterating over history. if f.historyPointer == len(f.history) { - f.InputField.SetText(f.currentContent) + f.SetText(f.currentContent) return } - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) case tcell.KeyUp: if len(f.history) == 0 { return } // Start iterating over history. if f.historyPointer == len(f.history) { - f.currentContent = f.InputField.GetText() + f.currentContent = f.GetText() } // End of history. if f.historyPointer == 0 { @@ -71,7 +71,7 @@ func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFo } // Iterate to least recent prompts. 
f.historyPointer-- - f.InputField.SetText(f.history[f.historyPointer]) + f.SetText(f.history[f.historyPointer]) default: f.InputField.InputHandler()(event, func(tview.Primitive) {}) } diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go index bcc082821..bd7540b01 100644 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ b/cmd/frostfs-lens/internal/tui/ui.go @@ -482,7 +482,7 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { ui.searchBar.InputHandler()(event, func(tview.Primitive) {}) } - ui.Box.MouseHandler() + ui.MouseHandler() } func (ui *UI) WithPrompt(prompt string) error { diff --git a/cmd/frostfs-node/apemanager.go b/cmd/frostfs-node/apemanager.go index e761a1b14..513314712 100644 --- a/cmd/frostfs-node/apemanager.go +++ b/cmd/frostfs-node/apemanager.go @@ -14,7 +14,7 @@ import ( func initAPEManagerService(c *cfg) { contractStorage := ape_contract.NewProxyVerificationContractStorage( morph.NewSwitchRPCGuardedActor(c.cfgMorph.client), - c.shared.key, + c.key, c.cfgMorph.proxyScriptHash, c.cfgObject.cfgAccessPolicyEngine.policyContractHash) diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go index 012012297..bdb280d87 100644 --- a/cmd/frostfs-node/container.go +++ b/cmd/frostfs-node/container.go @@ -32,7 +32,7 @@ func initContainerService(_ context.Context, c *cfg) { wrap, err := cntClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0) fatalOnErr(err) - c.shared.cnrClient = wrap + c.cnrClient = wrap cnrSrc := cntClient.AsContainerSource(wrap) @@ -47,7 +47,7 @@ func initContainerService(_ context.Context, c *cfg) { frostfsIDSubjectProvider = newMorphFrostfsIDCache(frostfsIDSubjectProvider, int(cacheSize), c.cfgMorph.cacheTTL, metrics.NewCacheMetrics("frostfs_id")) } - c.shared.frostfsidClient = frostfsIDSubjectProvider + c.frostfsidClient = frostfsIDSubjectProvider c.cfgContainer.containerBatchSize = containerconfig.ContainerBatchSize(c.appCfg) defaultChainRouter := engine.NewDefaultChainRouterWithLocalOverrides( @@ -57,7 +57,7 @@ func initContainerService(_ context.Context, c *cfg) { service := containerService.NewSignService( &c.key.PrivateKey, containerService.NewAPEServer(defaultChainRouter, cnrRdr, - newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.shared.frostfsidClient, + newCachedIRFetcher(createInnerRingFetcher(c)), c.netMapSource, c.frostfsidClient, containerService.NewSplitterService( c.cfgContainer.containerBatchSize, c.respSvc, containerService.NewExecutionService(containerMorph.NewExecutor(cnrRdr, cnrWrt), c.respSvc)), diff --git a/cmd/frostfs-node/metrics.go b/cmd/frostfs-node/metrics.go index 19b4af51f..d9ca01e70 100644 --- a/cmd/frostfs-node/metrics.go +++ b/cmd/frostfs-node/metrics.go @@ -8,38 +8,38 @@ import ( func metricsComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.metrics == nil { - c.dynamicConfiguration.metrics = new(httpComponent) - c.dynamicConfiguration.metrics.cfg = c - c.dynamicConfiguration.metrics.name = "metrics" - c.dynamicConfiguration.metrics.handler = metrics.Handler() + if c.metrics == nil { + c.metrics = new(httpComponent) + c.metrics.cfg = c + c.metrics.name = "metrics" + c.metrics.handler = metrics.Handler() updated = true } // (re)init read configuration enabled := metricsconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.metrics.enabled { - c.dynamicConfiguration.metrics.enabled = enabled + if enabled != c.metrics.enabled { + c.metrics.enabled = enabled 
updated = true } address := metricsconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.metrics.address { - c.dynamicConfiguration.metrics.address = address + if address != c.metrics.address { + c.metrics.address = address updated = true } dur := metricsconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.metrics.shutdownDur { - c.dynamicConfiguration.metrics.shutdownDur = dur + if dur != c.metrics.shutdownDur { + c.metrics.shutdownDur = dur updated = true } - return c.dynamicConfiguration.metrics, updated + return c.metrics, updated } func enableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Enable() + c.metricsSvc.Enable() } func disableMetricsSvc(c *cfg) { - c.shared.metricsSvc.Disable() + c.metricsSvc.Disable() } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 2674be8c7..527746d26 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -186,9 +186,9 @@ func initObjectService(c *cfg) { respSvc, ) - c.shared.metricsSvc = objectService.NewMetricCollector( + c.metricsSvc = objectService.NewMetricCollector( signSvc, c.metricsCollector.ObjectService(), metricsconfig.Enabled(c.appCfg)) - qosService := objectService.NewQoSObjectService(c.shared.metricsSvc, &c.cfgQoSService) + qosService := objectService.NewQoSObjectService(c.metricsSvc, &c.cfgQoSService) auditSvc := objectService.NewAuditService(qosService, c.log, c.audit) server := objectTransportGRPC.New(auditSvc) @@ -432,7 +432,7 @@ func createAPEService(c *cfg, irFetcher *cachedIRFetcher, splitSvc *objectServic c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(), c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(), objectAPE.NewStorageEngineHeaderProvider(c.cfgObject.cfgLocalStorage.localStorage, c.cfgObject.getSvc), - c.shared.frostfsidClient, + c.frostfsidClient, c.netMapSource, c.cfgNetmap.state, c.cfgObject.cnrSource, diff --git a/cmd/frostfs-node/pprof.go b/cmd/frostfs-node/pprof.go index 5b40c8a88..e4da8119f 100644 --- a/cmd/frostfs-node/pprof.go +++ b/cmd/frostfs-node/pprof.go @@ -18,33 +18,33 @@ func initProfilerService(ctx context.Context, c *cfg) { func pprofComponent(c *cfg) (*httpComponent, bool) { var updated bool // check if it has been inited before - if c.dynamicConfiguration.pprof == nil { - c.dynamicConfiguration.pprof = new(httpComponent) - c.dynamicConfiguration.pprof.cfg = c - c.dynamicConfiguration.pprof.name = "pprof" - c.dynamicConfiguration.pprof.handler = httputil.Handler() - c.dynamicConfiguration.pprof.preReload = tuneProfilers + if c.pprof == nil { + c.pprof = new(httpComponent) + c.pprof.cfg = c + c.pprof.name = "pprof" + c.pprof.handler = httputil.Handler() + c.pprof.preReload = tuneProfilers updated = true } // (re)init read configuration enabled := profilerconfig.Enabled(c.appCfg) - if enabled != c.dynamicConfiguration.pprof.enabled { - c.dynamicConfiguration.pprof.enabled = enabled + if enabled != c.pprof.enabled { + c.pprof.enabled = enabled updated = true } address := profilerconfig.Address(c.appCfg) - if address != c.dynamicConfiguration.pprof.address { - c.dynamicConfiguration.pprof.address = address + if address != c.pprof.address { + c.pprof.address = address updated = true } dur := profilerconfig.ShutdownTimeout(c.appCfg) - if dur != c.dynamicConfiguration.pprof.shutdownDur { - c.dynamicConfiguration.pprof.shutdownDur = dur + if dur != c.pprof.shutdownDur { + c.pprof.shutdownDur = dur updated = true } - return c.dynamicConfiguration.pprof, updated + return c.pprof, updated } func 
tuneProfilers(c *cfg) { diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 65414f0ca..67d9c9df0 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -51,9 +51,9 @@ func initTreeService(c *cfg) { c.treeService = tree.New( tree.WithContainerSource(cnrSource{ src: c.cfgObject.cnrSource, - cli: c.shared.cnrClient, + cli: c.cnrClient, }), - tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient), + tree.WithFrostfsidSubjectProvider(c.frostfsidClient), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), tree.WithLogger(c.log), diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index f850f48b4..d7c333349 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -153,5 +153,5 @@ func WithMetrics(m Metrics) Option { } func (b *BlobStor) Compressor() *compression.Config { - return &b.cfg.compression + return &b.compression } diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 3160d7f83..03a299b93 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -74,7 +74,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) - csRes, err := sh.Shard.ContainerSize(ctx, csPrm) + csRes, err := sh.ContainerSize(ctx, csPrm) if err != nil { e.reportShardError(ctx, sh, "can't get container size", err, zap.Stringer("container_id", prm.cnr)) @@ -119,7 +119,7 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { uniqueIDs := make(map[string]cid.ID) e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { - res, err := sh.Shard.ListContainers(ctx, shard.ListContainersPrm{}) + res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { e.reportShardError(ctx, sh, "can't get list of containers", err) return false diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 7caa515d4..96b53581e 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -77,7 +77,7 @@ func (e *StorageEngine) Init(ctx context.Context) error { errCh := make(chan shardInitError, len(e.shards)) var eg errgroup.Group - if e.cfg.lowMem && e.anyShardRequiresRefill() { + if e.lowMem && e.anyShardRequiresRefill() { eg.SetLimit(1) } diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index c8ee33b53..6ec9a4ef0 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -227,7 +227,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var outErr error e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locked, err = h.Shard.IsLocked(ctx, addr) + locked, err = h.IsLocked(ctx, addr) if err != nil { e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) outErr = err @@ -256,7 +256,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I var outErr error e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { - locks, err := h.Shard.GetLocks(ctx, addr) + locks, err := h.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) outErr = err 
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index dfc3b1a35..6e6c08bb5 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -118,7 +118,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh return nil, fmt.Errorf("add %s shard: %w", sh.ID().String(), err) } - e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode()) + e.metrics.SetMode(sh.ID().String(), sh.GetMode()) return sh.ID(), nil } diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index fc7cdaabc..897b37ea0 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -1582,12 +1582,12 @@ func (t *boltForest) moveFromBytes(m *Move, data []byte) error { func (t *boltForest) logFromBytes(lm *Move, data []byte) error { lm.Child = binary.LittleEndian.Uint64(data) lm.Parent = binary.LittleEndian.Uint64(data[8:]) - return lm.Meta.FromBytes(data[16:]) + return lm.FromBytes(data[16:]) } func (t *boltForest) logToBytes(lm *Move) []byte { w := io.NewBufBinWriter() - size := 8 + 8 + lm.Meta.Size() + 1 + size := 8 + 8 + lm.Size() + 1 // if lm.HasOld { // size += 8 + lm.Old.Meta.Size() // } @@ -1595,7 +1595,7 @@ func (t *boltForest) logToBytes(lm *Move) []byte { w.Grow(size) w.WriteU64LE(lm.Child) w.WriteU64LE(lm.Parent) - lm.Meta.EncodeBinary(w.BinWriter) + lm.EncodeBinary(w.BinWriter) // w.WriteBool(lm.HasOld) // if lm.HasOld { // w.WriteU64LE(lm.Old.Parent) diff --git a/pkg/local_object_storage/pilorama/forest.go b/pkg/local_object_storage/pilorama/forest.go index b5320e42d..ebfd0bcc0 100644 --- a/pkg/local_object_storage/pilorama/forest.go +++ b/pkg/local_object_storage/pilorama/forest.go @@ -177,7 +177,7 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI var res []NodeInfo for _, nodeID := range nodeIDs { - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) for _, childID := range children { var found bool for _, kv := range s.infoMap[childID].Meta.Items { @@ -222,7 +222,7 @@ func (f *memoryForest) TreeGetChildren(_ context.Context, cid cid.ID, treeID str return nil, ErrTreeNotFound } - children := s.tree.getChildren(nodeID) + children := s.getChildren(nodeID) res := make([]NodeInfo, 0, len(children)) for _, childID := range children { res = append(res, NodeInfo{ diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index ce7b3db1e..f74d12a1b 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -35,9 +35,9 @@ func newMemoryTree() *memoryTree { // undo un-does op and changes s in-place. 
func (s *memoryTree) undo(op *move) { if op.HasOld { - s.tree.infoMap[op.Child] = op.Old + s.infoMap[op.Child] = op.Old } else { - delete(s.tree.infoMap, op.Child) + delete(s.infoMap, op.Child) } } @@ -83,8 +83,8 @@ func (s *memoryTree) do(op *Move) move { }, } - shouldPut := !s.tree.isAncestor(op.Child, op.Parent) - p, ok := s.tree.infoMap[op.Child] + shouldPut := !s.isAncestor(op.Child, op.Parent) + p, ok := s.infoMap[op.Child] if ok { lm.HasOld = true lm.Old = p @@ -100,7 +100,7 @@ func (s *memoryTree) do(op *Move) move { p.Meta = m p.Parent = op.Parent - s.tree.infoMap[op.Child] = p + s.infoMap[op.Child] = p return lm } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 19b13a8ab..72e650c5e 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -214,8 +214,8 @@ func (s *Shard) refillMetabase(ctx context.Context) error { } eg, egCtx := errgroup.WithContext(ctx) - if s.cfg.refillMetabaseWorkersCount > 0 { - eg.SetLimit(s.cfg.refillMetabaseWorkersCount) + if s.refillMetabaseWorkersCount > 0 { + eg.SetLimit(s.refillMetabaseWorkersCount) } var completedCount uint64 diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 9b327f6f1..19b6e2d12 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -320,8 +320,8 @@ func (s *Shard) getGarbage(ctx context.Context) ([]oid.Address, error) { } func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) { - workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount) - batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize) + workerCount = max(minExpiredWorkers, s.gc.expiredCollectorWorkerCount) + batchSize = max(minExpiredBatchSize, s.gc.expiredCollectorBatchSize) return } diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go index b233b705c..7391adef2 100644 --- a/pkg/local_object_storage/shard/id.go +++ b/pkg/local_object_storage/shard/id.go @@ -45,7 +45,7 @@ func (s *Shard) UpdateID(ctx context.Context) (err error) { } shardID := s.info.ID.String() - s.cfg.metricsWriter.SetShardID(shardID) + s.metricsWriter.SetShardID(shardID) if s.writeCache != nil && s.writeCache.GetMetrics() != nil { s.writeCache.GetMetrics().SetShardID(shardID) } diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index b9ec05f01..304a6bf9d 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -218,7 +218,7 @@ func WithWriteCache(use bool) Option { // hasWriteCache returns bool if write cache exists on shards. func (s *Shard) hasWriteCache() bool { - return s.cfg.useWriteCache + return s.useWriteCache } // NeedRefillMetabase returns true if metabase is needed to be refilled. 
@@ -379,15 +379,15 @@ func WithLimiter(l qos.Limiter) Option { } func (s *Shard) fillInfo() { - s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo() - s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo() - s.cfg.info.Mode = s.GetMode() + s.info.MetaBaseInfo = s.metaBase.DumpInfo() + s.info.BlobStorInfo = s.blobStor.DumpInfo() + s.info.Mode = s.GetMode() - if s.cfg.useWriteCache { - s.cfg.info.WriteCacheInfo = s.writeCache.DumpInfo() + if s.useWriteCache { + s.info.WriteCacheInfo = s.writeCache.DumpInfo() } if s.pilorama != nil { - s.cfg.info.PiloramaInfo = s.pilorama.DumpInfo() + s.info.PiloramaInfo = s.pilorama.DumpInfo() } } @@ -454,57 +454,57 @@ func (s *Shard) updateMetrics(ctx context.Context) { s.setContainerObjectsCount(contID.EncodeToString(), logical, count.Logic) s.setContainerObjectsCount(contID.EncodeToString(), user, count.User) } - s.cfg.metricsWriter.SetMode(s.info.Mode) + s.metricsWriter.SetMode(s.info.Mode) } // incObjectCounter increment both physical and logical object // counters. func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) { - s.cfg.metricsWriter.IncObjectCounter(physical) - s.cfg.metricsWriter.IncObjectCounter(logical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) + s.metricsWriter.IncObjectCounter(physical) + s.metricsWriter.IncObjectCounter(logical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical) if isUser { - s.cfg.metricsWriter.IncObjectCounter(user) - s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) + s.metricsWriter.IncObjectCounter(user) + s.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user) } } func (s *Shard) decObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v)) + s.metricsWriter.AddToObjectCounter(typ, -int(v)) } } func (s *Shard) setObjectCounterBy(typ string, v uint64) { if v > 0 { - s.cfg.metricsWriter.SetObjectCounter(typ, v) + s.metricsWriter.SetObjectCounter(typ, v) } } func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) { for cnrID, count := range byCnr { if count.Phy > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy) } if count.Logic > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), logical, count.Logic) } if count.User > 0 { - s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) + s.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), user, count.User) } } } func (s *Shard) addToContainerSize(cnr string, size int64) { if size != 0 { - s.cfg.metricsWriter.AddToContainerSize(cnr, size) + s.metricsWriter.AddToContainerSize(cnr, size) } } func (s *Shard) addToPayloadSize(size int64) { if size != 0 { - s.cfg.metricsWriter.AddToPayloadSize(size) + s.metricsWriter.AddToPayloadSize(size) } } diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go index 7469ea74e..f3ba9015e 100644 --- a/pkg/services/control/server/evacuate_async.go +++ b/pkg/services/control/server/evacuate_async.go @@ -220,7 +220,7 @@ func (s *Server) replicateTreeToNode(ctx context.Context, forest 
pilorama.Forest TreeId: treeID, Operation: &tree.LogMove{ ParentId: op.Parent, - Meta: op.Meta.Bytes(), + Meta: op.Bytes(), ChildId: op.Child, }, }, diff --git a/pkg/services/object/common/writer/common.go b/pkg/services/object/common/writer/common.go index 1998e9638..6593d3ca0 100644 --- a/pkg/services/object/common/writer/common.go +++ b/pkg/services/object/common/writer/common.go @@ -29,7 +29,7 @@ func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator { } func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error { - traverser, err := placement.NewTraverser(ctx, n.Traversal.Opts...) + traverser, err := placement.NewTraverser(ctx, n.Opts...) if err != nil { return fmt.Errorf("could not create object placement traverser: %w", err) } @@ -56,7 +56,7 @@ func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, } // perform additional container broadcast if needed - if n.Traversal.submitPrimaryPlacementFinish() { + if n.submitPrimaryPlacementFinish() { err := n.ForEachNode(ctx, f) if err != nil { n.cfg.Logger.Error(ctx, logs.PutAdditionalContainerBroadcastFailure, zap.Error(err)) @@ -101,7 +101,7 @@ func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement. // in subsequent container broadcast. Note that we don't // process this node during broadcast if primary placement // on it failed. - n.Traversal.submitProcessed(addr, item) + n.submitProcessed(addr, item) } wg.Wait() diff --git a/pkg/services/object/get/assemble.go b/pkg/services/object/get/assemble.go index e164627d2..e80132489 100644 --- a/pkg/services/object/get/assemble.go +++ b/pkg/services/object/get/assemble.go @@ -146,5 +146,5 @@ func (r *request) getObjectWithIndependentRequest(ctx context.Context, prm Reque detachedExecutor.execute(ctx) - return detachedExecutor.statusError.err + return detachedExecutor.err } diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 557e9a028..9676fd914 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -87,7 +87,7 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { exec.execute(ctx) - return exec.statusError.err + return exec.err } func (exec *request) execute(ctx context.Context) { diff --git a/pkg/services/object/get/v2/streamer.go b/pkg/services/object/get/v2/streamer.go index 98207336c..0d73bcd4d 100644 --- a/pkg/services/object/get/v2/streamer.go +++ b/pkg/services/object/get/v2/streamer.go @@ -24,14 +24,14 @@ func (s *streamObjectWriter) WriteHeader(_ context.Context, obj *objectSDK.Objec p.SetHeader(objV2.GetHeader()) p.SetSignature(objV2.GetSignature()) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func (s *streamObjectWriter) WriteChunk(_ context.Context, chunk []byte) error { p := new(objectV2.GetObjectPartChunk) p.SetChunk(chunk) - return s.GetObjectStream.Send(newResponse(p)) + return s.Send(newResponse(p)) } func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { @@ -46,7 +46,7 @@ func newResponse(p objectV2.GetObjectPart) *objectV2.GetResponse { } func (s *streamObjectRangeWriter) WriteChunk(_ context.Context, chunk []byte) error { - return s.GetObjectRangeStream.Send(newRangeResponse(chunk)) + return s.Send(newRangeResponse(chunk)) } func newRangeResponse(p []byte) *objectV2.GetRangeResponse { diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go index 953f82b48..5d298bfed 100644 --- 
a/pkg/services/object/patch/service.go +++ b/pkg/services/object/patch/service.go @@ -28,7 +28,7 @@ func NewService(cfg *objectwriter.Config, // Patch calls internal service and returns v2 object streamer. func (s *Service) Patch() (object.PatchObjectStream, error) { - nodeKey, err := s.Config.KeyStorage.GetKey(nil) + nodeKey, err := s.KeyStorage.GetKey(nil) if err != nil { return nil, err } diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go index f3c2dca1a..90f473254 100644 --- a/pkg/services/object/put/single.go +++ b/pkg/services/object/put/single.go @@ -102,7 +102,7 @@ func (s *Service) validarePutSingleSize(ctx context.Context, obj *objectSDK.Obje return target.ErrWrongPayloadSize } - maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize(ctx) + maxAllowedSize := s.MaxSizeSrc.MaxObjectSize(ctx) if obj.PayloadSize() > maxAllowedSize { return target.ErrExceedingMaxSize } @@ -166,13 +166,13 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o } func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error { - iter := s.Config.NewNodeIterator(placement.placementOptions) + iter := s.NewNodeIterator(placement.placementOptions) iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly) iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast signer := &putSingleRequestSigner{ req: req, - keyStorage: s.Config.KeyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } @@ -186,13 +186,13 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace if err != nil { return err } - key, err := s.Config.KeyStorage.GetKey(nil) + key, err := s.KeyStorage.GetKey(nil) if err != nil { return err } signer := &putSingleRequestSigner{ req: req, - keyStorage: s.Config.KeyStorage, + keyStorage: s.KeyStorage, signer: &sync.Once{}, } @@ -225,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS if !ok { return result, errors.New("missing container ID") } - cnrInfo, err := s.Config.ContainerSource.Get(ctx, cnrID) + cnrInfo, err := s.ContainerSource.Get(ctx, cnrID) if err != nil { return result, fmt.Errorf("could not get container by ID: %w", err) } @@ -249,14 +249,14 @@ func (s *Service) getPutSinglePlacementOptions(ctx context.Context, obj *objectS } result.placementOptions = append(result.placementOptions, placement.ForObject(objID)) - latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.Config.NetmapSource) + latestNetmap, err := netmap.GetLatestNetworkMap(ctx, s.NetmapSource) if err != nil { return result, fmt.Errorf("could not get latest network map: %w", err) } builder := placement.NewNetworkMapBuilder(latestNetmap) if localOnly { result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1)) - builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys) + builder = svcutil.NewLocalPlacement(builder, s.NetmapKeys) } result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder)) return result, nil @@ -273,7 +273,7 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite client.NodeInfoFromNetmapElement(&info, nodeDesc.Info) - c, err := s.Config.ClientConstructor.Get(info) + c, err := s.ClientConstructor.Get(info) if err != nil { return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err) } @@ -283,7 +283,7 @@ func 
(s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error { localTarget := &objectwriter.LocalTarget{ - Storage: s.Config.LocalStore, + Storage: s.LocalStore, Container: container, } return localTarget.WriteObject(ctx, obj, meta) @@ -317,7 +317,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context, if err != nil { objID, _ := obj.ID() cnrID, _ := obj.ContainerID() - s.Config.Logger.Warn(ctx, logs.PutSingleRedirectFailure, + s.Logger.Warn(ctx, logs.PutSingleRedirectFailure, zap.Error(err), zap.Stringer("address", addr), zap.Stringer("object_id", objID), diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go index 1ee31d480..fbdeb3148 100644 --- a/pkg/services/policer/ec.go +++ b/pkg/services/policer/ec.go @@ -101,7 +101,7 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) ecChunkProcessResult { var removeLocalChunk bool requiredNode := nodes[int(objInfo.ECInfo.Index)%(len(nodes))] - if p.cfg.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(requiredNode.PublicKey()) { // current node is required node, we are happy return ecChunkProcessResult{ validPlacement: true, @@ -185,7 +185,7 @@ func (p *Policer) collectRequiredECChunks(nodes []netmap.NodeInfo, objInfo objec if uint32(i) == objInfo.ECInfo.Total { break } - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { requiredChunkIndexes[uint32(i)] = []netmap.NodeInfo{} } } @@ -210,7 +210,7 @@ func (p *Policer) resolveLocalECChunks(ctx context.Context, parentAddress oid.Ad func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.Address, nodes []netmap.NodeInfo, required map[uint32][]netmap.NodeInfo, indexToObjectID map[uint32]oid.ID) bool { var eiErr *objectSDK.ECInfoError for _, n := range nodes { - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { continue } _, err := p.remoteHeader(ctx, n, parentAddress, true) @@ -260,7 +260,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info return } var err error - if p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) { + if p.netmapKeys.IsLocalKey(n.PublicKey()) { _, err = p.localHeader(ctx, parentAddress) } else { _, err = p.remoteHeader(ctx, n, parentAddress, true) @@ -283,7 +283,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info } } else if client.IsErrObjectAlreadyRemoved(err) { restore = false - } else if !p.cfg.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { + } else if !p.netmapKeys.IsLocalKey(n.PublicKey()) && uint32(idx) < objInfo.ECInfo.Total { p.log.Warn(ctx, logs.PolicerCouldNotGetObjectFromNodeMoving, zap.String("node", hex.EncodeToString(n.PublicKey())), zap.Stringer("object", parentAddress), zap.Error(err)) p.replicator.HandleReplicationTask(ctx, replicator.Task{ NumCopies: 1, @@ -343,7 +343,7 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, pID, _ := part.ID() addr.SetObject(pID) targetNode := nodes[idx%len(nodes)] - if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) { + if p.netmapKeys.IsLocalKey(targetNode.PublicKey()) { p.replicator.HandleLocalPutTask(ctx, replicator.Task{ Addr: addr, Obj: part, @@ -371,7 +371,7 
@@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I var obj *objectSDK.Object var err error for _, node := range nodes { - if p.cfg.netmapKeys.IsLocalKey(node.PublicKey()) { + if p.netmapKeys.IsLocalKey(node.PublicKey()) { obj, err = p.localObject(egCtx, objID) } else { obj, err = p.remoteObject(egCtx, node, objID) diff --git a/pkg/services/session/storage/persistent/storage.go b/pkg/services/session/storage/persistent/storage.go index d312ea0ea..132d62445 100644 --- a/pkg/services/session/storage/persistent/storage.go +++ b/pkg/services/session/storage/persistent/storage.go @@ -64,7 +64,7 @@ func NewTokenStore(path string, opts ...Option) (*TokenStore, error) { // enable encryption if it // was configured so if cfg.privateKey != nil { - rawKey := make([]byte, (cfg.privateKey.Curve.Params().N.BitLen()+7)/8) + rawKey := make([]byte, (cfg.privateKey.Params().N.BitLen()+7)/8) cfg.privateKey.D.FillBytes(rawKey) c, err := aes.NewCipher(rawKey) diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index d250f577a..462c8554f 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -48,7 +48,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) { func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) { c.Lock() - ccInt, ok := c.LRU.Get(netmapAddr) + ccInt, ok := c.Get(netmapAddr) c.Unlock() if ok { @@ -71,9 +71,9 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl c.Lock() if err != nil { - c.LRU.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: nil, lastTry: lastTry}) } else { - c.LRU.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) + c.Add(netmapAddr, cacheItem{cc: cc, lastTry: lastTry}) } c.Unlock() diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 164815c76..01a4ffde0 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -202,7 +202,7 @@ func newApplyRequest(op *movePair) *ApplyRequest { TreeId: op.treeID, Operation: &LogMove{ ParentId: op.op.Parent, - Meta: op.op.Meta.Bytes(), + Meta: op.op.Bytes(), ChildId: op.op.Child, }, }, diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 85bb03a28..a00c8c1cd 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -687,7 +687,7 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) Body: &GetOpLogResponse_Body{ Operation: &LogMove{ ParentId: lm.Parent, - Meta: lm.Meta.Bytes(), + Meta: lm.Bytes(), ChildId: lm.Child, }, }, diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 8abdafaa7..c3796fbd4 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -245,7 +245,7 @@ func (s *Service) startStream(ctx context.Context, cid cid.ID, treeID string, Parent: lm.GetParentId(), Child: lm.GetChildId(), } - if err := m.Meta.FromBytes(lm.GetMeta()); err != nil { + if err := m.FromBytes(lm.GetMeta()); err != nil { return err } select { @@ -415,7 +415,7 @@ func (s *Service) syncLoop(ctx context.Context) { start := time.Now() - cnrs, err := s.cfg.cnrSource.List(ctx) + cnrs, err := s.cnrSource.List(ctx) if err != nil { s.log.Error(ctx, logs.TreeCouldNotFetchContainers, zap.Error(err)) s.metrics.AddSyncDuration(time.Since(start), false) From b0ef737a74d9e0b789be24db0830fa927e7078e0 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 16:58:21 +0300 Subject: [PATCH 
516/591] [#1689] linter: Fix testifylint warning: 'len: use require.Len' Change-Id: I7a08f09c169ac237647dcb20b0737f1c51c441ad Signed-off-by: Alexander Chuprov --- .../blobstor/internal/blobstortest/iterate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go index c11d0888b..d54c54f59 100644 --- a/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go +++ b/pkg/local_object_storage/blobstor/internal/blobstortest/iterate.go @@ -50,7 +50,7 @@ func runTestNormalHandler(t *testing.T, s common.Storage, objects []objectDesc) _, err := s.Iterate(context.Background(), iterPrm) require.NoError(t, err) - require.Equal(t, len(objects), len(seen)) + require.Len(t, objects, len(seen)) for i := range objects { d, ok := seen[objects[i].addr.String()] require.True(t, ok) From f4696e8964786dd5489fdce1456eccab010d02b8 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:03:19 +0300 Subject: [PATCH 517/591] [#1689] linter: Fix staticcheck warning: 'Use fmt.Fprintf(...) instead of WriteString(fmt.Sprintf(...))' Change-Id: I253ab717885cb01b4a2e471147e883ee351be277 Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/control/evacuation.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go index 8032bf09a..b8d7eb046 100644 --- a/cmd/frostfs-cli/modules/control/evacuation.go +++ b/cmd/frostfs-cli/modules/control/evacuation.go @@ -296,7 +296,7 @@ func appendEstimation(sb *strings.Builder, resp *control.GetShardEvacuationStatu leftSeconds := avgObjEvacuationTimeSeconds * objectsLeft leftMinutes := int(leftSeconds / 60) - sb.WriteString(fmt.Sprintf(" Estimated time left: %d minutes.", leftMinutes)) + fmt.Fprintf(sb, " Estimated time left: %d minutes.", leftMinutes) } func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -305,20 +305,20 @@ func appendDuration(sb *strings.Builder, resp *control.GetShardEvacuationStatusR hour := int(duration.Seconds() / 3600) minute := int(duration.Seconds()/60) % 60 second := int(duration.Seconds()) % 60 - sb.WriteString(fmt.Sprintf(" Duration: %02d:%02d:%02d.", hour, minute, second)) + fmt.Fprintf(sb, " Duration: %02d:%02d:%02d.", hour, minute, second) } } func appendStartedAt(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if resp.GetBody().GetStartedAt() != nil { startedAt := time.Unix(resp.GetBody().GetStartedAt().GetValue(), 0).UTC() - sb.WriteString(fmt.Sprintf(" Started at: %s UTC.", startedAt.Format(time.RFC3339))) + fmt.Fprintf(sb, " Started at: %s UTC.", startedAt.Format(time.RFC3339)) } } func appendError(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { if len(resp.GetBody().GetErrorMessage()) > 0 { - sb.WriteString(fmt.Sprintf(" Error: %s.", resp.GetBody().GetErrorMessage())) + fmt.Fprintf(sb, " Error: %s.", resp.GetBody().GetErrorMessage()) } } @@ -332,7 +332,7 @@ func appendStatus(sb *strings.Builder, resp *control.GetShardEvacuationStatusRes default: status = "undefined" } - sb.WriteString(fmt.Sprintf(" Status: %s.", status)) + fmt.Fprintf(sb, " Status: %s.", status) } func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { @@ -350,14 +350,14 @@ func appendShardIDs(sb *strings.Builder, resp *control.GetShardEvacuationStatusR } func 
appendCounts(sb *strings.Builder, resp *control.GetShardEvacuationStatusResponse) { - sb.WriteString(fmt.Sprintf(" Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", + fmt.Fprintf(sb, " Evacuated %d objects out of %d, failed to evacuate: %d, skipped: %d; evacuated %d trees out of %d, failed to evacuate: %d.", resp.GetBody().GetEvacuatedObjects(), resp.GetBody().GetTotalObjects(), resp.GetBody().GetFailedObjects(), resp.GetBody().GetSkippedObjects(), resp.GetBody().GetEvacuatedTrees(), resp.GetBody().GetTotalTrees(), - resp.GetBody().GetFailedTrees())) + resp.GetBody().GetFailedTrees()) } func initControlEvacuationShardCmd() { From 4c03561aa20c2bbc2e311c9438fd7979ff1d96db Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 7 Apr 2025 18:48:36 +0300 Subject: [PATCH 518/591] [#1693] internal/assert: Add `False` and `NoError` checks Change-Id: Ib3ab1671eeff8e8917673513477f158cadbb4287 Signed-off-by: Ekaterina Lebedeva --- internal/assert/cond.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/internal/assert/cond.go b/internal/assert/cond.go index 701036fa8..c6a034f94 100644 --- a/internal/assert/cond.go +++ b/internal/assert/cond.go @@ -1,9 +1,25 @@ package assert -import "strings" +import ( + "fmt" + "strings" +) func True(cond bool, details ...string) { if !cond { panic(strings.Join(details, " ")) } } + +func False(cond bool, details ...string) { + if cond { + panic(strings.Join(details, " ")) + } +} + +func NoError(err error, details ...string) { + if err != nil { + content := fmt.Sprintf("BUG: %v: %s", err, strings.Join(details, " ")) + panic(content) + } +} From 0e1b01b15ff21c07d809dbed00f23c6b2377bf4e Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Wed, 2 Apr 2025 19:06:10 +0300 Subject: [PATCH 519/591] [#1693] cli/adm: Replace conditional panics with asserts Change-Id: I3a46f7ac6d9e4ff51bb490e6fcfc07957418f1a7 Signed-off-by: Ekaterina Lebedeva --- .../internal/modules/morph/balance/balance.go | 9 +++----- .../modules/morph/container/container.go | 5 ++-- .../internal/modules/morph/contract/deploy.go | 10 ++++---- .../modules/morph/contract/dump_hashes.go | 9 +++----- .../modules/morph/helper/initialize_ctx.go | 23 ++++++++----------- .../modules/morph/helper/local_client.go | 9 +++----- .../morph/initialize/initialize_nns.go | 5 ++-- .../morph/initialize/initialize_register.go | 5 ++-- 8 files changed, 28 insertions(+), 47 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go index be42f2aa5..23dba14f4 100644 --- a/cmd/frostfs-adm/internal/modules/morph/balance/balance.go +++ b/cmd/frostfs-adm/internal/modules/morph/balance/balance.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "github.com/nspcc-dev/neo-go/pkg/core/native/noderoles" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -161,9 +162,7 @@ func printAlphabetContractBalances(cmd *cobra.Command, c helper.Client, inv *inv helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) alphaRes, err := c.InvokeScript(w.Bytes(), nil) if err 
!= nil { @@ -226,9 +225,7 @@ func fetchBalances(c *invoker.Invoker, gasHash util.Uint160, accounts []accBalan for i := range accounts { emit.AppCall(w.BinWriter, gasHash, "balanceOf", callflag.ReadStates, accounts[i].scriptHash) } - if w.Err != nil { - panic(w.Err) - } + assert.NoError(w.Err) res, err := c.Run(w.Bytes()) if err != nil || res.State != vmstate.Halt.String() || len(res.Stack) != len(accounts) { diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go index e72dc15e9..79685f111 100644 --- a/cmd/frostfs-adm/internal/modules/morph/container/container.go +++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/io" @@ -235,9 +236,7 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd putContainer(bw, ch, cnt) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) if err := wCtx.SendConsensusTx(bw.Bytes()); err != nil { return err diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go index 5adb480da..543b5fcb3 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/deploy.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/cli/cmdargs" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/encoding/address" @@ -120,9 +121,7 @@ func deployContractCmd(cmd *cobra.Command, args []string) error { } } - if writer.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } + assert.NoError(writer.Err, "can't create deployment script") if err := c.SendCommitteeTx(writer.Bytes(), false); err != nil { return err @@ -173,9 +172,8 @@ func registerNNS(nnsCs *state.Contract, c *helper.InitializeContext, zone string domain, int64(nns.TXT), address.Uint160ToString(cs.Hash)) } - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create deployment script: %w", writer.Err)) - } else if bw.Len() != start { + assert.NoError(bw.Err, "can't create deployment script") + if bw.Len() != start { writer.WriteBytes(bw.Bytes()) emit.Opcodes(writer.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(writer.BinWriter, nnsCs.Hash, "setPrice", callflag.All) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 7630a226e..fde58fd2b 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -11,6 +11,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/io" "github.com/nspcc-dev/neo-go/pkg/rpcclient/invoker" @@ -236,9 +237,7 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } else { sub.Reset() emit.AppCall(sub.BinWriter, infos[i].hash, "version", callflag.NoneFlag) - if sub.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(sub.Err, "can't create version script") script := sub.Bytes() emit.Instruction(bw.BinWriter, opcode.TRY, []byte{byte(3 + len(script) + 2), 0}) @@ -248,9 +247,7 @@ func fillContractVersion(cmd *cobra.Command, c helper.Client, infos []contractDu } } emit.Opcodes(bw.BinWriter, opcode.NOP) // for the last ENDTRY target - if bw.Err != nil { - panic(fmt.Errorf("BUG: can't create version script: %w", bw.Err)) - } + assert.NoError(bw.Err, "can't create version script") res, err := c.InvokeScript(bw.Bytes(), nil) if err != nil { diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go index 27052697f..da5ffedae 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/initialize_ctx.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -375,9 +376,7 @@ func (c *InitializeContext) sendMultiTx(script []byte, tryGroup bool, withConsen } act, err = actor.New(c.Client, signers) } else { - if withConsensus { - panic("BUG: should never happen") - } + assert.False(withConsensus, "BUG: should never happen") act, err = c.CommitteeAct, nil } if err != nil { @@ -411,11 +410,9 @@ func (c *InitializeContext) MultiSignAndSend(tx *transaction.Transaction, accTyp func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType string) error { version, err := c.Client.GetVersion() - if err != nil { - // error appears only if client - // has not been initialized - panic(err) - } + // error appears only if client + // has not been initialized + assert.NoError(err) network := version.Protocol.Network // Use parameter context to avoid dealing with signature order. 
@@ -447,12 +444,12 @@ func (c *InitializeContext) MultiSign(tx *transaction.Transaction, accType strin for i := range tx.Signers { if tx.Signers[i].Account == h { + assert.True(i <= len(tx.Scripts), "BUG: invalid signing order") if i < len(tx.Scripts) { tx.Scripts[i] = *w - } else if i == len(tx.Scripts) { + } + if i == len(tx.Scripts) { tx.Scripts = append(tx.Scripts, *w) - } else { - panic("BUG: invalid signing order") } return nil } @@ -510,9 +507,7 @@ func (c *InitializeContext) NNSRegisterDomainScript(nnsHash, expectedHash util.U int64(constants.DefaultExpirationTime), constants.NNSTtlDefVal) emit.Opcodes(bw.BinWriter, opcode.ASSERT) - if bw.Err != nil { - panic(bw.Err) - } + assert.NoError(bw.Err) return bw.Bytes(), false, nil } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index d0a05d5c7..46611c177 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -10,6 +10,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/google/uuid" "github.com/nspcc-dev/neo-go/pkg/config" "github.com/nspcc-dev/neo-go/pkg/core" @@ -316,9 +317,7 @@ func (l *LocalClient) SendRawTransaction(tx *transaction.Transaction) (util.Uint func (l *LocalClient) putTransactions() error { // 1. Prepare new block. lastBlock, err := l.bc.GetBlock(l.bc.CurrentBlockHash()) - if err != nil { - panic(err) - } + assert.NoError(err) defer func() { l.transactions = l.transactions[:0] }() b := &block.Block{ @@ -359,9 +358,7 @@ func InvokeFunction(c Client, h util.Uint160, method string, parameters []any, s w := io.NewBufBinWriter() emit.Array(w.BinWriter, parameters...) 
emit.AppCallNoArgs(w.BinWriter, h, method, callflag.All) - if w.Err != nil { - panic(fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) - } + assert.True(w.Err == nil, fmt.Sprintf("BUG: invalid parameters for '%s': %v", method, w.Err)) return c.InvokeScript(w.Bytes(), signers) } diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go index e127ca545..176356378 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_nns.go @@ -7,6 +7,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-contract/nns" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" morphClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -111,9 +112,7 @@ func wrapRegisterScriptWithPrice(w *io.BufBinWriter, nnsHash util.Uint160, s []b emit.Opcodes(w.BinWriter, opcode.LDSFLD0, opcode.PUSH1, opcode.PACK) emit.AppCallNoArgs(w.BinWriter, nnsHash, "setPrice", callflag.All) - if w.Err != nil { - panic(fmt.Errorf("BUG: can't wrap register script: %w", w.Err)) - } + assert.NoError(w.Err, "can't wrap register script") } func nnsRegisterDomain(c *helper.InitializeContext, nnsHash, expectedHash util.Uint160, domain string) error { diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go index 46e6621d2..7b7597d91 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_register.go @@ -6,6 +6,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/nspcc-dev/neo-go/pkg/core/native" "github.com/nspcc-dev/neo-go/pkg/core/state" "github.com/nspcc-dev/neo-go/pkg/core/transaction" @@ -39,9 +40,7 @@ func registerCandidateRange(c *helper.InitializeContext, start, end int) error { emit.Opcodes(w.BinWriter, opcode.ASSERT) } emit.AppCall(w.BinWriter, neo.Hash, "setRegisterPrice", callflag.States, regPrice) - if w.Err != nil { - panic(fmt.Sprintf("BUG: %v", w.Err)) - } + assert.NoError(w.Err) signers := []actor.SignerAccount{{ Signer: c.GetSigner(false, c.CommitteeAcc), From 766d9ec46b7b7da10dbbd4bf257db6f5f799a968 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 7 Apr 2025 19:25:57 +0300 Subject: [PATCH 520/591] [#1693] cli/lens: Replace conditional panics with asserts Change-Id: Id827da0cd9eef66efd806be6c9bc61044175a971 Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-lens/internal/schema/common/schema.go | 8 +++----- cmd/frostfs-lens/internal/tui/records.go | 5 ++--- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go index 9bad19032..077a68785 100644 --- a/cmd/frostfs-lens/internal/schema/common/schema.go +++ b/cmd/frostfs-lens/internal/schema/common/schema.go @@ -3,6 +3,8 @@ package common import ( "errors" 
"fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) type FilterResult byte @@ -71,11 +73,7 @@ func (fp FallbackParser) ToParser() Parser { func (p Parser) ToFallbackParser() FallbackParser { return func(key, value []byte) (SchemaEntry, Parser) { entry, next, err := p(key, value) - if err != nil { - panic(fmt.Errorf( - "couldn't use that parser as a fallback parser, it returned an error: %w", err, - )) - } + assert.NoError(err, "couldn't use that parser as a fallback parser") return entry, next } } diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go index 5f61df884..a4d392ab3 100644 --- a/cmd/frostfs-lens/internal/tui/records.go +++ b/cmd/frostfs-lens/internal/tui/records.go @@ -8,6 +8,7 @@ import ( "sync" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) @@ -94,9 +95,7 @@ func (v *RecordsView) Mount(ctx context.Context) error { } func (v *RecordsView) Unmount() { - if v.onUnmount == nil { - panic("try to unmount not mounted component") - } + assert.False(v.onUnmount == nil, "try to unmount not mounted component") v.onUnmount() v.onUnmount = nil } From 17cba3387e926c50ae47a224af47f3f6668134f4 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 8 Apr 2025 19:57:43 +0300 Subject: [PATCH 521/591] [#1332] cli/playground: Prevent prompt artifacts by writing to 'readline' stdout Change-Id: I1c3cbb0b762f29c0995d3f6fc79bae5246ee7bc3 Signed-off-by: Alexander Chuprov --- .../modules/container/policy_playground.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index dcd755510..825e1d222 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -19,8 +19,9 @@ import ( ) type policyPlaygroundREPL struct { - cmd *cobra.Command - nodes map[string]netmap.NodeInfo + cmd *cobra.Command + nodes map[string]netmap.NodeInfo + console *readline.Instance } func newPolicyPlaygroundREPL(cmd *cobra.Command) *policyPlaygroundREPL { @@ -40,7 +41,7 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { node.IterateAttributes(func(k, v string) { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) }) - fmt.Printf("\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) + fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } return nil @@ -147,7 +148,7 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { for _, node := range ns { ids = append(ids, hex.EncodeToString(node.PublicKey())) } - fmt.Printf("\t%2d: %v\n", i+1, ids) + fmt.Fprintf(repl.console, "\t%2d: %v\n", i+1, ids) } return nil @@ -208,6 +209,7 @@ func (repl *policyPlaygroundREPL) run() error { if err != nil { return fmt.Errorf("error initializing readline: %w", err) } + repl.console = rl defer rl.Close() var exit bool @@ -232,10 +234,10 @@ func (repl *policyPlaygroundREPL) run() error { cmd := parts[0] if handler, exists := cmdHandlers[cmd]; exists { if err := handler(parts[1:]); err != nil { - fmt.Printf("error: %v\n", err) + fmt.Fprintf(repl.console, "error: %v\n", err) } } else { - fmt.Printf("error: unknown command %q\n", cmd) + fmt.Fprintf(repl.console, "error: unknown command %q\n", cmd) } } } From 46fd5e17b2366c22dba259d1349c6e3fa8662cb6 Mon 
Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 8 Apr 2025 20:23:22 +0300 Subject: [PATCH 522/591] [#1332] cli/playground: Add help Change-Id: I6160cfddf427b161619e4b96ceec8396b75c4d08 Signed-off-by: Alexander Chuprov --- .../modules/container/policy_playground.go | 119 ++++++++++++++++-- 1 file changed, 109 insertions(+), 10 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 825e1d222..f747b3252 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -5,7 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "maps" "os" + "slices" "strings" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -154,6 +156,23 @@ func (repl *policyPlaygroundREPL) handleEval(args []string) error { return nil } +func (repl *policyPlaygroundREPL) handleHelp(args []string) error { + if len(args) != 0 { + if _, ok := commands[args[0]]; !ok { + return fmt.Errorf("unknown command: %q", args[0]) + } + fmt.Fprintln(repl.console, commands[args[0]].usage) + return nil + } + + commandList := slices.Collect(maps.Keys(commands)) + slices.Sort(commandList) + for _, command := range commandList { + fmt.Fprintf(repl.console, "%s: %s\n", command, commands[command].descriprion) + } + return nil +} + func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { var nm netmap.NetMap var nodes []netmap.NodeInfo @@ -164,15 +183,82 @@ func (repl *policyPlaygroundREPL) netMap() netmap.NetMap { return nm } -var policyPlaygroundCompleter = readline.NewPrefixCompleter( - readline.PcItem("list"), - readline.PcItem("ls"), - readline.PcItem("add"), - readline.PcItem("load"), - readline.PcItem("remove"), - readline.PcItem("rm"), - readline.PcItem("eval"), -) +type commandDescription struct { + descriprion string + usage string +} + +var commands = map[string]commandDescription{ + "list": { + descriprion: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + list + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "ls": { + descriprion: "Display all nodes in the netmap", + usage: `Display all nodes in the netmap +Example of usage: + ls + 1: id=03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae attrs={Continent:"Europe" Country:"Poland"} + 2: id=02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3 attrs={Continent:"Antarctica" Country:"Heard Island"} +`, + }, + + "add": { + descriprion: "Add a new node: add attr=value", + usage: `Add a new node +Example of usage: + add 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae continent:Europe country:Poland`, + }, + + "load": { + descriprion: "Load netmap from file: load ", + usage: `Load netmap from file +Example of usage: + load "netmap.json" +File format (netmap.json): +{ + "03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae": { + "continent": "Europe", + "country": "Poland" + }, + "02ac920cd7df0b61b289072e6b946e2da4e1a31b9ab1c621bb475e30fa4ab102c3": { + "continent": "Antarctica", + "country": "Heard Island" + } +}`, + }, + + "remove": { + descriprion: "Remove a node: remove ", + usage: `Remove a node +Example of usage: + remove 
03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "rm": { + descriprion: "Remove a node: rm ", + usage: `Remove a node +Example of usage: + rm 03ff65b6ae79134a4dce9d0d39d3851e9bab4ee97abf86e81e1c5bbc50cd2826ae`, + }, + + "eval": { + descriprion: "Evaluate a policy: eval ", + usage: `Evaluate a policy +Example of usage: + eval REP 2`, + }, + + "help": { + descriprion: "Show available commands", + }, +} func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { @@ -199,12 +285,25 @@ func (repl *policyPlaygroundREPL) run() error { "remove": repl.handleRemove, "rm": repl.handleRemove, "eval": repl.handleEval, + "help": repl.handleHelp, } + var cfgCompleter []readline.PrefixCompleterInterface + var helpSubItems []readline.PrefixCompleterInterface + + for name := range commands { + if name != "help" { + cfgCompleter = append(cfgCompleter, readline.PcItem(name)) + helpSubItems = append(helpSubItems, readline.PcItem(name)) + } + } + + cfgCompleter = append(cfgCompleter, readline.PcItem("help", helpSubItems...)) + completer := readline.NewPrefixCompleter(cfgCompleter...) rl, err := readline.NewEx(&readline.Config{ Prompt: "> ", InterruptPrompt: "^C", - AutoComplete: policyPlaygroundCompleter, + AutoComplete: completer, }) if err != nil { return fmt.Errorf("error initializing readline: %w", err) From faec499b38a8ff06bc5c38fa31b4e75139d09d7d Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:19:34 +0300 Subject: [PATCH 523/591] [#1689] linter: Fix staticcheck warning: 'variable naming format' Change-Id: I8f8b63a6a5f9b6feb7c91f70fe8ac092575b145c Signed-off-by: Alexander Chuprov --- .../blobstor/blobovniczatree/manager.go | 26 +++++++++---------- .../blobstor/blobovniczatree/rebuild.go | 4 +-- pkg/local_object_storage/pilorama/inmemory.go | 4 +-- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go index f2f9509ad..6438f715b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/manager.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/manager.go @@ -141,8 +141,8 @@ func (b *sharedDB) SystemPath() string { return b.path } -// levelDbManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. -type levelDbManager struct { +// levelDBManager stores pointers of the sharedDB's for the leaf directory of the blobovnicza tree. 
+type levelDBManager struct { dbMtx *sync.RWMutex databases map[uint64]*sharedDB @@ -157,8 +157,8 @@ type levelDbManager struct { func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath string, readOnly bool, metrics blobovnicza.Metrics, openDBCounter *openDBCounter, closedFlag *atomic.Bool, log *logger.Logger, -) *levelDbManager { - result := &levelDbManager{ +) *levelDBManager { + result := &levelDBManager{ databases: make(map[uint64]*sharedDB), dbMtx: &sync.RWMutex{}, @@ -173,7 +173,7 @@ func newLevelDBManager(options []blobovnicza.Option, rootPath string, lvlPath st return result } -func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { +func (m *levelDBManager) GetByIndex(idx uint64) *sharedDB { res := m.getDBIfExists(idx) if res != nil { return res @@ -181,14 +181,14 @@ func (m *levelDbManager) GetByIndex(idx uint64) *sharedDB { return m.getOrCreateDB(idx) } -func (m *levelDbManager) getDBIfExists(idx uint64) *sharedDB { +func (m *levelDBManager) getDBIfExists(idx uint64) *sharedDB { m.dbMtx.RLock() defer m.dbMtx.RUnlock() return m.databases[idx] } -func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { +func (m *levelDBManager) getOrCreateDB(idx uint64) *sharedDB { m.dbMtx.Lock() defer m.dbMtx.Unlock() @@ -202,7 +202,7 @@ func (m *levelDbManager) getOrCreateDB(idx uint64) *sharedDB { return db } -func (m *levelDbManager) hasAnyDB() bool { +func (m *levelDBManager) hasAnyDB() bool { m.dbMtx.RLock() defer m.dbMtx.RUnlock() @@ -213,7 +213,7 @@ func (m *levelDbManager) hasAnyDB() bool { // // The blobovnicza opens at the first request, closes after the last request. type dbManager struct { - levelToManager map[string]*levelDbManager + levelToManager map[string]*levelDBManager levelToManagerGuard *sync.RWMutex closedFlag *atomic.Bool dbCounter *openDBCounter @@ -231,7 +231,7 @@ func newDBManager(rootPath string, options []blobovnicza.Option, readOnly bool, options: options, readOnly: readOnly, metrics: metrics, - levelToManager: make(map[string]*levelDbManager), + levelToManager: make(map[string]*levelDBManager), levelToManagerGuard: &sync.RWMutex{}, log: log, closedFlag: &atomic.Bool{}, @@ -266,7 +266,7 @@ func (m *dbManager) Close() { m.dbCounter.WaitUntilAllClosed() } -func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManager(lvlPath string) *levelDBManager { result := m.getLevelManagerIfExists(lvlPath) if result != nil { return result @@ -274,14 +274,14 @@ func (m *dbManager) getLevelManager(lvlPath string) *levelDbManager { return m.getOrCreateLevelManager(lvlPath) } -func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDbManager { +func (m *dbManager) getLevelManagerIfExists(lvlPath string) *levelDBManager { m.levelToManagerGuard.RLock() defer m.levelToManagerGuard.RUnlock() return m.levelToManager[lvlPath] } -func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDbManager { +func (m *dbManager) getOrCreateLevelManager(lvlPath string) *levelDBManager { m.levelToManagerGuard.Lock() defer m.levelToManagerGuard.Unlock() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index 7ef3317fd..d2eef2074 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -328,7 +328,7 @@ func (b *Blobovniczas) moveObject(ctx context.Context, source *blobovnicza.Blobo return nil } -func (b *Blobovniczas) dropDB(ctx 
context.Context, path string, shDb *sharedDB) (bool, error) { +func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDB *sharedDB) (bool, error) { select { case <-ctx.Done(): return false, ctx.Err() @@ -341,7 +341,7 @@ func (b *Blobovniczas) dropDB(ctx context.Context, path string, shDb *sharedDB) b.dbFilesGuard.Lock() defer b.dbFilesGuard.Unlock() - if err := shDb.CloseAndRemoveFile(ctx); err != nil { + if err := shDB.CloseAndRemoveFile(ctx); err != nil { return false, err } b.commondbManager.CleanResources(path) diff --git a/pkg/local_object_storage/pilorama/inmemory.go b/pkg/local_object_storage/pilorama/inmemory.go index f74d12a1b..28b7faec8 100644 --- a/pkg/local_object_storage/pilorama/inmemory.go +++ b/pkg/local_object_storage/pilorama/inmemory.go @@ -192,7 +192,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { } var nodes []Node - var lastTs Timestamp + var lastTS Timestamp children := t.getChildren(curNode) for i := range children { @@ -200,7 +200,7 @@ func (t tree) getByPath(attr string, path []string, latest bool) []Node { fileName := string(info.Meta.GetAttr(attr)) if fileName == path[len(path)-1] { if latest { - if info.Meta.Time >= lastTs { + if info.Meta.Time >= lastTS { nodes = append(nodes[:0], children[i]) } } else { From c274bbeb7c0b6957ccc5d3f507b5b5432f91ffd6 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:25:59 +0300 Subject: [PATCH 524/591] [#1689] linter: Fix staticcheck warning: 'methods on the same type should have the same receiver name' Change-Id: I25e9432987f73061c1506a184a82065e37885861 Signed-off-by: Alexander Chuprov --- pkg/services/object/get/get.go | 42 +++++++++++++++--------------- pkg/services/object/put/service.go | 4 +-- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go index 9676fd914..3a50308c2 100644 --- a/pkg/services/object/get/get.go +++ b/pkg/services/object/get/get.go @@ -90,48 +90,48 @@ func (s *Service) get(ctx context.Context, prm RequestParameters) error { return exec.err } -func (exec *request) execute(ctx context.Context) { - exec.log.Debug(ctx, logs.ServingRequest) +func (r *request) execute(ctx context.Context) { + r.log.Debug(ctx, logs.ServingRequest) // perform local operation - exec.executeLocal(ctx) + r.executeLocal(ctx) - exec.analyzeStatus(ctx, true) + r.analyzeStatus(ctx, true) } -func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) { +func (r *request) analyzeStatus(ctx context.Context, execCnr bool) { // analyze local result - switch exec.status { + switch r.status { case statusOK: - exec.log.Debug(ctx, logs.OperationFinishedSuccessfully) + r.log.Debug(ctx, logs.OperationFinishedSuccessfully) case statusINHUMED: - exec.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) + r.log.Debug(ctx, logs.GetRequestedObjectWasMarkedAsRemoved) case statusVIRTUAL: - exec.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) - exec.assemble(ctx) + r.log.Debug(ctx, logs.GetRequestedObjectIsVirtual) + r.assemble(ctx) case statusOutOfRange: - exec.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) + r.log.Debug(ctx, logs.GetRequestedRangeIsOutOfObjectBounds) case statusEC: - exec.log.Debug(ctx, logs.GetRequestedObjectIsEC) - if exec.isRaw() && execCnr { - exec.executeOnContainer(ctx) - exec.analyzeStatus(ctx, false) + r.log.Debug(ctx, logs.GetRequestedObjectIsEC) + if r.isRaw() && execCnr { + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) } - exec.assembleEC(ctx) + 
r.assembleEC(ctx) default: - exec.log.Debug(ctx, logs.OperationFinishedWithError, - zap.Error(exec.err), + r.log.Debug(ctx, logs.OperationFinishedWithError, + zap.Error(r.err), ) var errAccessDenied *apistatus.ObjectAccessDenied - if execCnr && errors.As(exec.err, &errAccessDenied) { + if execCnr && errors.As(r.err, &errAccessDenied) { // Local get can't return access denied error, so this error was returned by // write to the output stream. So there is no need to try to find object on other nodes. return } if execCnr { - exec.executeOnContainer(ctx) - exec.analyzeStatus(ctx, false) + r.executeOnContainer(ctx) + r.analyzeStatus(ctx, false) } } } diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go index 099486b3f..7aeb5857d 100644 --- a/pkg/services/object/put/service.go +++ b/pkg/services/object/put/service.go @@ -56,8 +56,8 @@ func NewService(ks *objutil.KeyStorage, } } -func (p *Service) Put() (*Streamer, error) { +func (s *Service) Put() (*Streamer, error) { return &Streamer{ - Config: p.Config, + Config: s.Config, }, nil } From 2394ae6ce006acf85e3914fc29c3dd284be9ce3c Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:28:27 +0300 Subject: [PATCH 525/591] [#1689] linter: Fix staticcheck warning: 'could lift into loop condition' Change-Id: I4ff3cda54861d857740203d6994872998a22d5d5 Signed-off-by: Alexander Chuprov --- pkg/services/object/get/container.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go index 0ee8aed53..dfb31133c 100644 --- a/pkg/services/object/get/container.go +++ b/pkg/services/object/get/container.go @@ -28,16 +28,7 @@ func (r *request) executeOnContainer(ctx context.Context) { localStatus := r.status - for { - if r.processCurrentEpoch(ctx, localStatus) { - break - } - - // check the maximum depth has been reached - if lookupDepth == 0 { - break - } - + for !r.processCurrentEpoch(ctx, localStatus) && lookupDepth != 0 { lookupDepth-- // go to the previous epoch From dfdeedfc6fe2b557fe71d58960f954f30be2c339 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:32:23 +0300 Subject: [PATCH 526/591] [#1689] linter: Fix staticcheck warning: 'could apply De Morgan's law' Change-Id: Ife03172bad7d517dc99771250c3308a9fc0916b3 Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/object/range.go | 2 +- pkg/morph/event/notary_preparator.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/range.go b/cmd/frostfs-cli/modules/object/range.go index be4fee4cf..6ec508ae2 100644 --- a/cmd/frostfs-cli/modules/object/range.go +++ b/cmd/frostfs-cli/modules/object/range.go @@ -154,7 +154,7 @@ func printECInfoErr(cmd *cobra.Command, err error) bool { if ok { toJSON, _ := cmd.Flags().GetBool(commonflags.JSON) toProto, _ := cmd.Flags().GetBool("proto") - if !(toJSON || toProto) { + if !toJSON && !toProto { cmd.PrintErrln("Object is erasure-encoded, ec information received.") } printECInfo(cmd, errECInfo.ECInfo()) diff --git a/pkg/morph/event/notary_preparator.go b/pkg/morph/event/notary_preparator.go index 40f5984a9..b11973646 100644 --- a/pkg/morph/event/notary_preparator.go +++ b/pkg/morph/event/notary_preparator.go @@ -199,8 +199,8 @@ func (p Preparator) validateNotaryRequest(nr *payload.P2PNotaryRequest) error { // neo-go API) // // this check prevents notary flow recursion - if !(len(nr.MainTransaction.Scripts[1].InvocationScript) == 0 || - 
bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript)) { // compatibility with old version + if len(nr.MainTransaction.Scripts[1].InvocationScript) != 0 && + !bytes.Equal(nr.MainTransaction.Scripts[1].InvocationScript, p.dummyInvocationScript) { // compatibility with old version return ErrTXAlreadyHandled } @@ -364,8 +364,8 @@ func (p Preparator) validateWitnesses(w []transaction.Witness, alphaKeys keys.Pu // the last one must be a placeholder for notary contract witness last := len(w) - 1 - if !(len(w[last].InvocationScript) == 0 || // https://github.com/nspcc-dev/neo-go/pull/2981 - bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version + if (len(w[last].InvocationScript) != 0 && // https://github.com/nspcc-dev/neo-go/pull/2981 + !bytes.Equal(w[last].InvocationScript, p.dummyInvocationScript)) || // compatibility with old version len(w[last].VerificationScript) != 0 { return errIncorrectNotaryPlaceholder } From 4f9d237042233c8ae155ae3d2de4ea3911f8ce49 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Mon, 7 Apr 2025 17:36:54 +0300 Subject: [PATCH 527/591] [#1689] linter: Fix staticcheck warning: 'probably want to use time.Time.Equal instead' Change-Id: Idb119d3f4f167c9e42ed48633d301185589553ed Signed-off-by: Alexander Chuprov --- pkg/local_object_storage/engine/evacuate_limiter.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index c74134500..3dd7494be 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -95,8 +95,7 @@ func (s *EvacuationState) StartedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.startedAt == defaultTime { + if s.startedAt.IsZero() { return nil } return &s.startedAt @@ -106,8 +105,7 @@ func (s *EvacuationState) FinishedAt() *time.Time { if s == nil { return nil } - defaultTime := time.Time{} - if s.finishedAt == defaultTime { + if s.finishedAt.IsZero() { return nil } return &s.finishedAt From fe29ed043a7d5e6e27256cf52490e1ec7ceedb40 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 8 Apr 2025 15:22:00 +0300 Subject: [PATCH 528/591] [#1689] linter: Fix staticcheck warning: 'could use tagged switch on *' Change-Id: Ia340ce1ccdd223eb87f7aefabfba62b7055f344d Signed-off-by: Alexander Chuprov --- pkg/services/object/ape/checker.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index ee46a6fe4..b96757def 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -76,9 +76,10 @@ var errMissingOID = errors.New("object ID is not set") // CheckAPE prepares an APE-request and checks if it is permitted by policies. func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error { // APE check is ignored for some inter-node requests. 
- if prm.Role == nativeschema.PropertyValueContainerRoleContainer { + switch prm.Role { + case nativeschema.PropertyValueContainerRoleContainer: return nil - } else if prm.Role == nativeschema.PropertyValueContainerRoleIR { + case nativeschema.PropertyValueContainerRoleIR: switch prm.Method { case nativeschema.MethodGetObject, nativeschema.MethodHeadObject, From aed84b567caac5b2825888621ca170ee9dae0974 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Tue, 8 Apr 2025 15:23:51 +0300 Subject: [PATCH 529/591] [#1689] linter: Bump 'golangci-lint' to v2.0.2 Change-Id: Ib546af43845014785f0debce429a37d62e616539 Signed-off-by: Alexander Chuprov --- .golangci.yml | 179 ++++++++++++++++++++++++++------------------------ Makefile | 4 +- 2 files changed, 94 insertions(+), 89 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 18de49425..3ac4eb651 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,98 +1,103 @@ -# This file contains all available configuration options -# with their default values. - -# options for analysis running +version: "2" run: - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 20m - - # include test files or not, default is true tests: false - -# output configuration options output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" formats: - - format: tab - -# all available settings of specific linters -linters-settings: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: true - gci: - sections: - - standard - - default - custom-order: true - staticcheck: - checks: ["all", "-SA1019"] # TODO Enable SA1019 after deprecated warning are fixed. 
- funlen: - lines: 80 # default 60 - statements: 60 # default 40 - gocognit: - min-complexity: 40 # default 30 - importas: - no-unaliased: true - no-extra-aliases: false - alias: - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object - alias: objectSDK - unused: - field-writes-are-uses: false - exported-fields-are-used: false - local-variables-are-used: false - custom: - truecloudlab-linters: - path: bin/linters/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - target-methods : ["reportFlushError", "reportError"] - disable-packages: ["codes", "err", "res","exec"] - constants-package: "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" - + tab: + path: stdout + colors: false linters: + default: none enable: - # mandatory linters - - revive - - predeclared - - # some default golangci-lint linters - - errcheck - - gosimple - - godot - - ineffassign - - staticcheck - - typecheck - - unused - - # extra linters - bidichk - - durationcheck - - exhaustive + - containedctx + - contextcheck - copyloopvar + - durationcheck + - errcheck + - exhaustive + - funlen + - gocognit + - godot + - importas + - ineffassign + - intrange + - misspell + - perfsprint + - predeclared + - protogetter + - reassign + - revive + - staticcheck + - testifylint + - truecloudlab-linters + - unconvert + - unparam + - unused + - usetesting + - whitespace + settings: + exhaustive: + default-signifies-exhaustive: true + funlen: + lines: 80 + statements: 60 + gocognit: + min-complexity: 40 + importas: + alias: + - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object + alias: objectSDK + no-unaliased: true + no-extra-aliases: false + staticcheck: + checks: + - all + - -QF1002 + unused: + field-writes-are-uses: false + exported-fields-are-used: false + local-variables-are-used: false + custom: + truecloudlab-linters: + path: bin/linters/external_linters.so + original-url: git.frostfs.info/TrueCloudLab/linters.git + settings: + noliteral: + constants-package: git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs + disable-packages: + - codes + - err + - res + - exec + target-methods: + - reportFlushError + - reportError + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: - gci - gofmt - goimports - - misspell - - predeclared - - reassign - - whitespace - - containedctx - - funlen - - gocognit - - contextcheck - - importas - - truecloudlab-linters - - perfsprint - - testifylint - - protogetter - - intrange - - unconvert - - unparam - - usetesting - disable-all: true - fast: false + settings: + gci: + sections: + - standard + - default + custom-order: true + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/Makefile b/Makefile index 5b55c9eec..575eaae6f 100755 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" GO_VERSION ?= 1.23 -LINT_VERSION ?= 1.64.8 +LINT_VERSION ?= 2.0.2 TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 PROTOC_VERSION ?= 25.0 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-sdk-go) @@ -224,7 +224,7 @@ lint-install: $(BIN) @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath 
github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: From f93b96c60114440128782ca9d0a228a80a501ae1 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Apr 2025 12:35:34 +0300 Subject: [PATCH 530/591] [#1712] adm: Add `maintenance zombie` commands Change-Id: I1b73e561a8daad67d0a8ffc0d293cbdd09aaab6b Signed-off-by: Dmitrii Stepanov --- .../internal/modules/maintenance/root.go | 15 + .../modules/maintenance/zombie/key.go | 70 +++++ .../modules/maintenance/zombie/list.go | 31 ++ .../modules/maintenance/zombie/morph.go | 46 +++ .../modules/maintenance/zombie/quarantine.go | 154 ++++++++++ .../modules/maintenance/zombie/remove.go | 55 ++++ .../modules/maintenance/zombie/restore.go | 69 +++++ .../modules/maintenance/zombie/root.go | 125 ++++++++ .../modules/maintenance/zombie/scan.go | 281 ++++++++++++++++++ .../maintenance/zombie/storage_engine.go | 203 +++++++++++++ cmd/frostfs-adm/internal/modules/root.go | 2 + 11 files changed, 1051 insertions(+) create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/root.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go create mode 100644 cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go diff --git a/cmd/frostfs-adm/internal/modules/maintenance/root.go b/cmd/frostfs-adm/internal/modules/maintenance/root.go new file mode 100644 index 000000000..d67b70d2a --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/root.go @@ -0,0 +1,15 @@ +package maintenance + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie" + "github.com/spf13/cobra" +) + +var RootCmd = &cobra.Command{ + Use: "maintenance", + Short: "Section for maintenance commands", +} + +func init() { + RootCmd.AddCommand(zombie.Cmd) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go new file mode 100644 index 000000000..1b66889aa --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/key.go @@ -0,0 +1,70 @@ +package zombie + +import ( + "crypto/ecdsa" + "fmt" + "os" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "github.com/nspcc-dev/neo-go/cli/flags" + "github.com/nspcc-dev/neo-go/cli/input" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func getPrivateKey(cmd *cobra.Command, appCfg *config.Config) *ecdsa.PrivateKey { + keyDesc := viper.GetString(walletFlag) + if keyDesc == "" { + return &nodeconfig.Key(appCfg).PrivateKey + } 
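// Editor's note, not part of the patch above: the key resolution order implemented by
// getPrivateKey is (1) no --wallet flag, fall back to the node key from the
// frostfs-node config; (2) otherwise try to parse the file as a raw binary private key;
// (3) if that fails, open it as a NEP-6 wallet, pick the account by --address (or the
// wallet's change address) and decrypt it with the configured or prompted password.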
+ data, err := os.ReadFile(keyDesc) + commonCmd.ExitOnErr(cmd, "open wallet file: %w", err) + + priv, err := keys.NewPrivateKeyFromBytes(data) + if err != nil { + w, err := wallet.NewWalletFromFile(keyDesc) + commonCmd.ExitOnErr(cmd, "provided key is incorrect, only wallet or binary key supported: %w", err) + return fromWallet(cmd, w, viper.GetString(addressFlag)) + } + return &priv.PrivateKey +} + +func fromWallet(cmd *cobra.Command, w *wallet.Wallet, addrStr string) *ecdsa.PrivateKey { + var ( + addr util.Uint160 + err error + ) + + if addrStr == "" { + addr = w.GetChangeAddress() + } else { + addr, err = flags.ParseAddress(addrStr) + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", err) + } + + acc := w.GetAccount(addr) + if acc == nil { + commonCmd.ExitOnErr(cmd, "--address option must be specified and valid: %w", fmt.Errorf("can't find wallet account for %s", addrStr)) + } + + pass, err := getPassword() + commonCmd.ExitOnErr(cmd, "invalid password for the encrypted key: %w", err) + + commonCmd.ExitOnErr(cmd, "can't decrypt account: %w", acc.Decrypt(pass, keys.NEP2ScryptParams())) + + return &acc.PrivateKey().PrivateKey +} + +func getPassword() (string, error) { + // this check allows empty passwords + if viper.IsSet("password") { + return viper.GetString("password"), nil + } + + return input.ReadPassword("Enter password > ") +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go new file mode 100644 index 000000000..f73f33db9 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/list.go @@ -0,0 +1,31 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func list(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + var containerID *cid.ID + if cidStr, _ := cmd.Flags().GetString(cidFlag); cidStr != "" { + containerID = &cid.ID{} + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + } + + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(a oid.Address) error { + if containerID != nil && a.Container() != *containerID { + return nil + } + cmd.Println(a.EncodeToString()) + return nil + })) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go new file mode 100644 index 000000000..cd3a64499 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/morph.go @@ -0,0 +1,46 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph" + nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + netmapClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + "github.com/spf13/cobra" +) + +func createMorphClient(cmd *cobra.Command, appCfg *config.Config) *client.Client { + addresses := morphconfig.RPCEndpoint(appCfg) + if len(addresses) == 0 { + commonCmd.ExitOnErr(cmd, "create morph client: %w", errors.New("no morph endpoints found")) + } + key := nodeconfig.Key(appCfg) + cli, err := client.New(cmd.Context(), + key, + client.WithDialTimeout(morphconfig.DialTimeout(appCfg)), + client.WithEndpoints(addresses...), + client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)), + ) + commonCmd.ExitOnErr(cmd, "create morph client: %w", err) + return cli +} + +func createContainerClient(cmd *cobra.Command, morph *client.Client) *cntClient.Client { + hs, err := morph.NNSContractAddress(client.NNSContainerContractName) + commonCmd.ExitOnErr(cmd, "resolve container contract hash: %w", err) + cc, err := cntClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph container client: %w", err) + return cc +} + +func createNetmapClient(cmd *cobra.Command, morph *client.Client) *netmapClient.Client { + hs, err := morph.NNSContractAddress(client.NNSNetmapContractName) + commonCmd.ExitOnErr(cmd, "resolve netmap contract hash: %w", err) + cli, err := netmapClient.NewFromMorph(morph, hs, 0) + commonCmd.ExitOnErr(cmd, "create morph netmap client: %w", err) + return cli +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go new file mode 100644 index 000000000..27f83aec7 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/quarantine.go @@ -0,0 +1,154 @@ +package zombie + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "sync" + + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +type quarantine struct { + // mtx protects current field. 
+ mtx sync.Mutex + current int + trees []*fstree.FSTree +} + +func createQuarantine(cmd *cobra.Command, engineInfo engine.Info) *quarantine { + var paths []string + for _, sh := range engineInfo.Shards { + var storagePaths []string + for _, st := range sh.BlobStorInfo.SubStorages { + storagePaths = append(storagePaths, st.Path) + } + if len(storagePaths) == 0 { + continue + } + paths = append(paths, filepath.Join(commonPath(storagePaths), "quarantine")) + } + q, err := newQuarantine(paths) + commonCmd.ExitOnErr(cmd, "create quarantine: %w", err) + return q +} + +func commonPath(paths []string) string { + if len(paths) == 0 { + return "" + } + if len(paths) == 1 { + return paths[0] + } + minLen := math.MaxInt + for _, p := range paths { + if len(p) < minLen { + minLen = len(p) + } + } + + var sb strings.Builder + for i := range minLen { + for _, path := range paths[1:] { + if paths[0][i] != path[i] { + return sb.String() + } + } + sb.WriteByte(paths[0][i]) + } + return sb.String() +} + +func newQuarantine(paths []string) (*quarantine, error) { + var q quarantine + for i := range paths { + f := fstree.New( + fstree.WithDepth(1), + fstree.WithDirNameLen(1), + fstree.WithPath(paths[i]), + fstree.WithPerm(os.ModePerm), + ) + if err := f.Open(mode.ComponentReadWrite); err != nil { + return nil, fmt.Errorf("open fstree %s: %w", paths[i], err) + } + if err := f.Init(); err != nil { + return nil, fmt.Errorf("init fstree %s: %w", paths[i], err) + } + q.trees = append(q.trees, f) + } + return &q, nil +} + +func (q *quarantine) Get(ctx context.Context, a oid.Address) (*objectSDK.Object, error) { + for i := range q.trees { + res, err := q.trees[i].Get(ctx, common.GetPrm{Address: a}) + if err != nil { + continue + } + return res.Object, nil + } + return nil, &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Delete(ctx context.Context, a oid.Address) error { + for i := range q.trees { + _, err := q.trees[i].Delete(ctx, common.DeletePrm{Address: a}) + if err != nil { + continue + } + return nil + } + return &apistatus.ObjectNotFound{} +} + +func (q *quarantine) Put(ctx context.Context, obj *objectSDK.Object) error { + data, err := obj.Marshal() + if err != nil { + return err + } + + var prm common.PutPrm + prm.Address = objectcore.AddressOf(obj) + prm.Object = obj + prm.RawData = data + + q.mtx.Lock() + current := q.current + q.current = (q.current + 1) % len(q.trees) + q.mtx.Unlock() + + _, err = q.trees[current].Put(ctx, prm) + return err +} + +func (q *quarantine) Iterate(ctx context.Context, f func(oid.Address) error) error { + var prm common.IteratePrm + prm.Handler = func(elem common.IterationElement) error { + return f(elem.Address) + } + for i := range q.trees { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + _, err := q.trees[i].Iterate(ctx, prm) + if err != nil { + return err + } + } + return nil +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go new file mode 100644 index 000000000..0b8f2f172 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/remove.go @@ -0,0 +1,55 @@ +package zombie + +import ( + "errors" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func remove(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + removeObject(cmd, q, addr) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr oid.Address) error { + if addr.Container() != containerID { + return nil + } + removeObject(cmd, q, addr) + return nil + })) + } +} + +func removeObject(cmd *cobra.Command, q *quarantine, addr oid.Address) { + err := q.Delete(cmd.Context(), addr) + if errors.Is(err, new(apistatus.ObjectNotFound)) { + return + } + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", err) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go new file mode 100644 index 000000000..f179c7c2d --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/restore.go @@ -0,0 +1,69 @@ +package zombie + +import ( + "crypto/sha256" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" +) + +func restore(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + storageEngine := newEngine(cmd, appCfg) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + + var containerID cid.ID + cidStr, _ := cmd.Flags().GetString(cidFlag) + commonCmd.ExitOnErr(cmd, "decode container ID string: %w", containerID.DecodeString(cidStr)) + + var objectID *oid.ID + oidStr, _ := cmd.Flags().GetString(oidFlag) + if oidStr != "" { + objectID = &oid.ID{} + commonCmd.ExitOnErr(cmd, "decode object ID string: %w", objectID.DecodeString(oidStr)) + } + + if objectID != nil { + var addr oid.Address + addr.SetContainer(containerID) + addr.SetObject(*objectID) + restoreObject(cmd, storageEngine, q, addr, cnrCli) + } else { + commonCmd.ExitOnErr(cmd, "iterate over quarantine: %w", q.Iterate(cmd.Context(), func(addr 
oid.Address) error { + if addr.Container() != containerID { + return nil + } + restoreObject(cmd, storageEngine, q, addr, cnrCli) + return nil + })) + } +} + +func restoreObject(cmd *cobra.Command, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address, cnrCli *cntClient.Client) { + obj, err := q.Get(cmd.Context(), addr) + commonCmd.ExitOnErr(cmd, "get object from quarantine: %w", err) + rawCID := make([]byte, sha256.Size) + + cid := addr.Container() + cid.Encode(rawCID) + cnr, err := cnrCli.Get(cmd.Context(), rawCID) + commonCmd.ExitOnErr(cmd, "get container: %w", err) + + putPrm := engine.PutPrm{ + Object: obj, + IsIndexedContainer: containerCore.IsIndexedContainer(cnr.Value), + } + commonCmd.ExitOnErr(cmd, "put object to storage engine: %w", storageEngine.Put(cmd.Context(), putPrm)) + commonCmd.ExitOnErr(cmd, "remove object from quarantine: %w", q.Delete(cmd.Context(), addr)) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go new file mode 100644 index 000000000..9ef18f7f8 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go @@ -0,0 +1,125 @@ +package zombie + +import ( + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +const ( + flagBatchSize = "batch-size" + flagBatchSizeUsage = "Objects iteration batch size" + cidFlag = "cid" + cidFlagUsage = "Container ID" + oidFlag = "oid" + oidFlagUsage = "Object ID" + walletFlag = "wallet" + walletFlagShorthand = "w" + walletFlagUsage = "Path to the wallet or binary key" + addressFlag = "address" + addressFlagUsage = "Address of wallet account" + moveFlag = "move" + moveFlagUsage = "Move objects from storage engine to quarantine" +) + +var ( + Cmd = &cobra.Command{ + Use: "zombie", + Short: "Zombie objects related commands", + } + scanCmd = &cobra.Command{ + Use: "scan", + Short: "Scan storage engine for zombie objects and move them to quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) + _ = viper.BindPFlag(addressFlag, cmd.Flags().Lookup(addressFlag)) + _ = viper.BindPFlag(flagBatchSize, cmd.Flags().Lookup(flagBatchSize)) + _ = viper.BindPFlag(moveFlag, cmd.Flags().Lookup(moveFlag)) + }, + Run: scan, + } + listCmd = &cobra.Command{ + Use: "list", + Short: "List zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + }, + Run: list, + } + restoreCmd = &cobra.Command{ + Use: "restore", + Short: "Restore zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, 
cmd.Flags().Lookup(oidFlag)) + }, + Run: restore, + } + removeCmd = &cobra.Command{ + Use: "remove", + Short: "Remove zombie objects from quarantine", + Long: "", + PreRun: func(cmd *cobra.Command, _ []string) { + _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) + _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) + _ = viper.BindPFlag(cidFlag, cmd.Flags().Lookup(cidFlag)) + _ = viper.BindPFlag(oidFlag, cmd.Flags().Lookup(oidFlag)) + }, + Run: remove, + } +) + +func init() { + initScanCmd() + initListCmd() + initRestoreCmd() + initRemoveCmd() +} + +func initScanCmd() { + Cmd.AddCommand(scanCmd) + + scanCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) + scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) + scanCmd.Flags().StringP(walletFlag, walletFlagShorthand, "", walletFlagUsage) + scanCmd.Flags().String(addressFlag, "", addressFlagUsage) + scanCmd.Flags().Bool(moveFlag, false, moveFlagUsage) +} + +func initListCmd() { + Cmd.AddCommand(listCmd) + + listCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + listCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + listCmd.Flags().String(cidFlag, "", cidFlagUsage) +} + +func initRestoreCmd() { + Cmd.AddCommand(restoreCmd) + + restoreCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + restoreCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + restoreCmd.Flags().String(cidFlag, "", cidFlagUsage) + restoreCmd.Flags().String(oidFlag, "", oidFlagUsage) +} + +func initRemoveCmd() { + Cmd.AddCommand(removeCmd) + + removeCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) + removeCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) + removeCmd.Flags().String(cidFlag, "", cidFlagUsage) + removeCmd.Flags().String(oidFlag, "", oidFlagUsage) +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go new file mode 100644 index 000000000..268ec4911 --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/scan.go @@ -0,0 +1,281 @@ +package zombie + +import ( + "context" + "crypto/ecdsa" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + apiclientconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/apiclient" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + clientCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" + netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + cntClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network/cache" + clientSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +func scan(cmd *cobra.Command, _ []string) { + configFile, _ := cmd.Flags().GetString(commonflags.ConfigFlag) + configDir, _ := cmd.Flags().GetString(commonflags.ConfigDirFlag) + appCfg := config.New(configFile, configDir, config.EnvPrefix) + batchSize, _ := cmd.Flags().GetUint32(flagBatchSize) + if batchSize == 0 { + commonCmd.ExitOnErr(cmd, "invalid batch size: %w", errors.New("batch size must be positive value")) + } + move, _ := cmd.Flags().GetBool(moveFlag) + + storageEngine := newEngine(cmd, appCfg) + morphClient := createMorphClient(cmd, appCfg) + cnrCli := createContainerClient(cmd, morphClient) + nmCli := createNetmapClient(cmd, morphClient) + q := createQuarantine(cmd, storageEngine.DumpInfo()) + pk := getPrivateKey(cmd, appCfg) + + epoch, err := nmCli.Epoch(cmd.Context()) + commonCmd.ExitOnErr(cmd, "read epoch from morph: %w", err) + + nm, err := nmCli.GetNetMapByEpoch(cmd.Context(), epoch) + commonCmd.ExitOnErr(cmd, "read netmap from morph: %w", err) + + cmd.Printf("Epoch: %d\n", nm.Epoch()) + cmd.Printf("Nodes in the netmap: %d\n", len(nm.Nodes())) + + ps := &processStatus{ + statusCount: make(map[status]uint64), + } + + stopCh := make(chan struct{}) + start := time.Now() + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + tick := time.NewTicker(time.Second) + defer tick.Stop() + for { + select { + case <-cmd.Context().Done(): + return + case <-stopCh: + return + case <-tick.C: + fmt.Printf("Objects processed: %d; Time elapsed: %s\n", ps.total(), time.Since(start)) + } + } + }() + go func() { + defer wg.Done() + err = scanStorageEngine(cmd, batchSize, storageEngine, ps, appCfg, cnrCli, nmCli, q, pk, move) + close(stopCh) + }() + wg.Wait() + commonCmd.ExitOnErr(cmd, "scan storage engine for zombie objects: %w", err) + + cmd.Println() + cmd.Println("Status description:") + cmd.Println("undefined -- nothing is clear") + cmd.Println("found -- object is found in cluster") + cmd.Println("quarantine -- object is not found in cluster") + cmd.Println() + for status, count := range ps.statusCount { + cmd.Printf("Status: %s, Count: %d\n", status, count) + } +} + +type status string + +const ( + statusUndefined status = "undefined" + statusFound status = "found" + statusQuarantine status = "quarantine" +) + +func checkAddr(ctx context.Context, cnrCli *cntClient.Client, nmCli *netmap.Client, cc *cache.ClientCache, obj object.Info) (status, error) { + rawCID := make([]byte, sha256.Size) + cid := obj.Address.Container() + cid.Encode(rawCID) + + cnr, err := cnrCli.Get(ctx, rawCID) + if err != nil { + var errContainerNotFound *apistatus.ContainerNotFound + if errors.As(err, &errContainerNotFound) { + // Policer will deal with this object. + return statusFound, nil + } + return statusUndefined, fmt.Errorf("read container %s from morph: %w", cid, err) + } + nm, err := nmCli.NetMap(ctx) + if err != nil { + return statusUndefined, fmt.Errorf("read netmap from morph: %w", err) + } + + nodes, err := nm.ContainerNodes(cnr.Value.PlacementPolicy(), rawCID) + if err != nil { + // Not enough nodes, check all netmap nodes. 
+ nodes = append([][]netmap.NodeInfo{}, nm.Nodes()) + } + + objID := obj.Address.Object() + cnrID := obj.Address.Container() + local := true + raw := false + if obj.ECInfo != nil { + objID = obj.ECInfo.ParentID + local = false + raw = true + } + prm := clientSDK.PrmObjectHead{ + ObjectID: &objID, + ContainerID: &cnrID, + Local: local, + Raw: raw, + } + + var ni clientCore.NodeInfo + for i := range nodes { + for j := range nodes[i] { + if err := clientCore.NodeInfoFromRawNetmapElement(&ni, netmapCore.Node(nodes[i][j])); err != nil { + return statusUndefined, fmt.Errorf("parse node info: %w", err) + } + c, err := cc.Get(ni) + if err != nil { + continue + } + res, err := c.ObjectHead(ctx, prm) + if err != nil { + var errECInfo *objectSDK.ECInfoError + if raw && errors.As(err, &errECInfo) { + return statusFound, nil + } + continue + } + if err := apistatus.ErrFromStatus(res.Status()); err != nil { + continue + } + return statusFound, nil + } + } + + if cnr.Value.PlacementPolicy().NumberOfReplicas() == 1 && cnr.Value.PlacementPolicy().ReplicaDescriptor(0).NumberOfObjects() == 1 { + return statusFound, nil + } + return statusQuarantine, nil +} + +func scanStorageEngine(cmd *cobra.Command, batchSize uint32, storageEngine *engine.StorageEngine, ps *processStatus, + appCfg *config.Config, cnrCli *cntClient.Client, nmCli *netmap.Client, q *quarantine, pk *ecdsa.PrivateKey, move bool, +) error { + cc := cache.NewSDKClientCache(cache.ClientCacheOpts{ + DialTimeout: apiclientconfig.DialTimeout(appCfg), + StreamTimeout: apiclientconfig.StreamTimeout(appCfg), + ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg), + Key: pk, + AllowExternal: apiclientconfig.AllowExternal(appCfg), + }) + ctx := cmd.Context() + + var cursor *engine.Cursor + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var prm engine.ListWithCursorPrm + prm.WithCursor(cursor) + prm.WithCount(batchSize) + + res, err := storageEngine.ListWithCursor(ctx, prm) + if err != nil { + if errors.Is(err, engine.ErrEndOfListing) { + return nil + } + return fmt.Errorf("list with cursor: %w", err) + } + + cursor = res.Cursor() + addrList := res.AddressList() + eg, egCtx := errgroup.WithContext(ctx) + eg.SetLimit(int(batchSize)) + + for i := range addrList { + addr := addrList[i] + eg.Go(func() error { + result, err := checkAddr(egCtx, cnrCli, nmCli, cc, addr) + if err != nil { + return fmt.Errorf("check object %s status: %w", addr.Address, err) + } + ps.add(result) + + if !move && result == statusQuarantine { + cmd.Println(addr) + return nil + } + + if result == statusQuarantine { + return moveToQuarantine(egCtx, storageEngine, q, addr.Address) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return fmt.Errorf("process objects batch: %w", err) + } + } +} + +func moveToQuarantine(ctx context.Context, storageEngine *engine.StorageEngine, q *quarantine, addr oid.Address) error { + var getPrm engine.GetPrm + getPrm.WithAddress(addr) + res, err := storageEngine.Get(ctx, getPrm) + if err != nil { + return fmt.Errorf("get object %s from storage engine: %w", addr, err) + } + + if err := q.Put(ctx, res.Object()); err != nil { + return fmt.Errorf("put object %s to quarantine: %w", addr, err) + } + + var delPrm engine.DeletePrm + delPrm.WithForceRemoval() + delPrm.WithAddress(addr) + + if err = storageEngine.Delete(ctx, delPrm); err != nil { + return fmt.Errorf("delete object %s from storage engine: %w", addr, err) + } + return nil +} + +type processStatus struct { + guard sync.RWMutex + statusCount 
map[status]uint64 + count uint64 +} + +func (s *processStatus) add(st status) { + s.guard.Lock() + defer s.guard.Unlock() + s.statusCount[st]++ + s.count++ +} + +func (s *processStatus) total() uint64 { + s.guard.RLock() + defer s.guard.RUnlock() + return s.count +} diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go new file mode 100644 index 000000000..5851e049c --- /dev/null +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -0,0 +1,203 @@ +package zombie + +import ( + "context" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine" + shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard" + blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" + fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" + commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" + meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" + "github.com/panjf2000/ants/v2" + "github.com/spf13/cobra" + "go.etcd.io/bbolt" + "go.uber.org/zap" +) + +func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine { + ngOpts := storageEngineOptions(c) + shardOpts := shardOptions(cmd, c) + e := engine.New(ngOpts...) + for _, opts := range shardOpts { + _, err := e.AddShard(cmd.Context(), opts...) 
+ commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + } + commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context())) + commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context())) + return e +} + +func storageEngineOptions(c *config.Config) []engine.Option { + return []engine.Option{ + engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)), + engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)), + } +} + +func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option { + var result [][]shard.Option + err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error { + result = append(result, getShardOpts(cmd, c, sh)) + return nil + }) + commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err) + return result +} + +func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option { + wc, wcEnabled := getWriteCacheOpts(sh) + return []shard.Option{ + shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + shard.WithRefillMetabase(sh.RefillMetabase()), + shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()), + shard.WithMode(sh.Mode()), + shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...), + shard.WithMetaBaseOptions(getMetabaseOpts(sh)...), + shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...), + shard.WithWriteCache(wcEnabled), + shard.WithWriteCacheOptions(wc), + shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()), + shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()), + shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()), + shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()), + shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool { + pool, err := ants.NewPool(sz) + commonCmd.ExitOnErr(cmd, "init GC pool: %w", err) + return pool + }), + shard.WithLimiter(qos.NewNoopLimiter()), + } +} + +func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) { + if wc := sh.WriteCache(); wc != nil && wc.Enabled() { + var result []writecache.Option + result = append(result, + writecache.WithPath(wc.Path()), + writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()), + writecache.WithMaxObjectSize(wc.MaxObjectSize()), + writecache.WithFlushWorkersCount(wc.WorkerCount()), + writecache.WithMaxCacheSize(wc.SizeLimit()), + writecache.WithMaxCacheCount(wc.CountLimit()), + writecache.WithNoSync(wc.NoSync()), + writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + writecache.WithQoSLimiter(qos.NewNoopLimiter()), + ) + return result, true + } + return nil, false +} + +func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option { + var piloramaOpts []pilorama.Option + if config.BoolSafe(c.Sub("tree"), "enabled") { + pr := sh.Pilorama() + piloramaOpts = append(piloramaOpts, + pilorama.WithPath(pr.Path()), + pilorama.WithPerm(pr.Perm()), + pilorama.WithNoSync(pr.NoSync()), + pilorama.WithMaxBatchSize(pr.MaxBatchSize()), + pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()), + ) + } + return piloramaOpts +} + +func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { + return []meta.Option{ + meta.WithPath(sh.Metabase().Path()), + meta.WithPermissions(sh.Metabase().BoltDB().Perm()), + meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()), + meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()), + meta.WithBoltDBOptions(&bbolt.Options{ + Timeout: 100 * 
time.Millisecond, + }), + meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + meta.WithEpochState(&epochState{}), + } +} + +func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { + result := []blobstor.Option{ + blobstor.WithCompressObjects(sh.Compress()), + blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()), + blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()), + blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()), + blobstor.WithStorages(getSubStorages(ctx, sh)), + blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + return result +} + +func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage { + var ss []blobstor.SubStorage + for _, storage := range sh.BlobStor().Storages() { + switch storage.Type() { + case blobovniczatree.Type: + sub := blobovniczaconfig.From((*config.Config)(storage)) + blobTreeOpts := []blobovniczatree.Option{ + blobovniczatree.WithRootPath(storage.Path()), + blobovniczatree.WithPermissions(storage.Perm()), + blobovniczatree.WithBlobovniczaSize(sub.Size()), + blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()), + blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()), + blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()), + blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()), + blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), + blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), + blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), + blobovniczatree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...), + Policy: func(_ *objectSDK.Object, data []byte) bool { + return uint64(len(data)) < sh.SmallSizeLimit() + }, + }) + case fstree.Type: + sub := fstreeconfig.From((*config.Config)(storage)) + fstreeOpts := []fstree.Option{ + fstree.WithPath(storage.Path()), + fstree.WithPerm(storage.Perm()), + fstree.WithDepth(sub.Depth()), + fstree.WithNoSync(sub.NoSync()), + fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + } + + ss = append(ss, blobstor.SubStorage{ + Storage: fstree.New(fstreeOpts...), + Policy: func(_ *objectSDK.Object, _ []byte) bool { + return true + }, + }) + default: + // should never happen, that has already + // been handled: when the config was read + } + } + return ss +} + +type epochState struct{} + +func (epochState) CurrentEpoch() uint64 { + return 0 +} diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go index e42204b7a..cc8225c7a 100644 --- a/cmd/frostfs-adm/internal/modules/root.go +++ b/cmd/frostfs-adm/internal/modules/root.go @@ -5,6 +5,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/maintenance" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph" "git.frostfs.info/TrueCloudLab/frostfs-node/misc" @@ -41,6 +42,7 @@ func init() { rootCmd.AddCommand(config.RootCmd) rootCmd.AddCommand(morph.RootCmd) rootCmd.AddCommand(metabase.RootCmd) + rootCmd.AddCommand(maintenance.RootCmd) 
rootCmd.AddCommand(autocomplete.Command("frostfs-adm")) rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{})) From f7779adf71babe6c06a91172e3ef95c947c1f1a7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Apr 2025 15:32:23 +0300 Subject: [PATCH 531/591] [#1712] core: Extend object info string with EC header Closes #1712 Change-Id: Ief4a960f7dece3359763113270d1ff5155f3f19e Signed-off-by: Dmitrii Stepanov --- pkg/core/object/info.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/core/object/info.go b/pkg/core/object/info.go index 67c9a3188..aab12ebf9 100644 --- a/pkg/core/object/info.go +++ b/pkg/core/object/info.go @@ -13,6 +13,13 @@ type ECInfo struct { Total uint32 } +func (v *ECInfo) String() string { + if v == nil { + return "" + } + return fmt.Sprintf("parent ID: %s, index: %d, total %d", v.ParentID, v.Index, v.Total) +} + // Info groups object address with its FrostFS // object info. type Info struct { @@ -23,5 +30,5 @@ type Info struct { } func (v Info) String() string { - return fmt.Sprintf("address: %s, type: %s, is linking: %t", v.Address, v.Type, v.IsLinkingObject) + return fmt.Sprintf("address: %s, type: %s, is linking: %t, EC header: %s", v.Address, v.Type, v.IsLinkingObject, v.ECInfo) } From 6730e27ae71a97af5ca33392ce6ab5e9e3257173 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 9 Apr 2025 16:29:48 +0300 Subject: [PATCH 532/591] [#1712] adm: Drop rpc-endpoint flag from `zombie scan` Morph addresses from config are used. Change-Id: Id99f91defbbff442c308f30d219b9824b4c871de Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go index 9ef18f7f8..c8fd9e5e5 100644 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/root.go @@ -32,7 +32,6 @@ var ( Short: "Scan storage engine for zombie objects and move them to quarantine", Long: "", PreRun: func(cmd *cobra.Command, _ []string) { - _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag)) _ = viper.BindPFlag(commonflags.ConfigFlag, cmd.Flags().Lookup(commonflags.ConfigFlag)) _ = viper.BindPFlag(commonflags.ConfigDirFlag, cmd.Flags().Lookup(commonflags.ConfigDirFlag)) _ = viper.BindPFlag(walletFlag, cmd.Flags().Lookup(walletFlag)) @@ -89,7 +88,6 @@ func init() { func initScanCmd() { Cmd.AddCommand(scanCmd) - scanCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc) scanCmd.Flags().StringP(commonflags.ConfigFlag, commonflags.ConfigFlagShorthand, "", commonflags.ConfigFlagUsage) scanCmd.Flags().String(commonflags.ConfigDirFlag, "", commonflags.ConfigDirFlagUsage) scanCmd.Flags().Uint32(flagBatchSize, 1000, flagBatchSizeUsage) From dcfd89544925719560343521ceb9a792d195cb51 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 8 Apr 2025 10:21:14 +0300 Subject: [PATCH 533/591] [#1710] object: Implement `Unwrap()` for `errIncompletePut` * When sign service calls `SignResponse`, it tries to set v2 status to response by unwrapping an error to the possible depth. This wasn't applicable for `errIncompletePut` so far as it didn't implement `Unwrap()`. Thus, it wasn't able to find a correct status set in error. 
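For illustration only (hypothetical type names, not the node's actual status or wrapper types), a minimal sketch of why the missing Unwrap mattered: errors.As can only surface a status carried by the wrapped cause when the wrapper exposes that cause.

    package main

    import (
        "errors"
        "fmt"
    )

    // statusError stands in for a status-bearing API error; the real status
    // types used by the sign service are not reproduced here.
    type statusError struct{ code int }

    func (e statusError) Error() string { return fmt.Sprintf("status %d", e.code) }

    // incompletePut mirrors the shape of errIncompletePut: it keeps a single
    // underlying error and now exposes it via Unwrap.
    type incompletePut struct{ singleErr error }

    func (e incompletePut) Error() string { return "incomplete object PUT" }
    func (e incompletePut) Unwrap() error { return e.singleErr }

    func main() {
        err := incompletePut{singleErr: statusError{code: 1024}}

        // Without the Unwrap method, errors.As stops at incompletePut and
        // reports false; with it, the status set in the cause is found.
        var st statusError
        fmt.Println(errors.As(err, &st), st.code) // true 1024
    }
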
Change-Id: I280c1806a008176854c55f13bf8688e5736ef941 Signed-off-by: Airat Arifullin --- pkg/services/object/common/writer/distributed.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go index f7486eae7..fff58aca7 100644 --- a/pkg/services/object/common/writer/distributed.go +++ b/pkg/services/object/common/writer/distributed.go @@ -95,6 +95,10 @@ func (x errIncompletePut) Error() string { return commonMsg } +func (x errIncompletePut) Unwrap() error { + return x.singleErr +} + // WriteObject implements the transformer.ObjectWriter interface. func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error { t.obj = obj From 64c1392513f43b95bbe3f2afb6bab0a8edd03f89 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 8 Apr 2025 10:45:46 +0300 Subject: [PATCH 534/591] [#1710] object: Sign response even if `CloseAndRecv` returns error * Sign service wraps an error with status and sign a response even if error occurs from `CloseAndRecv` in `Put` and `Patch` methods. Close #1710 Change-Id: I7e1d8fe00db53607fa6e04ebec9a29b87349f8a1 Signed-off-by: Airat Arifullin --- pkg/services/object/sign.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go index 2b44227a5..fd8e926dd 100644 --- a/pkg/services/object/sign.go +++ b/pkg/services/object/sign.go @@ -96,7 +96,8 @@ func (s *putStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.PutRes } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PutResponse) } } @@ -132,7 +133,8 @@ func (s *patchStreamSigner) CloseAndRecv(ctx context.Context) (resp *object.Patc } else { resp, err = s.stream.CloseAndRecv(ctx) if err != nil { - return nil, fmt.Errorf("could not close stream and receive response: %w", err) + err = fmt.Errorf("could not close stream and receive response: %w", err) + resp = new(object.PatchResponse) } } From e06ecacf57e14630f1cd783af0c88854c26f41cc Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 10 Apr 2025 16:04:04 +0300 Subject: [PATCH 535/591] [#1705] engine: Use condition var for evacuation unit tests To know exactly when the evacuation was completed, a conditional variable was added. 
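As context (illustrative names only, not the engine's actual fields), the test helper relies on the standard wait/broadcast loop around a condition variable: every state transition happens under the mutex and is followed by Broadcast, so the test can block until completion instead of polling.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type watcher struct {
        mu   sync.Mutex
        cond *sync.Cond
        done bool
    }

    func newWatcher() *watcher {
        w := &watcher{}
        w.cond = sync.NewCond(&w.mu)
        return w
    }

    func (w *watcher) complete() {
        w.mu.Lock()
        defer w.mu.Unlock()
        w.done = true
        w.cond.Broadcast() // wake every goroutine blocked in waitForCompleted
    }

    func (w *watcher) waitForCompleted() {
        w.mu.Lock()
        defer w.mu.Unlock()
        // Re-check the predicate in a loop: other transitions may also
        // broadcast, and Wait releases the lock while blocked.
        for !w.done {
            w.cond.Wait()
        }
    }

    func main() {
        w := newWatcher()
        go func() {
            time.Sleep(10 * time.Millisecond)
            w.complete()
        }()
        w.waitForCompleted()
        fmt.Println("completed")
    }
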
Closes #1705 Change-Id: I86f6d7d2ad2b9759905b6b5e9341008cb74f5dfd Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/engine.go | 16 +++++++++++----- .../engine/evacuate_limiter.go | 6 +++++- .../engine/evacuate_test.go | 18 +++++++++++++----- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index a915c9bd6..1d33c0592 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -212,12 +212,18 @@ func New(opts ...Option) *StorageEngine { opts[i](c) } + evLimMtx := &sync.RWMutex{} + evLimCond := sync.NewCond(evLimMtx) + return &StorageEngine{ - cfg: c, - shards: make(map[string]hashedShard), - closeCh: make(chan struct{}), - setModeCh: make(chan setModeRequest), - evacuateLimiter: &evacuationLimiter{}, + cfg: c, + shards: make(map[string]hashedShard), + closeCh: make(chan struct{}), + setModeCh: make(chan setModeRequest), + evacuateLimiter: &evacuationLimiter{ + guard: evLimMtx, + statusCond: evLimCond, + }, } } diff --git a/pkg/local_object_storage/engine/evacuate_limiter.go b/pkg/local_object_storage/engine/evacuate_limiter.go index 3dd7494be..b75e8686d 100644 --- a/pkg/local_object_storage/engine/evacuate_limiter.go +++ b/pkg/local_object_storage/engine/evacuate_limiter.go @@ -139,7 +139,8 @@ type evacuationLimiter struct { eg *errgroup.Group cancel context.CancelFunc - guard sync.RWMutex + guard *sync.RWMutex + statusCond *sync.Cond // used in unit tests } func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, result *EvacuateShardRes) (*errgroup.Group, context.Context, error) { @@ -165,6 +166,7 @@ func (l *evacuationLimiter) TryStart(ctx context.Context, shardIDs []string, res startedAt: time.Now().UTC(), result: result, } + l.statusCond.Broadcast() return l.eg, egCtx, nil } @@ -180,6 +182,7 @@ func (l *evacuationLimiter) Complete(err error) { l.state.processState = EvacuateProcessStateCompleted l.state.errMessage = errMsq l.state.finishedAt = time.Now().UTC() + l.statusCond.Broadcast() l.eg = nil } @@ -214,6 +217,7 @@ func (l *evacuationLimiter) ResetEvacuationStatus() error { l.state = EvacuationState{} l.eg = nil l.cancel = nil + l.statusCond.Broadcast() return nil } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 359e49481..f2ba7d994 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -204,11 +204,10 @@ func TestEvacuateShardObjects(t *testing.T) { func testWaitForEvacuationCompleted(t *testing.T, e *StorageEngine) *EvacuationState { var st *EvacuationState var err error - require.Eventually(t, func() bool { - st, err = e.GetEvacuationState(context.Background()) - require.NoError(t, err) - return st.ProcessingStatus() == EvacuateProcessStateCompleted - }, 6*time.Second, 10*time.Millisecond) + e.evacuateLimiter.waitForCompleted() + st, err = e.GetEvacuationState(context.Background()) + require.NoError(t, err) + require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus()) return st } @@ -817,3 +816,12 @@ func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) { t.Logf("evacuate took %v\n", time.Since(start)) require.NoError(t, err) } + +func (l *evacuationLimiter) waitForCompleted() { + l.guard.Lock() + defer l.guard.Unlock() + + for l.state.processState != EvacuateProcessStateCompleted { + l.statusCond.Wait() + } +} From 
dfe2f9956a82c6bee62e443e9e5178d6584ade64 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Mon, 24 Mar 2025 15:32:25 +0300 Subject: [PATCH 536/591] [#1619] logger: Filter entries by tags provided in config Change-Id: Ia2a79d6cb2a5eb263fb2e6db3f9cf9f2a7d57118 Signed-off-by: Anton Nikiforov --- cmd/frostfs-ir/config.go | 23 +++++- cmd/frostfs-ir/main.go | 4 ++ cmd/frostfs-node/config.go | 10 ++- cmd/frostfs-node/config/logger/config.go | 16 +++++ cmd/frostfs-node/validate.go | 5 ++ pkg/util/logger/logger.go | 92 +++++++++++++++++++----- pkg/util/logger/tag_string.go | 24 +++++++ pkg/util/logger/tags.go | 75 +++++++++++++++++++ 8 files changed, 231 insertions(+), 18 deletions(-) create mode 100644 pkg/util/logger/tag_string.go create mode 100644 pkg/util/logger/tags.go diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go index 19b7f05d6..13a747ba6 100644 --- a/cmd/frostfs-ir/config.go +++ b/cmd/frostfs-ir/config.go @@ -4,12 +4,14 @@ import ( "context" "os" "os/signal" + "strconv" "syscall" configViper "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/ir" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" + "github.com/spf13/cast" "github.com/spf13/viper" "go.uber.org/zap" ) @@ -44,11 +46,30 @@ func reloadConfig() error { if err != nil { return err } - log.Reload(logPrm) + err = logPrm.SetTags(loggerTags()) + if err != nil { + return err + } + logger.UpdateLevelForTags(logPrm) return nil } +func loggerTags() [][]string { + var res [][]string + for i := 0; ; i++ { + var item []string + index := strconv.FormatInt(int64(i), 10) + names := cast.ToString(cfg.Get("logger.tags." + index + ".names")) + if names == "" { + break + } + item = append(item, names, cast.ToString(cfg.Get("logger.tags."+index+".level"))) + res = append(res, item) + } + return res +} + func watchForSignal(ctx context.Context, cancel func()) { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go index 114d8e4de..799feb784 100644 --- a/cmd/frostfs-ir/main.go +++ b/cmd/frostfs-ir/main.go @@ -80,10 +80,14 @@ func main() { exitErr(err) logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook() logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp") + err = logPrm.SetTags(loggerTags()) + exitErr(err) log, err = logger.NewLogger(logPrm) exitErr(err) + logger.UpdateLevelForTags(logPrm) + ctx, cancel := context.WithCancel(context.Background()) pprofCmp = newPprofComponent() diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 431316258..88e45d848 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -109,6 +109,7 @@ type applicationConfiguration struct { destination string timestamp bool options []zap.Option + tags [][]string } ObjectCfg struct { @@ -241,6 +242,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { })} } a.LoggerCfg.options = opts + a.LoggerCfg.tags = loggerconfig.Tags(c) // Object @@ -727,6 +729,7 @@ func initCfg(appCfg *config.Config) *cfg { logPrm.SamplingHook = c.metricsCollector.LogMetrics().GetSamplingHook() log, err := logger.NewLogger(logPrm) fatalOnErr(err) + logger.UpdateLevelForTags(logPrm) c.internals = initInternals(appCfg, log) @@ -1094,6 +1097,11 @@ func (c *cfg) loggerPrm() (logger.Prm, error) { } prm.PrependTimestamp = c.LoggerCfg.timestamp prm.Options = 
c.LoggerCfg.options + err = prm.SetTags(c.LoggerCfg.tags) + if err != nil { + // not expected since validation should be performed before + return logger.Prm{}, errors.New("incorrect allowed tags format: " + c.LoggerCfg.destination) + } return prm, nil } @@ -1381,7 +1389,7 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp { if err != nil { return err } - c.log.Reload(prm) + logger.UpdateLevelForTags(prm) return nil }}) components = append(components, dCmp{"runtime", func() error { diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go index ba9eeea2b..20f373184 100644 --- a/cmd/frostfs-node/config/logger/config.go +++ b/cmd/frostfs-node/config/logger/config.go @@ -2,6 +2,7 @@ package loggerconfig import ( "os" + "strconv" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -60,6 +61,21 @@ func Timestamp(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "timestamp") } +// Tags returns the value of "tags" config parameter from "logger" section. +func Tags(c *config.Config) [][]string { + var res [][]string + sub := c.Sub(subsection).Sub("tags") + for i := 0; ; i++ { + s := sub.Sub(strconv.FormatInt(int64(i), 10)) + names := config.StringSafe(s, "names") + if names == "" { + break + } + res = append(res, []string{names, config.StringSafe(s, "level")}) + } + return res +} + // ToLokiConfig extracts loki config. func ToLokiConfig(c *config.Config) loki.Config { hostname, _ := os.Hostname() diff --git a/cmd/frostfs-node/validate.go b/cmd/frostfs-node/validate.go index ae52b9e4a..22d2e0aa9 100644 --- a/cmd/frostfs-node/validate.go +++ b/cmd/frostfs-node/validate.go @@ -30,6 +30,11 @@ func validateConfig(c *config.Config) error { return fmt.Errorf("invalid logger destination: %w", err) } + err = loggerPrm.SetTags(loggerconfig.Tags(c)) + if err != nil { + return fmt.Errorf("invalid list of allowed tags: %w", err) + } + // shard configuration validation shardNum := 0 diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 10c7e8dc9..276847be1 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -13,8 +13,10 @@ import ( // Logger represents a component // for writing messages to log. type Logger struct { - z *zap.Logger - lvl zap.AtomicLevel + z *zap.Logger + c zapcore.Core + t Tag + w bool } // Prm groups Logger's parameters. @@ -39,6 +41,9 @@ type Prm struct { // Options for zap.Logger Options []zap.Option + + // map of tag's bit masks to log level, overrides lvl + tl map[Tag]zapcore.Level } const ( @@ -68,6 +73,12 @@ func (p *Prm) SetDestination(d string) error { return nil } +// SetTags parses list of tags with log level. +func (p *Prm) SetTags(tags [][]string) (err error) { + p.tl, err = parseTags(tags) + return err +} + // NewLogger constructs a new zap logger instance. Constructing with nil // parameters is safe: default values will be used then. 
// Passing non-nil parameters after a successful creation (non-error) allows @@ -91,10 +102,8 @@ func NewLogger(prm Prm) (*Logger, error) { } func newConsoleLogger(prm Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - c := zap.NewProductionConfig() - c.Level = lvl + c.Level = zap.NewAtomicLevelAt(zap.DebugLevel) c.Encoding = "console" if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -115,15 +124,13 @@ func newConsoleLogger(prm Prm) (*Logger, error) { if err != nil { return nil, err } - - l := &Logger{z: lZap, lvl: lvl} + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } func newJournaldLogger(prm Prm) (*Logger, error) { - lvl := zap.NewAtomicLevelAt(prm.level) - c := zap.NewProductionConfig() if prm.SamplingHook != nil { c.Sampling.Hook = prm.SamplingHook @@ -137,7 +144,7 @@ func newJournaldLogger(prm Prm) (*Logger, error) { encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields) - core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) + core := zapjournald.NewCore(zap.NewAtomicLevelAt(zap.DebugLevel), encoder, &journald.Journal{}, zapjournald.SyslogFields) coreWithContext := core.With([]zapcore.Field{ zapjournald.SyslogFacility(zapjournald.LogDaemon), zapjournald.SyslogIdentifier(), @@ -161,22 +168,75 @@ func newJournaldLogger(prm Prm) (*Logger, error) { } opts = append(opts, prm.Options...) lZap := zap.New(samplingCore, opts...) - - l := &Logger{z: lZap, lvl: lvl} + l := &Logger{z: lZap, c: lZap.Core()} + l = l.WithTag(TagMain) return l, nil } -func (l *Logger) Reload(prm Prm) { - l.lvl.SetLevel(prm.level) +// With create a child logger with new fields, don't affect the parent. +// Throws panic if tag is unset. +func (l *Logger) With(fields ...zap.Field) *Logger { + if l.t == 0 { + panic("tag is unset") + } + c := *l + c.z = l.z.With(fields...) + // With called under the logger + c.w = true + return &c } -func (l *Logger) With(fields ...zap.Field) *Logger { - return &Logger{z: l.z.With(fields...)} +type core struct { + c zapcore.Core + l zap.AtomicLevel +} + +func (c *core) Enabled(lvl zapcore.Level) bool { + return c.l.Enabled(lvl) +} + +func (c *core) With(fields []zapcore.Field) zapcore.Core { + clone := *c + clone.c = clone.c.With(fields) + return &clone +} + +func (c *core) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return c.c.Check(e, ce) +} + +func (c *core) Write(e zapcore.Entry, fields []zapcore.Field) error { + return c.c.Write(e, fields) +} + +func (c *core) Sync() error { + return c.c.Sync() +} + +// WithTag is an equivalent of calling [NewLogger] with the same parameters for the current logger. +// Throws panic if provided unsupported tag. 
+func (l *Logger) WithTag(tag Tag) *Logger { + if tag == 0 || tag > Tag(len(_Tag_index)-1) { + panic("unsupported tag " + tag.String()) + } + if l.w { + panic("unsupported operation for the logger's state") + } + c := *l + c.t = tag + c.z = l.z.WithOptions(zap.WrapCore(func(zapcore.Core) zapcore.Core { + return &core{ + c: l.c.With([]zap.Field{zap.String("tag", tag.String())}), + l: tagToLogLevel[tag], + } + })) + return &c } func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z.WithOptions(zap.AddCallerSkip(1)), + t: TagMain, } } diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go new file mode 100644 index 000000000..80cb712eb --- /dev/null +++ b/pkg/util/logger/tag_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type Tag -linecomment"; DO NOT EDIT. + +package logger + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TagMain-1] +} + +const _Tag_name = "main" + +var _Tag_index = [...]uint8{0, 4} + +func (i Tag) String() string { + i -= 1 + if i >= Tag(len(_Tag_index)-1) { + return "Tag(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _Tag_name[_Tag_index[i]:_Tag_index[i+1]] +} diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go new file mode 100644 index 000000000..06abcabe5 --- /dev/null +++ b/pkg/util/logger/tags.go @@ -0,0 +1,75 @@ +package logger + +import ( + "fmt" + "strings" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +//go:generate stringer -type Tag -linecomment + +type Tag uint8 + +const ( + _ Tag = iota // + TagMain // main + + defaultLevel = zapcore.InfoLevel +) + +var ( + tagToLogLevel = map[Tag]zap.AtomicLevel{} + stringToTag = map[string]Tag{} +) + +func init() { + for i := TagMain; i <= Tag(len(_Tag_index)-1); i++ { + tagToLogLevel[i] = zap.NewAtomicLevelAt(defaultLevel) + stringToTag[i.String()] = i + } +} + +// parseTags returns: +// - map(always instantiated) of tag to custom log level for that tag; +// - error if it occurred(map is empty). +func parseTags(raw [][]string) (map[Tag]zapcore.Level, error) { + m := make(map[Tag]zapcore.Level) + if len(raw) == 0 { + return m, nil + } + for _, item := range raw { + str, level := item[0], item[1] + if len(level) == 0 { + // It is not necessary to parse tags without level, + // because default log level will be used. 
+ continue + } + var l zapcore.Level + err := l.UnmarshalText([]byte(level)) + if err != nil { + return nil, err + } + tmp := strings.Split(str, ",") + for _, tagStr := range tmp { + tag, ok := stringToTag[strings.TrimSpace(tagStr)] + if !ok { + return nil, fmt.Errorf("unsupported tag %s", str) + } + m[tag] = l + } + } + return m, nil +} + +func UpdateLevelForTags(prm Prm) { + for k, v := range tagToLogLevel { + nk, ok := prm.tl[k] + if ok { + v.SetLevel(nk) + } else { + v.SetLevel(prm.level) + } + } +} From 12fc7850dd78d6ea0818f90d90388de8e42d190c Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Wed, 29 Jan 2025 14:37:17 +0300 Subject: [PATCH 537/591] [#1619] logger: Set tags for `ir` components Change-Id: Ifab575bc2a3cd83c9001cd68fffaf94c91494043 Signed-off-by: Anton Nikiforov --- config/example/ir.env | 2 ++ config/example/ir.yaml | 3 +++ pkg/innerring/initialization.go | 16 ++++++++-------- pkg/innerring/innerring.go | 2 +- pkg/util/logger/tag_string.go | 8 ++++++-- pkg/util/logger/tags.go | 8 ++++++-- 6 files changed, 26 insertions(+), 13 deletions(-) diff --git a/config/example/ir.env b/config/example/ir.env index ebd91c243..c13044a6e 100644 --- a/config/example/ir.env +++ b/config/example/ir.env @@ -1,5 +1,7 @@ FROSTFS_IR_LOGGER_LEVEL=info FROSTFS_IR_LOGGER_TIMESTAMP=true +FROSTFS_IR_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_IR_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_IR_WALLET_PATH=/path/to/wallet.json FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX diff --git a/config/example/ir.yaml b/config/example/ir.yaml index 49f9fd324..a4a006550 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -3,6 +3,9 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true + tags: + - names: "main, morph" # Possible values: `main`, `morph`, `grpc_svc`, `ir`, `processor`. 
+ level: debug wallet: path: /path/to/wallet.json # Path to NEP-6 NEO wallet file diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go index f7b71dbe6..3d236641e 100644 --- a/pkg/innerring/initialization.go +++ b/pkg/innerring/initialization.go @@ -50,7 +50,7 @@ func (s *Server) initNetmapProcessor(ctx context.Context, cfg *viper.Viper, var err error s.netmapProcessor, err = netmap.New(&netmap.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, NetmapClient: netmap.NewNetmapClient(s.netmapClient), @@ -159,7 +159,7 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli } else { // create governance processor governanceProcessor, err := governance.New(&governance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, FrostFSClient: frostfsCli, AlphabetState: s, @@ -225,7 +225,7 @@ func (s *Server) initAlphabetProcessor(ctx context.Context, cfg *viper.Viper) er // create alphabet processor s.alphabetProcessor, err = alphabet.New(&alphabet.Params{ ParsedWallets: parsedWallets, - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetContracts: s.contracts.alphabet, @@ -247,7 +247,7 @@ func (s *Server) initContainerProcessor(ctx context.Context, cfg *viper.Viper, c s.log.Debug(ctx, logs.ContainerContainerWorkerPool, zap.Int("size", poolSize)) // container processor containerProcessor, err := cont.New(&cont.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, AlphabetState: s, @@ -268,7 +268,7 @@ func (s *Server) initBalanceProcessor(ctx context.Context, cfg *viper.Viper, fro s.log.Debug(ctx, logs.BalanceBalanceWorkerPool, zap.Int("size", poolSize)) // create balance processor balanceProcessor, err := balance.New(&balance.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSClient: frostfsCli, @@ -291,7 +291,7 @@ func (s *Server) initFrostFSMainnetProcessor(ctx context.Context, cfg *viper.Vip s.log.Debug(ctx, logs.FrostFSFrostfsWorkerPool, zap.Int("size", poolSize)) frostfsProcessor, err := frostfs.New(&frostfs.Params{ - Log: s.log, + Log: s.log.WithTag(logger.TagProcessor), Metrics: s.irMetrics, PoolSize: poolSize, FrostFSContract: s.contracts.frostfs, @@ -342,7 +342,7 @@ func (s *Server) initGRPCServer(ctx context.Context, cfg *viper.Viper, log *logg controlSvc := controlsrv.NewAuditService(controlsrv.New(p, s.netmapClient, s.containerClient, controlsrv.WithAllowedKeys(authKeys), - ), log, audit) + ), log.WithTag(logger.TagGrpcSvc), audit) grpcControlSrv := grpc.NewServer() control.RegisterControlServiceServer(grpcControlSrv, controlSvc) @@ -458,7 +458,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<- } morphChain := &chainParams{ - log: s.log, + log: s.log.WithTag(logger.TagMorph), cfg: cfg, key: s.key, name: morphPrefix, diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index ae5661905..3a5137261 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -339,7 +339,7 @@ func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan ) (*Server, error) { var err error server := &Server{ - log: log, + log: log.WithTag(logger.TagIr), irMetrics: metrics, cmode: cmode, } diff --git a/pkg/util/logger/tag_string.go b/pkg/util/logger/tag_string.go index 80cb712eb..a25b3c445 100644 --- 
a/pkg/util/logger/tag_string.go +++ b/pkg/util/logger/tag_string.go @@ -9,11 +9,15 @@ func _() { // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[TagMain-1] + _ = x[TagMorph-2] + _ = x[TagGrpcSvc-3] + _ = x[TagIr-4] + _ = x[TagProcessor-5] } -const _Tag_name = "main" +const _Tag_name = "mainmorphgrpc_svcirprocessor" -var _Tag_index = [...]uint8{0, 4} +var _Tag_index = [...]uint8{0, 4, 9, 17, 19, 28} func (i Tag) String() string { i -= 1 diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go index 06abcabe5..51a6f62da 100644 --- a/pkg/util/logger/tags.go +++ b/pkg/util/logger/tags.go @@ -13,8 +13,12 @@ import ( type Tag uint8 const ( - _ Tag = iota // - TagMain // main + _ Tag = iota // + TagMain // main + TagMorph // morph + TagGrpcSvc // grpc_svc + TagIr // ir + TagProcessor // processor defaultLevel = zapcore.InfoLevel ) From 8e87cbee17d3374c55a536bc14f02acb752f9d5f Mon Sep 17 00:00:00 2001 From: Vitaliy Potyarkin Date: Fri, 11 Apr 2025 14:44:19 +0300 Subject: [PATCH 538/591] [#1689] ci: Move commit checker out of Jenkinsfile Commit checker is now configured globally for all Gerrit repositories: https://git.frostfs.info/TrueCloudLab/jenkins/pulls/16 This allows us to execute commit-checker independently from the rest of CI suite and re-check commit message format without rerunning other tests. Change-Id: Ib8f899b856482a5dc5d03861171585415ff6b452 Signed-off-by: Vitaliy Potyarkin --- .ci/Jenkinsfile | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 4ddd36406..4234de160 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -78,10 +78,4 @@ async { } } } - - task('dco') { - container('git.frostfs.info/truecloudlab/commit-check:master') { - sh 'FROM=pull_request_target commit-check' - } - } } From 0d36e93169495b82e0cd23ff0c681c67c020c5e6 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 9 Apr 2025 16:27:03 +0300 Subject: [PATCH 539/591] [#1332] cli/playground: Move command handler selection to separate function Change-Id: I2dcbd85e61960c3cf141b815edab174e308ef858 Signed-off-by: Alexander Chuprov --- .../modules/container/policy_playground.go | 46 ++++++++++--------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index f747b3252..a9dea13b1 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -260,6 +260,28 @@ Example of usage: }, } +func (repl *policyPlaygroundREPL) handleCommand(args []string) error { + if len(args) == 0 { + return nil + } + + switch args[0] { + case "list", "ls": + return repl.handleLs(args[1:]) + case "add": + return repl.handleAdd(args[1:]) + case "load": + return repl.handleLoad(args[1:]) + case "remove", "rm": + return repl.handleRemove(args[1:]) + case "eval": + return repl.handleEval(args[1:]) + case "help": + return repl.handleHelp(args[1:]) + } + return fmt.Errorf("unknown command %q", args[0]) +} + func (repl *policyPlaygroundREPL) run() error { if len(viper.GetString(commonflags.RPC)) > 0 { key := key.GetOrGenerate(repl.cmd) @@ -277,17 +299,6 @@ func (repl *policyPlaygroundREPL) run() error { } } - cmdHandlers := map[string]func([]string) error{ - "list": repl.handleLs, - "ls": repl.handleLs, - "add": repl.handleAdd, - "load": repl.handleLoad, - "remove": repl.handleRemove, - "rm": repl.handleRemove, - "eval": repl.handleEval, - "help": repl.handleHelp, - } - var 
cfgCompleter []readline.PrefixCompleterInterface var helpSubItems []readline.PrefixCompleterInterface @@ -326,17 +337,8 @@ func (repl *policyPlaygroundREPL) run() error { } exit = false - parts := strings.Fields(line) - if len(parts) == 0 { - continue - } - cmd := parts[0] - if handler, exists := cmdHandlers[cmd]; exists { - if err := handler(parts[1:]); err != nil { - fmt.Fprintf(repl.console, "error: %v\n", err) - } - } else { - fmt.Fprintf(repl.console, "error: unknown command %q\n", cmd) + if err := repl.handleCommand(strings.Fields(line)); err != nil { + fmt.Fprintf(repl.console, "error: %v\n", err) } } } From 29b4fbe451bb695724e529e757e339f235dd8759 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 9 Apr 2025 16:31:48 +0300 Subject: [PATCH 540/591] [#1332] cli/playground: Add 'netmap-config' flag Change-Id: I4342fb9a6da2a05c18ae4e0ad9f0c71550efc5ef Signed-off-by: Alexander Chuprov --- .../modules/container/policy_playground.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index a9dea13b1..377cf43c6 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -299,6 +299,11 @@ func (repl *policyPlaygroundREPL) run() error { } } + if len(viper.GetString(netmapConfigPath)) > 0 { + err := repl.handleLoad([]string{viper.GetString(netmapConfigPath)}) + commonCmd.ExitOnErr(repl.cmd, "load netmap config error: %w", err) + } + var cfgCompleter []readline.PrefixCompleterInterface var helpSubItems []readline.PrefixCompleterInterface @@ -354,6 +359,14 @@ If a wallet and endpoint is provided, the initial netmap data will be loaded fro }, } +const ( + netmapConfigPath = "netmap-config" + netmapConfigUsage = "Path to the netmap configuration file" +) + func initContainerPolicyPlaygroundCmd() { commonflags.Init(policyPlaygroundCmd) + policyPlaygroundCmd.Flags().String(netmapConfigPath, "", netmapConfigUsage) + + _ = viper.BindPFlag(netmapConfigPath, policyPlaygroundCmd.Flags().Lookup(netmapConfigPath)) } From 3be33b7117900ba1dc715e330545ca6ff5d44dc4 Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Wed, 9 Apr 2025 16:34:16 +0300 Subject: [PATCH 541/591] [#1706] cli/playground: Mention 'help' in error message for invalid commands Change-Id: Ica1112b907919a6d19fa1bf683f2a952c4c638e4 Signed-off-by: Alexander Chuprov --- cmd/frostfs-cli/modules/container/policy_playground.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 377cf43c6..2cc1107ef 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -279,7 +279,7 @@ func (repl *policyPlaygroundREPL) handleCommand(args []string) error { case "help": return repl.handleHelp(args[1:]) } - return fmt.Errorf("unknown command %q", args[0]) + return fmt.Errorf("unknown command %q. 
See 'help' for assistance", args[0]) } func (repl *policyPlaygroundREPL) run() error { From 5aaa3df533c02763db0dfef0ad5fd63cb0bebc46 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 11 Apr 2025 14:56:07 +0300 Subject: [PATCH 542/591] [#1700] config: Move config struct to qos package Change-Id: Ie642fff5cd1702cda00425628e11f3fd8c514798 Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 2 +- cmd/frostfs-node/config/engine/config_test.go | 36 ++++++---- .../config/engine/shard/limits/config.go | 66 ++++++------------- config/example/node.env | 6 ++ config/example/node.json | 10 +++ config/example/node.yaml | 6 ++ internal/qos/config.go | 31 +++++++++ internal/qos/limiter.go | 15 ++--- internal/qos/validate.go | 12 ++-- 9 files changed, 109 insertions(+), 75 deletions(-) create mode 100644 internal/qos/config.go diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 88e45d848..298049158 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -385,7 +385,7 @@ func (a *applicationConfiguration) setGCConfig(target *shardCfg, source *shardco } func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardconfig.Config) error { - limitsConfig := source.Limits() + limitsConfig := source.Limits().ToConfig() limiter, err := qos.NewLimiter(limitsConfig) if err != nil { return err diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 22f26268d..34613ad9e 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -11,10 +11,10 @@ import ( blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza" fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree" gcconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/gc" - limitsconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) @@ -135,8 +135,8 @@ func TestEngineSection(t *testing.T) { require.Equal(t, mode.ReadOnly, sc.Mode()) require.Equal(t, 100, sc.RefillMetabaseWorkersCount()) - readLimits := limits.Read() - writeLimits := limits.Write() + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write require.Equal(t, 30*time.Second, readLimits.IdleTimeout) require.Equal(t, int64(10_000), readLimits.MaxRunningOps) require.Equal(t, int64(1_000), readLimits.MaxWaitingOps) @@ -144,7 +144,7 @@ func TestEngineSection(t *testing.T) { require.Equal(t, int64(1_000), writeLimits.MaxRunningOps) require.Equal(t, int64(100), writeLimits.MaxWaitingOps) require.ElementsMatch(t, readLimits.Tags, - []limitsconfig.IOTagConfig{ + []qos.IOTagConfig{ { Tag: "internal", Weight: toPtr(20), @@ -173,9 +173,14 @@ func TestEngineSection(t *testing.T) { LimitOps: toPtr(25000), Prohibited: true, }, + { + Tag: "treesync", + Weight: toPtr(5), + LimitOps: toPtr(25), + }, }) require.ElementsMatch(t, writeLimits.Tags, - 
[]limitsconfig.IOTagConfig{ + []qos.IOTagConfig{ { Tag: "internal", Weight: toPtr(200), @@ -203,6 +208,11 @@ func TestEngineSection(t *testing.T) { Weight: toPtr(50), LimitOps: toPtr(2500), }, + { + Tag: "treesync", + Weight: toPtr(50), + LimitOps: toPtr(100), + }, }) case 1: require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path()) @@ -259,14 +269,14 @@ func TestEngineSection(t *testing.T) { require.Equal(t, mode.ReadWrite, sc.Mode()) require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount()) - readLimits := limits.Read() - writeLimits := limits.Write() - require.Equal(t, limitsconfig.DefaultIdleTimeout, readLimits.IdleTimeout) - require.Equal(t, limitsconfig.NoLimit, readLimits.MaxRunningOps) - require.Equal(t, limitsconfig.NoLimit, readLimits.MaxWaitingOps) - require.Equal(t, limitsconfig.DefaultIdleTimeout, writeLimits.IdleTimeout) - require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxRunningOps) - require.Equal(t, limitsconfig.NoLimit, writeLimits.MaxWaitingOps) + readLimits := limits.ToConfig().Read + writeLimits := limits.ToConfig().Write + require.Equal(t, qos.DefaultIdleTimeout, readLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, readLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, readLimits.MaxWaitingOps) + require.Equal(t, qos.DefaultIdleTimeout, writeLimits.IdleTimeout) + require.Equal(t, qos.NoLimit, writeLimits.MaxRunningOps) + require.Equal(t, qos.NoLimit, writeLimits.MaxWaitingOps) require.Equal(t, 0, len(readLimits.Tags)) require.Equal(t, 0, len(writeLimits.Tags)) } diff --git a/cmd/frostfs-node/config/engine/shard/limits/config.go b/cmd/frostfs-node/config/engine/shard/limits/config.go index 8444d6aa8..ccd1e0000 100644 --- a/cmd/frostfs-node/config/engine/shard/limits/config.go +++ b/cmd/frostfs-node/config/engine/shard/limits/config.go @@ -1,19 +1,13 @@ package limits import ( - "math" "strconv" - "time" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "github.com/spf13/cast" ) -const ( - NoLimit int64 = math.MaxInt64 - DefaultIdleTimeout = 5 * time.Minute -) - // From wraps config section into Config. func From(c *config.Config) *Config { return (*Config)(c) @@ -23,36 +17,43 @@ func From(c *config.Config) *Config { // which provides access to Shard's limits configurations. type Config config.Config -// Read returns the value of "read" limits config section. -func (x *Config) Read() OpConfig { +func (x *Config) ToConfig() qos.LimiterConfig { + result := qos.LimiterConfig{ + Read: x.read(), + Write: x.write(), + } + panicOnErr(result.Validate()) + return result +} + +func (x *Config) read() qos.OpConfig { return x.parse("read") } -// Write returns the value of "write" limits config section. 
-func (x *Config) Write() OpConfig { +func (x *Config) write() qos.OpConfig { return x.parse("write") } -func (x *Config) parse(sub string) OpConfig { +func (x *Config) parse(sub string) qos.OpConfig { c := (*config.Config)(x).Sub(sub) - var result OpConfig + var result qos.OpConfig if s := config.Int(c, "max_waiting_ops"); s > 0 { result.MaxWaitingOps = s } else { - result.MaxWaitingOps = NoLimit + result.MaxWaitingOps = qos.NoLimit } if s := config.Int(c, "max_running_ops"); s > 0 { result.MaxRunningOps = s } else { - result.MaxRunningOps = NoLimit + result.MaxRunningOps = qos.NoLimit } if s := config.DurationSafe(c, "idle_timeout"); s > 0 { result.IdleTimeout = s } else { - result.IdleTimeout = DefaultIdleTimeout + result.IdleTimeout = qos.DefaultIdleTimeout } result.Tags = tags(c) @@ -60,43 +61,16 @@ func (x *Config) parse(sub string) OpConfig { return result } -type OpConfig struct { - // MaxWaitingOps returns the value of "max_waiting_ops" config parameter. - // - // Equals NoLimit if the value is not a positive number. - MaxWaitingOps int64 - // MaxRunningOps returns the value of "max_running_ops" config parameter. - // - // Equals NoLimit if the value is not a positive number. - MaxRunningOps int64 - // IdleTimeout returns the value of "idle_timeout" config parameter. - // - // Equals DefaultIdleTimeout if the value is not a valid duration. - IdleTimeout time.Duration - // Tags returns the value of "tags" config parameter. - // - // Equals nil if the value is not a valid tags config slice. - Tags []IOTagConfig -} - -type IOTagConfig struct { - Tag string - Weight *float64 - LimitOps *float64 - ReservedOps *float64 - Prohibited bool -} - -func tags(c *config.Config) []IOTagConfig { +func tags(c *config.Config) []qos.IOTagConfig { c = c.Sub("tags") - var result []IOTagConfig + var result []qos.IOTagConfig for i := 0; ; i++ { tag := config.String(c, strconv.Itoa(i)+".tag") if tag == "" { return result } - var tagConfig IOTagConfig + var tagConfig qos.IOTagConfig tagConfig.Tag = tag v := c.Value(strconv.Itoa(i) + ".weight") diff --git a/config/example/node.env b/config/example/node.env index b7c798ad8..dfb250341 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -181,6 +181,9 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_WEIGHT=5 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_LIMIT_OPS=25000 FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_4_PROHIBITED=true +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_WEIGHT=5 +FROSTFS_STORAGE_SHARD_0_LIMITS_READ_TAGS_5_LIMIT_OPS=25 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_TAG=internal FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_WEIGHT=200 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_0_LIMIT_OPS=0 @@ -198,6 +201,9 @@ FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_3_LIMIT_OPS=2500 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_TAG=policer FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_WEIGHT=50 FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_4_LIMIT_OPS=2500 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_TAG=treesync +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_WEIGHT=50 +FROSTFS_STORAGE_SHARD_0_LIMITS_WRITE_TAGS_5_LIMIT_OPS=100 ## 1 shard ### Flag to refill Metabase from BlobStor diff --git a/config/example/node.json b/config/example/node.json index 2f4413e4d..0b061a3d4 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -254,6 +254,11 @@ "weight": 5, "limit_ops": 25000, "prohibited": true + }, + { + "tag": "treesync", + "weight": 5, + 
"limit_ops": 25 } ] }, @@ -288,6 +293,11 @@ "tag": "policer", "weight": 50, "limit_ops": 2500 + }, + { + "tag": "treesync", + "weight": 50, + "limit_ops": 100 } ] } diff --git a/config/example/node.yaml b/config/example/node.yaml index 0b6c7b12c..46e4ebdbe 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -253,6 +253,9 @@ storage: weight: 5 limit_ops: 25000 prohibited: true + - tag: treesync + weight: 5 + limit_ops: 25 write: max_running_ops: 1000 max_waiting_ops: 100 @@ -275,6 +278,9 @@ storage: - tag: policer weight: 50 limit_ops: 2500 + - tag: treesync + weight: 50 + limit_ops: 100 1: writecache: diff --git a/internal/qos/config.go b/internal/qos/config.go new file mode 100644 index 000000000..d90b403b5 --- /dev/null +++ b/internal/qos/config.go @@ -0,0 +1,31 @@ +package qos + +import ( + "math" + "time" +) + +const ( + NoLimit int64 = math.MaxInt64 + DefaultIdleTimeout = 5 * time.Minute +) + +type LimiterConfig struct { + Read OpConfig + Write OpConfig +} + +type OpConfig struct { + MaxWaitingOps int64 + MaxRunningOps int64 + IdleTimeout time.Duration + Tags []IOTagConfig +} + +type IOTagConfig struct { + Tag string + Weight *float64 + LimitOps *float64 + ReservedOps *float64 + Prohibited bool +} diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index c73481c2c..5851d7626 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -8,7 +8,6 @@ import ( "sync/atomic" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" "git.frostfs.info/TrueCloudLab/frostfs-qos/scheduling" "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" @@ -37,15 +36,15 @@ type scheduler interface { Close() } -func NewLimiter(c *limits.Config) (Limiter, error) { - if err := validateConfig(c); err != nil { +func NewLimiter(c LimiterConfig) (Limiter, error) { + if err := c.Validate(); err != nil { return nil, err } - readScheduler, err := createScheduler(c.Read()) + readScheduler, err := createScheduler(c.Read) if err != nil { return nil, fmt.Errorf("create read scheduler: %w", err) } - writeScheduler, err := createScheduler(c.Write()) + writeScheduler, err := createScheduler(c.Write) if err != nil { return nil, fmt.Errorf("create write scheduler: %w", err) } @@ -63,8 +62,8 @@ func NewLimiter(c *limits.Config) (Limiter, error) { return l, nil } -func createScheduler(config limits.OpConfig) (scheduler, error) { - if len(config.Tags) == 0 && config.MaxWaitingOps == limits.NoLimit { +func createScheduler(config OpConfig) (scheduler, error) { + if len(config.Tags) == 0 && config.MaxWaitingOps == NoLimit { return newSemaphoreScheduler(config.MaxRunningOps), nil } return scheduling.NewMClock( @@ -72,7 +71,7 @@ func createScheduler(config limits.OpConfig) (scheduler, error) { converToSchedulingTags(config.Tags), config.IdleTimeout) } -func converToSchedulingTags(limits []limits.IOTagConfig) map[string]scheduling.TagInfo { +func converToSchedulingTags(limits []IOTagConfig) map[string]scheduling.TagInfo { result := make(map[string]scheduling.TagInfo) for _, tag := range []IOTag{IOTagBackground, IOTagClient, IOTagInternal, IOTagPolicer, IOTagTreeSync, IOTagWritecache} { result[tag.String()] = scheduling.TagInfo{ diff --git a/internal/qos/validate.go b/internal/qos/validate.go index d4475e38b..70f1f24e8 100644 --- a/internal/qos/validate.go +++ b/internal/qos/validate.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" "math" - - 
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/limits" ) var errWeightsMustBeSpecified = errors.New("invalid weights: weights must be specified for all tags or not specified for any") @@ -14,17 +12,17 @@ type tagConfig struct { Shares, Limit, Reserved *float64 } -func validateConfig(c *limits.Config) error { - if err := validateOpConfig(c.Read()); err != nil { +func (c *LimiterConfig) Validate() error { + if err := validateOpConfig(c.Read); err != nil { return fmt.Errorf("limits 'read' section validation error: %w", err) } - if err := validateOpConfig(c.Write()); err != nil { + if err := validateOpConfig(c.Write); err != nil { return fmt.Errorf("limits 'write' section validation error: %w", err) } return nil } -func validateOpConfig(c limits.OpConfig) error { +func validateOpConfig(c OpConfig) error { if c.MaxRunningOps <= 0 { return fmt.Errorf("invalid 'max_running_ops = %d': must be greater than zero", c.MaxRunningOps) } @@ -40,7 +38,7 @@ func validateOpConfig(c limits.OpConfig) error { return nil } -func validateTags(configTags []limits.IOTagConfig) error { +func validateTags(configTags []IOTagConfig) error { tags := map[IOTag]tagConfig{ IOTagBackground: {}, IOTagClient: {}, From e80632884a2a89c6a5edfcb46cc1a939c9413028 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 11 Apr 2025 16:21:27 +0300 Subject: [PATCH 543/591] [#1700] config: Drop redundant check Target config created on level above, so limiter is always nil. Change-Id: I1896baae5b9ddeed339a7d2b022a9a886589d362 Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 298049158..b167439e0 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -390,9 +390,6 @@ func (a *applicationConfiguration) setLimiter(target *shardCfg, source *shardcon if err != nil { return err } - if target.limiter != nil { - target.limiter.Close() - } target.limiter = limiter return nil } From fd37cea443df4aad4b3a61db0e31fbfa0ae94c33 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 11 Apr 2025 17:08:44 +0300 Subject: [PATCH 544/591] [#1700] engine: Drop unused block execution methods `BlockExecution` and `ResumeExecution` were used only by unit test. So drop them and simplify code. Change-Id: Ib3de324617e8a27fc1f015542ac5e94df5c60a6e Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/engine/control.go | 67 +++---------------- .../engine/control_test.go | 40 ----------- pkg/local_object_storage/engine/engine.go | 5 +- 3 files changed, 10 insertions(+), 102 deletions(-) diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 96b53581e..bf1649f6e 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -22,10 +22,6 @@ type shardInitError struct { // Open opens all StorageEngine's components. func (e *StorageEngine) Open(ctx context.Context) error { - return e.open(ctx) -} - -func (e *StorageEngine) open(ctx context.Context) error { e.mtx.Lock() defer e.mtx.Unlock() @@ -149,11 +145,11 @@ var errClosed = errors.New("storage engine is closed") func (e *StorageEngine) Close(ctx context.Context) error { close(e.closeCh) defer e.wg.Wait() - return e.setBlockExecErr(ctx, errClosed) + return e.closeEngine(ctx) } // closes all shards. Never returns an error, shard errors are logged. 
-func (e *StorageEngine) close(ctx context.Context) error { +func (e *StorageEngine) closeAllShards(ctx context.Context) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -176,70 +172,23 @@ func (e *StorageEngine) execIfNotBlocked(op func() error) error { e.blockExec.mtx.RLock() defer e.blockExec.mtx.RUnlock() - if e.blockExec.err != nil { - return e.blockExec.err + if e.blockExec.closed { + return errClosed } return op() } -// sets the flag of blocking execution of all data operations according to err: -// - err != nil, then blocks the execution. If exec wasn't blocked, calls close method -// (if err == errClosed => additionally releases pools and does not allow to resume executions). -// - otherwise, resumes execution. If exec was blocked, calls open method. -// -// Can be called concurrently with exec. In this case it waits for all executions to complete. -func (e *StorageEngine) setBlockExecErr(ctx context.Context, err error) error { +func (e *StorageEngine) closeEngine(ctx context.Context) error { e.blockExec.mtx.Lock() defer e.blockExec.mtx.Unlock() - prevErr := e.blockExec.err - - wasClosed := errors.Is(prevErr, errClosed) - if wasClosed { + if e.blockExec.closed { return errClosed } - e.blockExec.err = err - - if err == nil { - if prevErr != nil { // block -> ok - return e.open(ctx) - } - } else if prevErr == nil { // ok -> block - return e.close(ctx) - } - - // otherwise do nothing - - return nil -} - -// BlockExecution blocks the execution of any data-related operation. All blocked ops will return err. -// To resume the execution, use ResumeExecution method. -// -// Сan be called regardless of the fact of the previous blocking. If execution wasn't blocked, releases all resources -// similar to Close. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. -// -// Note: technically passing nil error will resume the execution, otherwise, it is recommended to call ResumeExecution -// for this. -func (e *StorageEngine) BlockExecution(err error) error { - return e.setBlockExecErr(context.Background(), err) -} - -// ResumeExecution resumes the execution of any data-related operation. -// To block the execution, use BlockExecution method. -// -// Сan be called regardless of the fact of the previous blocking. If execution was blocked, prepares all resources -// similar to Open. Can be called concurrently with Close and any data related method (waits for all executions -// to complete). Returns error if any Close has been called before. -// -// Must not be called concurrently with either Open or Init. 
-func (e *StorageEngine) ResumeExecution() error { - return e.setBlockExecErr(context.Background(), nil) + e.blockExec.closed = true + return e.closeAllShards(ctx) } type ReConfiguration struct { diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index a0e658aeb..4ff0ed5ec 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -2,7 +2,6 @@ package engine import ( "context" - "errors" "fmt" "io/fs" "os" @@ -12,17 +11,14 @@ import ( "testing" "time" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) @@ -163,42 +159,6 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O require.Equal(t, 1, shardCount) } -func TestExecBlocks(t *testing.T) { - e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many - - // put some object - obj := testutil.GenerateObjectWithCID(cidtest.ID()) - - addr := object.AddressOf(obj) - - require.NoError(t, Put(context.Background(), e, obj, false)) - - // block executions - errBlock := errors.New("block exec err") - - require.NoError(t, e.BlockExecution(errBlock)) - - // try to exec some op - _, err := Head(context.Background(), e, addr) - require.ErrorIs(t, err, errBlock) - - // resume executions - require.NoError(t, e.ResumeExecution()) - - _, err = Head(context.Background(), e, addr) // can be any data-related op - require.NoError(t, err) - - // close - require.NoError(t, e.Close(context.Background())) - - // try exec after close - _, err = Head(context.Background(), e, addr) - require.Error(t, err) - - // try to resume - require.Error(t, e.ResumeExecution()) -} - func TestPersistentShardID(t *testing.T) { dir := t.TempDir() diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 1d33c0592..376d545d3 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -33,9 +33,8 @@ type StorageEngine struct { wg sync.WaitGroup blockExec struct { - mtx sync.RWMutex - - err error + mtx sync.RWMutex + closed bool } evacuateLimiter *evacuationLimiter } From f37babdc5482334a3c573f09c7b43f43943d7dfa Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Apr 2025 09:51:50 +0300 Subject: [PATCH 545/591] [#1700] shard: Lock shard's mode mutex on close To prevent race between GC handlers and close. 
Change-Id: I06219230964f000f666a56158d3563c760518c3b Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/control.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 72e650c5e..d489b8b0d 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -363,6 +363,7 @@ func (s *Shard) refillTombstoneObject(ctx context.Context, obj *objectSDK.Object // Close releases all Shard's components. func (s *Shard) Close(ctx context.Context) error { + unlock := s.lockExclusive() if s.rb != nil { s.rb.Stop(ctx, s.log) } @@ -388,15 +389,19 @@ func (s *Shard) Close(ctx context.Context) error { } } + if s.opsLimiter != nil { + s.opsLimiter.Close() + } + + unlock() + + // GC waits for handlers and remover to complete. Handlers may try to lock shard's lock. + // So to prevent deadlock GC stopping is outside of exclusive lock. // If Init/Open was unsuccessful gc can be nil. if s.gc != nil { s.gc.stop(ctx) } - if s.opsLimiter != nil { - s.opsLimiter.Close() - } - return lastErr } From 48930ec452bc957a1919ee66f91606937cd08261 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Fri, 4 Apr 2025 16:59:22 +0300 Subject: [PATCH 546/591] [#1703] cli: Allow reading RPC endpoint from config file Allowed reading an RPC endpoint from a configuration file when getting current epoch in the `object lock` and `bearer create` commands. Close #1703 Change-Id: Iea8509dff2893a02cb63f695d7f532eecd743ed8 Signed-off-by: Aleksey Savchuk --- cmd/frostfs-cli/modules/bearer/create.go | 3 ++- cmd/frostfs-cli/modules/object/lock.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-cli/modules/bearer/create.go b/cmd/frostfs-cli/modules/bearer/create.go index a86506c37..0927788ba 100644 --- a/cmd/frostfs-cli/modules/bearer/create.go +++ b/cmd/frostfs-cli/modules/bearer/create.go @@ -44,6 +44,7 @@ is set to current epoch + n. _ = viper.BindPFlag(commonflags.WalletPath, ff.Lookup(commonflags.WalletPath)) _ = viper.BindPFlag(commonflags.Account, ff.Lookup(commonflags.Account)) + _ = viper.BindPFlag(commonflags.RPC, ff.Lookup(commonflags.RPC)) }, } @@ -81,7 +82,7 @@ func createToken(cmd *cobra.Command, _ []string) { commonCmd.ExitOnErr(cmd, "can't parse --"+notValidBeforeFlag+" flag: %w", err) if iatRelative || expRelative || nvbRelative { - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) if len(endpoint) == 0 { commonCmd.ExitOnErr(cmd, "can't fetch current epoch: %w", fmt.Errorf("'%s' flag value must be specified", commonflags.RPC)) } diff --git a/cmd/frostfs-cli/modules/object/lock.go b/cmd/frostfs-cli/modules/object/lock.go index 53dd01868..d67db9f0d 100644 --- a/cmd/frostfs-cli/modules/object/lock.go +++ b/cmd/frostfs-cli/modules/object/lock.go @@ -18,6 +18,7 @@ import ( oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/spf13/cobra" + "github.com/spf13/viper" ) // object lock command. 
@@ -78,7 +79,7 @@ var objectLockCmd = &cobra.Command{ ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() - endpoint, _ := cmd.Flags().GetString(commonflags.RPC) + endpoint := viper.GetString(commonflags.RPC) currEpoch, err := internalclient.GetCurrentEpoch(ctx, cmd, endpoint) commonCmd.ExitOnErr(cmd, "Request current epoch: %w", err) From 0712c113dee45c81dfacb882f688c18ea39c1ee3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Tue, 15 Apr 2025 10:06:05 +0300 Subject: [PATCH 547/591] [#1700] gc: Fix deadlock `HandleExpiredLocks` gets read lock, then `shard.Close` tries to acquire write lock, but `HandleExpiredLocks` calls `inhumeUnlockedIfExpired` or `selectExpired`, that try to acquire read lock again. Change-Id: Ib2ed015e859328045b5a542a4f569e5e0ff8b05b Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/shard/gc.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 19b6e2d12..a262a52cb 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -391,6 +391,16 @@ func (s *Shard) handleExpiredObjects(ctx context.Context, expired []oid.Address) return } + s.handleExpiredObjectsUnsafe(ctx, expired) +} + +func (s *Shard) handleExpiredObjectsUnsafe(ctx context.Context, expired []oid.Address) { + select { + case <-ctx.Done(): + return + default: + } + expired, err := s.getExpiredWithLinked(ctx, expired) if err != nil { s.log.Warn(ctx, logs.ShardGCFailedToGetExpiredWithLinked, zap.Error(err)) @@ -611,13 +621,6 @@ func (s *Shard) getExpiredObjects(ctx context.Context, epoch uint64, onExpiredFo } func (s *Shard) selectExpired(ctx context.Context, epoch uint64, addresses []oid.Address) ([]oid.Address, error) { - s.m.RLock() - defer s.m.RUnlock() - - if s.info.Mode.NoMetabase() { - return nil, ErrDegradedMode - } - release, err := s.opsLimiter.ReadRequest(ctx) if err != nil { return nil, err @@ -728,7 +731,7 @@ func (s *Shard) inhumeUnlockedIfExpired(ctx context.Context, epoch uint64, unloc return } - s.handleExpiredObjects(ctx, expiredUnlocked) + s.handleExpiredObjectsUnsafe(ctx, expiredUnlocked) } // HandleDeletedLocks unlocks all objects which were locked by lockers. 
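As a side note to the message above: the deadlock follows directly from the fact that Go's sync.RWMutex is not reentrant — once a writer is queued, further RLock calls block until the writer is served. A minimal, self-contained sketch (not code from this patch; the shard names in the comments are only an analogy to the call chain described in the commit message):

package main

import (
	"sync"
	"time"
)

func main() {
	var m sync.RWMutex

	m.RLock() // like HandleExpiredLocks holding the shard's read lock
	go func() {
		m.Lock() // like shard.Close waiting for the write lock
		defer m.Unlock()
	}()
	time.Sleep(100 * time.Millisecond) // give the writer a chance to queue up

	// Like selectExpired re-taking the read lock: this RLock queues behind the
	// pending writer, which in turn waits for the first RLock, so the runtime
	// reports "fatal error: all goroutines are asleep - deadlock!".
	m.RLock()
	m.RUnlock()
	m.RUnlock()
}

The patch breaks that cycle by removing the nested acquisition: selectExpired no longer takes its own read lock, and handleExpiredObjectsUnsafe runs under the lock already held by the caller.
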
From 56d09a99579a59b41f194f4c90a1dd582a9c88bb Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Fri, 7 Feb 2025 17:09:08 +0300 Subject: [PATCH 548/591] [#1640] object: Add priority metric based on geo distance Change-Id: I3a7ea4fc4807392bf50e6ff1389c61367c953074 Signed-off-by: Anton Nikiforov --- cmd/frostfs-node/config.go | 17 +- cmd/frostfs-node/config/node/config.go | 5 + config/example/node.env | 1 + config/example/node.json | 3 +- config/example/node.yaml | 1 + docs/storage-node-configuration.md | 53 ++++--- .../object_manager/placement/metrics.go | 150 +++++++++++++++++- .../placement/traverser_test.go | 49 ++++++ 8 files changed, 241 insertions(+), 38 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index b167439e0..8ceef2c31 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -247,15 +247,16 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error { // Object a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c) - var pm []placement.Metric - for _, raw := range objectconfig.Get(c).Priority() { - m, err := placement.ParseMetric(raw) - if err != nil { - return err - } - pm = append(pm, m) + locodeDBPath := nodeconfig.LocodeDBPath(c) + parser, err := placement.NewMetricsParser(locodeDBPath) + if err != nil { + return fmt.Errorf("metrics parser creation: %w", err) } - a.ObjectCfg.priorityMetrics = pm + m, err := parser.ParseMetrics(objectconfig.Get(c).Priority()) + if err != nil { + return fmt.Errorf("parse metrics: %w", err) + } + a.ObjectCfg.priorityMetrics = m // Storage Engine diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 969d77396..18aa254f1 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -217,3 +217,8 @@ func (l PersistentPolicyRulesConfig) NoSync() bool { func CompatibilityMode(c *config.Config) bool { return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode") } + +// LocodeDBPath returns path to LOCODE database. 
+func LocodeDBPath(c *config.Config) string { + return config.String(c.Sub(subsection), "locode_db_path") +} diff --git a/config/example/node.env b/config/example/node.env index dfb250341..b501d3836 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -23,6 +23,7 @@ FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state +FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db # Tree service section FROSTFS_TREE_ENABLED=true diff --git a/config/example/node.json b/config/example/node.json index 0b061a3d4..b02f43f60 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -37,7 +37,8 @@ }, "persistent_state": { "path": "/state" - } + }, + "locode_db_path": "/path/to/locode/db" }, "grpc": { "0": { diff --git a/config/example/node.yaml b/config/example/node.yaml index 46e4ebdbe..ba32adb82 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -36,6 +36,7 @@ node: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: path: /state # path to persistent state file of Storage node + "locode_db_path": "/path/to/locode/db" grpc: - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 5fe011ece..248b54ea4 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -12,22 +12,23 @@ There are some custom types used for brevity: # Structure -| Section | Description | -|------------------------|---------------------------------------------------------------------| -| `logger` | [Logging parameters](#logger-section) | -| `pprof` | [PProf configuration](#pprof-section) | -| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | -| `control` | [Control service configuration](#control-section) | -| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | -| `morph` | [N3 blockchain client configuration](#morph-section) | -| `apiclient` | [FrostFS API client configuration](#apiclient-section) | -| `policer` | [Policer service configuration](#policer-section) | -| `replicator` | [Replicator service configuration](#replicator-section) | -| `storage` | [Storage engine configuration](#storage-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `audit` | [Audit configuration](#audit-section) | -| `multinet` | [Multinet configuration](#multinet-section) | -| `qos` | [QoS configuration](#qos-section) | +| Section | Description | +|--------------|---------------------------------------------------------| +| `node` | [Node parameters](#node-section) | +| `logger` | [Logging parameters](#logger-section) | +| `pprof` | [PProf configuration](#pprof-section) | +| `prometheus` | [Prometheus metrics configuration](#prometheus-section) | +| `control` | [Control service configuration](#control-section) | +| `contracts` | [Override FrostFS contracts hashes](#contracts-section) | +| `morph` | [N3 blockchain client configuration](#morph-section) | +| `apiclient` | [FrostFS API client configuration](#apiclient-section) | +| `policer` | [Policer service configuration](#policer-section) | +| `replicator` | [Replicator service configuration](#replicator-section) | +| `storage` | [Storage engine configuration](#storage-section) | +| `runtime` | [Runtime configuration](#runtime-section) | +| `audit` | [Audit configuration](#audit-section) | +| 
`multinet` | [Multinet configuration](#multinet-section) | +| `qos` | [QoS configuration](#qos-section) | # `control` section ```yaml @@ -384,17 +385,19 @@ node: path: /sessions persistent_state: path: /state + locode_db_path: "/path/to/locode/db" ``` -| Parameter | Type | Default value | Description | -|-----------------------|---------------------------------------------------------------|---------------|-------------------------------------------------------------------------| -| `key` | `string` | | Path to the binary-encoded private key. | -| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | -| `addresses` | `[]string` | | Addresses advertised in the netmap. | -| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `relay` | `bool` | | Enable relay mode. | -| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | -| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| Parameter | Type | Default value | Description | +|-----------------------|---------------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------| +| `key` | `string` | | Path to the binary-encoded private key. | +| `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | +| `addresses` | `[]string` | | Addresses advertised in the netmap. | +| `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | +| `relay` | `bool` | | Enable relay mode. | +| `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | +| `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | +| `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | ## `wallet` subsection N3 wallet configuration. 
diff --git a/pkg/services/object_manager/placement/metrics.go b/pkg/services/object_manager/placement/metrics.go index 45e6df339..0f24a9d96 100644 --- a/pkg/services/object_manager/placement/metrics.go +++ b/pkg/services/object_manager/placement/metrics.go @@ -2,24 +2,90 @@ package placement import ( "errors" + "fmt" + "maps" + "math" "strings" + "sync" + "sync/atomic" + locodedb "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db" + locodebolt "git.frostfs.info/TrueCloudLab/frostfs-locode-db/pkg/locode/db/boltdb" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) const ( attrPrefix = "$attribute:" + + geoDistance = "$geoDistance" ) type Metric interface { CalculateValue(*netmap.NodeInfo, *netmap.NodeInfo) int } -func ParseMetric(raw string) (Metric, error) { - if attr, found := strings.CutPrefix(raw, attrPrefix); found { - return NewAttributeMetric(attr), nil +type metricsParser struct { + locodeDBPath string + locodes map[string]locodedb.Point +} + +type MetricParser interface { + ParseMetrics([]string) ([]Metric, error) +} + +func NewMetricsParser(locodeDBPath string) (MetricParser, error) { + return &metricsParser{ + locodeDBPath: locodeDBPath, + }, nil +} + +func (p *metricsParser) initLocodes() error { + if len(p.locodes) != 0 { + return nil } - return nil, errors.New("unsupported priority metric") + if len(p.locodeDBPath) > 0 { + p.locodes = make(map[string]locodedb.Point) + locodeDB := locodebolt.New(locodebolt.Prm{ + Path: p.locodeDBPath, + }, + locodebolt.ReadOnly(), + ) + err := locodeDB.Open() + if err != nil { + return err + } + defer locodeDB.Close() + err = locodeDB.IterateOverLocodes(func(k string, v locodedb.Point) { + p.locodes[k] = v + }) + if err != nil { + return err + } + return nil + } + return errors.New("set path to locode database") +} + +func (p *metricsParser) ParseMetrics(priority []string) ([]Metric, error) { + var metrics []Metric + for _, raw := range priority { + if attr, found := strings.CutPrefix(raw, attrPrefix); found { + metrics = append(metrics, NewAttributeMetric(attr)) + } else if raw == geoDistance { + err := p.initLocodes() + if err != nil { + return nil, err + } + if len(p.locodes) == 0 { + return nil, fmt.Errorf("provide locodes database for metric %s", raw) + } + m := NewGeoDistanceMetric(p.locodes) + metrics = append(metrics, m) + } else { + return nil, fmt.Errorf("unsupported priority metric %s", raw) + } + } + return metrics, nil } // attributeMetric describes priority metric based on attribute. @@ -41,3 +107,79 @@ func (am *attributeMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.Node func NewAttributeMetric(attr string) Metric { return &attributeMetric{attribute: attr} } + +// geoDistanceMetric describes priority metric based on attribute. +type geoDistanceMetric struct { + locodes map[string]locodedb.Point + distance *atomic.Pointer[map[string]int] + mtx sync.Mutex +} + +func NewGeoDistanceMetric(locodes map[string]locodedb.Point) Metric { + d := atomic.Pointer[map[string]int]{} + m := make(map[string]int) + d.Store(&m) + gm := &geoDistanceMetric{ + locodes: locodes, + distance: &d, + } + return gm +} + +// CalculateValue return distance in kilometers between current node and provided, +// if coordinates for provided node found. In other case return math.MaxInt. 
+func (gm *geoDistanceMetric) CalculateValue(from *netmap.NodeInfo, to *netmap.NodeInfo) int { + fl := from.LOCODE() + tl := to.LOCODE() + if fl == tl { + return 0 + } + m := gm.distance.Load() + if v, ok := (*m)[fl+tl]; ok { + return v + } + return gm.calculateDistance(fl, tl) +} + +func (gm *geoDistanceMetric) calculateDistance(from, to string) int { + gm.mtx.Lock() + defer gm.mtx.Unlock() + od := gm.distance.Load() + if v, ok := (*od)[from+to]; ok { + return v + } + nd := maps.Clone(*od) + var dist int + pointFrom, okFrom := gm.locodes[from] + pointTo, okTo := gm.locodes[to] + if okFrom && okTo { + dist = int(distance(pointFrom.Latitude(), pointFrom.Longitude(), pointTo.Latitude(), pointTo.Longitude())) + } else { + dist = math.MaxInt + } + nd[from+to] = dist + gm.distance.Store(&nd) + + return dist +} + +// distance return amount of KM between two points. +// Parameters are latitude and longitude of point 1 and 2 in decimal degrees. +// Original implementation can be found here https://www.geodatasource.com/developers/go. +func distance(lt1 float64, ln1 float64, lt2 float64, ln2 float64) float64 { + radLat1 := math.Pi * lt1 / 180 + radLat2 := math.Pi * lt2 / 180 + radTheta := math.Pi * (ln1 - ln2) / 180 + + dist := math.Sin(radLat1)*math.Sin(radLat2) + math.Cos(radLat1)*math.Cos(radLat2)*math.Cos(radTheta) + + if dist > 1 { + dist = 1 + } + + dist = math.Acos(dist) + dist = dist * 180 / math.Pi + dist = dist * 60 * 1.1515 * 1.609344 + + return dist +} diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index 9c825bf19..d1370f21e 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -601,4 +601,53 @@ func TestTraverserPriorityMetrics(t *testing.T) { next = tr.Next() require.Nil(t, next) }) + + t.Run("one rep one geo metric", func(t *testing.T) { + t.Skip() + selectors := []int{2} + replicas := []int{2} + + nodes, cnr := testPlacement(selectors, replicas) + + // Node_0, PK - ip4/0.0.0.0/tcp/0 + nodes[0][0].SetAttribute("UN-LOCODE", "RU MOW") + // Node_1, PK - ip4/0.0.0.0/tcp/1 + nodes[0][1].SetAttribute("UN-LOCODE", "RU LED") + + sdkNode := testNode(2) + sdkNode.SetAttribute("UN-LOCODE", "FI HEL") + + nodesCopy := copyVectors(nodes) + + parser, err := NewMetricsParser("/path/to/locode_db") + require.NoError(t, err) + m, err := parser.ParseMetrics([]string{geoDistance}) + require.NoError(t, err) + + tr, err := NewTraverser(context.Background(), + ForContainer(cnr), + UseBuilder(&testBuilder{ + vectors: nodesCopy, + }), + WithoutSuccessTracking(), + WithPriorityMetrics(m), + WithNodeState(&nodeState{ + node: &sdkNode, + }), + ) + require.NoError(t, err) + + // Without priority metric `$geoDistance` the order will be: + // [ {Node_0 RU MOW}, {Node_1 RU LED}] + // With priority metric `$geoDistance` the order should be: + // [ {Node_1 RU LED}, {Node_0 RU MOW}] + next := tr.Next() + require.NotNil(t, next) + require.Equal(t, 2, len(next)) + require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey())) + require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey())) + + next = tr.Next() + require.Nil(t, next) + }) } From bf06c4fb4b935410a4ea9b610f4e12f2fe28cb2a Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Apr 2025 20:22:05 +0300 Subject: [PATCH 549/591] [#1689] Remove deprecated NodeInfo.IterateNetworkEndpoints() Change-Id: Ic78f18aed11fab34ee3147ceea657296b89fe60c Signed-off-by: Evgenii Stratonikov --- 
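Not part of the commit, just a reference sketch of the iterator style the call sites below are migrated to. It assumes NetworkEndpoints() has the Go 1.23 iter.Seq[string] shape (as the slices.AppendSeq usage in the hunks suggests); endpoints() here is a stand-in, not an SDK function:

package main

import (
	"fmt"
	"iter"
	"slices"
)

// endpoints is a stand-in for NodeInfo.NetworkEndpoints(): a range-over-func
// iterator that yields each address until the consumer stops it.
func endpoints() iter.Seq[string] {
	return func(yield func(string) bool) {
		for _, e := range []string{"/ip4/0.0.0.0/tcp/0", "/ip4/0.0.0.0/tcp/1"} {
			if !yield(e) {
				return
			}
		}
	}
}

func main() {
	// Replaces IterateNetworkEndpoints(func(s string) bool { ... }); breaking
	// out of the loop plays the role of returning true from the callback.
	for e := range endpoints() {
		fmt.Println(e)
	}
	// Replaces appending inside the callback, as in the cache.go hunks below.
	all := slices.Collect(endpoints())
	fmt.Println(len(all), all)
}
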
cmd/frostfs-cli/modules/netmap/nodeinfo.go | 4 +- cmd/frostfs-cli/modules/object/nodes.go | 11 ++---- cmd/frostfs-node/cache.go | 22 ++--------- cmd/frostfs-node/netmap.go | 6 ++- cmd/internal/common/netmap.go | 4 +- pkg/core/netmap/nodes.go | 6 ++- pkg/network/validation.go | 6 ++- pkg/services/replicator/pull.go | 7 +--- pkg/services/tree/redirect.go | 41 +++++++++++-------- pkg/services/tree/replicator.go | 46 ++++++++++++---------- pkg/services/tree/sync.go | 12 +++--- 11 files changed, 83 insertions(+), 82 deletions(-) diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index ae4bb329a..316d18d2b 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -62,9 +62,9 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("state:", stateWord) - netmap.IterateNetworkEndpoints(i, func(s string) { + for s := range i.NetworkEndpoints() { cmd.Println("address:", s) - }) + } i.IterateAttributes(func(key, value string) { cmd.Printf("attribute: %s=%s\n", key, value) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 734b557a4..476238651 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "sync" internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client" @@ -460,17 +461,11 @@ func createClient(ctx context.Context, cmd *cobra.Command, candidate netmapSDK.N var cli *client.Client var addresses []string if preferInternal, _ := cmd.Flags().GetBool(preferInternalAddressesFlag); preferInternal { - candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) addresses = append(addresses, candidate.ExternalAddresses()...) } else { addresses = append(addresses, candidate.ExternalAddresses()...) 
- candidate.IterateNetworkEndpoints(func(s string) bool { - addresses = append(addresses, s) - return false - }) + addresses = slices.AppendSeq(addresses, candidate.NetworkEndpoints()) } var lastErr error diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go index 38cee5837..e5df0a22d 100644 --- a/cmd/frostfs-node/cache.go +++ b/cmd/frostfs-node/cache.go @@ -235,16 +235,8 @@ func (s *lruNetmapSource) updateCandidates(ctx context.Context, d time.Duration) slices.Compare(n1.ExternalAddresses(), n2.ExternalAddresses()) != 0 { return 1 } - var ne1 []string - n1.IterateNetworkEndpoints(func(s string) bool { - ne1 = append(ne1, s) - return false - }) - var ne2 []string - n2.IterateNetworkEndpoints(func(s string) bool { - ne2 = append(ne2, s) - return false - }) + ne1 := slices.Collect(n1.NetworkEndpoints()) + ne2 := slices.Collect(n2.NetworkEndpoints()) return slices.Compare(ne1, ne2) }) if ret != 0 { @@ -364,15 +356,9 @@ func getNetMapNodesToUpdate(nm *netmapSDK.NetMap, candidates []netmapSDK.NodeInf } nodeEndpoints := make([]string, 0, nm.Nodes()[i].NumberOfNetworkEndpoints()) - nm.Nodes()[i].IterateNetworkEndpoints(func(s string) bool { - nodeEndpoints = append(nodeEndpoints, s) - return false - }) + nodeEndpoints = slices.AppendSeq(nodeEndpoints, nm.Nodes()[i].NetworkEndpoints()) candidateEndpoints := make([]string, 0, cnd.NumberOfNetworkEndpoints()) - cnd.IterateNetworkEndpoints(func(s string) bool { - candidateEndpoints = append(candidateEndpoints, s) - return false - }) + candidateEndpoints = slices.AppendSeq(candidateEndpoints, cnd.NetworkEndpoints()) if slices.Compare(nodeEndpoints, candidateEndpoints) != 0 { update = true tmp.endpoints = candidateEndpoints diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 0e90e7707..6d57edcce 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -124,7 +124,11 @@ func nodeKeyFromNetmap(c *cfg) []byte { func (c *cfg) iterateNetworkAddresses(f func(string) bool) { ni, ok := c.cfgNetmap.state.getNodeInfo() if ok { - ni.IterateNetworkEndpoints(f) + for s := range ni.NetworkEndpoints() { + if f(s) { + return + } + } } } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index f550552d2..334b662f8 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -27,9 +27,9 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Printf("%sNode %d: %s %s ", indent, index+1, hex.EncodeToString(node.PublicKey()), strState) - netmap.IterateNetworkEndpoints(node, func(endpoint string) { + for endpoint := range node.NetworkEndpoints() { cmd.Printf("%s ", endpoint) - }) + } cmd.Println() if !short { diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index b0c9e1f9e..f01c07b19 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -17,7 +17,11 @@ func (x Node) PublicKey() []byte { // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. func (x Node) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) + for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { + if f(s) { + return + } + } } // NumberOfAddresses returns number of announced network addresses. 
diff --git a/pkg/network/validation.go b/pkg/network/validation.go index 92f650119..73a3ef8d7 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -35,7 +35,11 @@ var ( type NodeEndpointsIterator netmap.NodeInfo func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { - (netmap.NodeInfo)(x).IterateNetworkEndpoints(f) + for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { + if f(s) { + return + } + } } func (x NodeEndpointsIterator) NumberOfAddresses() int { diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go index bb38c72ad..216fe4919 100644 --- a/pkg/services/replicator/pull.go +++ b/pkg/services/replicator/pull.go @@ -3,6 +3,7 @@ package replicator import ( "context" "errors" + "slices" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" @@ -42,11 +43,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) { if err == nil { break } - var endpoints []string - node.IterateNetworkEndpoints(func(s string) bool { - endpoints = append(endpoints, s) - return false - }) + endpoints := slices.Collect(node.NetworkEndpoints()) p.log.Error(ctx, logs.ReplicatorCouldNotGetObjectFromRemoteStorage, zap.Stringer("object", task.Addr), zap.Error(err), diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index d92c749a8..3dcdc4fc7 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -41,24 +41,15 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo var called bool for _, n := range cntNodes { var stop bool - n.IterateNetworkEndpoints(func(endpoint string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", - trace.WithAttributes( - attribute.String("endpoint", endpoint), - )) - defer span.End() - - c, err := s.cache.get(ctx, endpoint) - if err != nil { - return false + for endpoint := range n.NetworkEndpoints() { + stop = s.execOnClient(ctx, endpoint, func(c TreeServiceClient) bool { + called = true + return f(c) + }) + if called { + break } - - s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - - called = true - stop = f(c) - return true - }) + } if stop { return nil } @@ -68,3 +59,19 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo } return nil } + +func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(TreeServiceClient) bool) bool { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", + trace.WithAttributes( + attribute.String("endpoint", endpoint), + )) + defer span.End() + + c, err := s.cache.get(ctx, endpoint) + if err != nil { + return false + } + + s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) + return f(c) +} diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 01a4ffde0..ee40884eb 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -89,29 +89,13 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req var lastErr error var lastAddr string - n.IterateNetworkEndpoints(func(addr string) bool { - ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", - trace.WithAttributes( - attribute.String("public_key", hex.EncodeToString(n.PublicKey())), - attribute.String("address", addr), - ), - ) - defer span.End() - + for 
addr := range n.NetworkEndpoints() { lastAddr = addr - - c, err := s.cache.get(ctx, addr) - if err != nil { - lastErr = fmt.Errorf("can't create client: %w", err) - return false + lastErr = s.apply(ctx, n, addr, req) + if lastErr == nil { + break } - - ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) - _, lastErr = c.Apply(ctx, req) - cancel() - - return lastErr == nil - }) + } if lastErr != nil { if errors.Is(lastErr, errRecentlyFailed) { @@ -130,6 +114,26 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req return nil } +func (s *Service) apply(ctx context.Context, n netmapSDK.NodeInfo, addr string, req *ApplyRequest) error { + ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.HandleReplicationTaskOnEndpoint", + trace.WithAttributes( + attribute.String("public_key", hex.EncodeToString(n.PublicKey())), + attribute.String("address", addr), + ), + ) + defer span.End() + + c, err := s.cache.get(ctx, addr) + if err != nil { + return fmt.Errorf("can't create client: %w", err) + } + + ctx, cancel := context.WithTimeout(ctx, s.replicatorTimeout) + _, err = c.Apply(ctx, req) + cancel() + return err +} + func (s *Service) replicateLoop(ctx context.Context) { for range s.replicatorWorkerCount { go s.replicationWorker(ctx) diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index c3796fbd4..32297f9d7 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -297,27 +297,27 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, for i, n := range nodes { errGroup.Go(func() error { var nodeSynced bool - n.IterateNetworkEndpoints(func(addr string) bool { + for addr := range n.NetworkEndpoints() { var a network.Address if err := a.FromString(addr); err != nil { s.log.Warn(ctx, logs.TreeFailedToParseAddressForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } cc, err := createConnection(a, grpc.WithContextDialer(s.ds.GrpcContextDialer())) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) - return false + continue } - defer cc.Close() err = s.startStream(egCtx, cid, treeID, from, cc, nodeOperationStreams[i]) if err != nil { s.log.Warn(ctx, logs.TreeFailedToRunTreeSynchronizationForSpecificNode, zap.Error(err), zap.String("address", addr)) } nodeSynced = err == nil - return true - }) + _ = cc.Close() + break + } close(nodeOperationStreams[i]) if !nodeSynced { allNodesSynced.Store(false) From e65d578ba93f18c5d4276300fe660f01941f630c Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Apr 2025 20:24:46 +0300 Subject: [PATCH 550/591] [#1689] Remove deprecated NodeInfo.IterateAttributes() Change-Id: Ibd07302079efe148903aa6177759232a28616736 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-cli/modules/container/policy_playground.go | 4 ++-- cmd/frostfs-cli/modules/netmap/nodeinfo.go | 4 ++-- cmd/internal/common/netmap.go | 4 ++-- pkg/util/attributes/parser_test.go | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/policy_playground.go b/cmd/frostfs-cli/modules/container/policy_playground.go index 2cc1107ef..cf4862b4a 100644 --- a/cmd/frostfs-cli/modules/container/policy_playground.go +++ b/cmd/frostfs-cli/modules/container/policy_playground.go @@ -40,9 +40,9 @@ func (repl *policyPlaygroundREPL) handleLs(args []string) error { i := 1 for id, node := range repl.nodes { var attrs []string - node.IterateAttributes(func(k, v 
string) { + for k, v := range node.Attributes() { attrs = append(attrs, fmt.Sprintf("%s:%q", k, v)) - }) + } fmt.Fprintf(repl.console, "\t%2d: id=%s attrs={%v}\n", i, id, strings.Join(attrs, " ")) i++ } diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go index 316d18d2b..5da66dcd9 100644 --- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go +++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go @@ -66,7 +66,7 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) { cmd.Println("address:", s) } - i.IterateAttributes(func(key, value string) { + for key, value := range i.Attributes() { cmd.Printf("attribute: %s=%s\n", key, value) - }) + } } diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go index 334b662f8..5dd1a060e 100644 --- a/cmd/internal/common/netmap.go +++ b/cmd/internal/common/netmap.go @@ -33,9 +33,9 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo, cmd.Println() if !short { - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { cmd.Printf("%s\t%s: %s\n", indent, key, value) - }) + } } } diff --git a/pkg/util/attributes/parser_test.go b/pkg/util/attributes/parser_test.go index 547c8d50b..66581878a 100644 --- a/pkg/util/attributes/parser_test.go +++ b/pkg/util/attributes/parser_test.go @@ -23,12 +23,12 @@ func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) { mExp = mSrc } - node.IterateAttributes(func(key, value string) { + for key, value := range node.Attributes() { v, ok := mExp[key] require.True(t, ok) require.Equal(t, value, v) delete(mExp, key) - }) + } require.Empty(t, mExp) } From 2d1232ce6de2f9291528c1c3e325bb8a0683de10 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Fri, 4 Apr 2025 20:31:35 +0300 Subject: [PATCH 551/591] [#1689] network,core/netmap: Replace Iterate*() functions with iterators Change-Id: I4842a3160d74c56d99ea9465d4be2f0662080605 Signed-off-by: Evgenii Stratonikov --- cmd/frostfs-node/config/node/config.go | 10 ++++------ pkg/core/client/util.go | 3 ++- pkg/core/netmap/nodes.go | 12 +++++++++++- pkg/network/group.go | 18 +++++++++--------- pkg/network/group_test.go | 8 ++++---- pkg/network/validation.go | 9 +++------ 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 18aa254f1..81b191e96 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -3,7 +3,9 @@ package nodeconfig import ( "fmt" "io/fs" + "iter" "os" + "slices" "strconv" "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config" @@ -88,12 +90,8 @@ func Wallet(c *config.Config) *keys.PrivateKey { type stringAddressGroup []string -func (x stringAddressGroup) IterateAddresses(f func(string) bool) { - for i := range x { - if f(x[i]) { - break - } - } +func (x stringAddressGroup) Addresses() iter.Seq[string] { + return slices.Values(x) } func (x stringAddressGroup) NumberOfAddresses() int { diff --git a/pkg/core/client/util.go b/pkg/core/client/util.go index d4bc0cf68..91ee5c6c3 100644 --- a/pkg/core/client/util.go +++ b/pkg/core/client/util.go @@ -3,6 +3,7 @@ package client import ( "bytes" "fmt" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -19,7 +20,7 @@ func nodeInfoFromKeyAddr(dst *NodeInfo, k []byte, a, external network.AddressGro // Args must not be nil. 
func NodeInfoFromRawNetmapElement(dst *NodeInfo, info interface { PublicKey() []byte - IterateAddresses(func(string) bool) + Addresses() iter.Seq[string] NumberOfAddresses() int ExternalAddresses() []string }, diff --git a/pkg/core/netmap/nodes.go b/pkg/core/netmap/nodes.go index f01c07b19..e58e42634 100644 --- a/pkg/core/netmap/nodes.go +++ b/pkg/core/netmap/nodes.go @@ -1,6 +1,10 @@ package netmap -import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +import ( + "iter" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" +) // Node is a named type of netmap.NodeInfo which provides interface needed // in the current repository. Node is expected to be used everywhere instead @@ -14,8 +18,14 @@ func (x Node) PublicKey() []byte { return (netmap.NodeInfo)(x).PublicKey() } +// Addresses returns an iterator over all announced network addresses. +func (x Node) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() +} + // IterateAddresses iterates over all announced network addresses // and passes them into f. Handler MUST NOT be nil. +// Deprecated: use [Node.Addresses] instead. func (x Node) IterateAddresses(f func(string) bool) { for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { if f(s) { diff --git a/pkg/network/group.go b/pkg/network/group.go index 5a71e530e..0044fb2d4 100644 --- a/pkg/network/group.go +++ b/pkg/network/group.go @@ -3,6 +3,7 @@ package network import ( "errors" "fmt" + "iter" "slices" "sort" @@ -68,9 +69,8 @@ func (x AddressGroup) Swap(i, j int) { // MultiAddressIterator is an interface of network address group. type MultiAddressIterator interface { - // IterateAddresses must iterate over network addresses and pass each one - // to the handler until it returns true. - IterateAddresses(func(string) bool) + // Addresses must return an iterator over network addresses. + Addresses() iter.Seq[string] // NumberOfAddresses must return number of addresses in group. NumberOfAddresses() int @@ -131,19 +131,19 @@ func (x *AddressGroup) FromIterator(iter MultiAddressIterator) error { // iterateParsedAddresses parses each address from MultiAddressIterator and passes it to f // until 1st parsing failure or f's error. 
func iterateParsedAddresses(iter MultiAddressIterator, f func(s Address) error) (err error) { - iter.IterateAddresses(func(s string) bool { + for s := range iter.Addresses() { var a Address err = a.FromString(s) if err != nil { - err = fmt.Errorf("could not parse address from string: %w", err) - return true + return fmt.Errorf("could not parse address from string: %w", err) } err = f(a) - - return err != nil - }) + if err != nil { + return err + } + } return } diff --git a/pkg/network/group_test.go b/pkg/network/group_test.go index 5b335fa52..d08264533 100644 --- a/pkg/network/group_test.go +++ b/pkg/network/group_test.go @@ -1,6 +1,8 @@ package network import ( + "iter" + "slices" "sort" "testing" @@ -58,10 +60,8 @@ func TestAddressGroup_FromIterator(t *testing.T) { type testIterator []string -func (t testIterator) IterateAddresses(f func(string) bool) { - for i := range t { - f(t[i]) - } +func (t testIterator) Addresses() iter.Seq[string] { + return slices.Values(t) } func (t testIterator) NumberOfAddresses() int { diff --git a/pkg/network/validation.go b/pkg/network/validation.go index 73a3ef8d7..b5157f28f 100644 --- a/pkg/network/validation.go +++ b/pkg/network/validation.go @@ -2,6 +2,7 @@ package network import ( "errors" + "iter" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" ) @@ -34,12 +35,8 @@ var ( // MultiAddressIterator. type NodeEndpointsIterator netmap.NodeInfo -func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) { - for s := range (netmap.NodeInfo)(x).NetworkEndpoints() { - if f(s) { - return - } - } +func (x NodeEndpointsIterator) Addresses() iter.Seq[string] { + return (netmap.NodeInfo)(x).NetworkEndpoints() } func (x NodeEndpointsIterator) NumberOfAddresses() int { From 98308d0cad312c34f2575d05651405de58c5b2f3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Apr 2025 15:39:56 +0300 Subject: [PATCH 552/591] [#1715] blobstor: Allow to specify custom compression level Change-Id: I140c39b9dceaaeb58767061b131777af22242b19 Signed-off-by: Dmitrii Stepanov --- .../maintenance/zombie/storage_engine.go | 2 ++ cmd/frostfs-node/config.go | 4 +++ cmd/frostfs-node/config/engine/config_test.go | 2 ++ .../config/engine/shard/config.go | 10 ++++++ config/example/node.env | 1 + config/example/node.json | 1 + config/example/node.yaml | 5 +-- docs/storage-node-configuration.md | 3 +- internal/assert/cond.go | 4 +++ internal/logs/logs.go | 1 + pkg/local_object_storage/blobstor/blobstor.go | 6 ++++ .../blobstor/compression/compress.go | 34 ++++++++++++++++++- pkg/local_object_storage/blobstor/control.go | 5 +++ 13 files changed, 74 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go index 5851e049c..fe75a6f6f 100644 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -13,6 +13,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -135,6 +136,7 @@ func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { result := []blobstor.Option{ blobstor.WithCompressObjects(sh.Compress()), + blobstor.WithCompressionLevel(compression.Level(sh.CompressionLevel())), blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()), blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()), blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()), diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index 8ceef2c31..f32953c58 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -40,6 +40,7 @@ import ( netmapCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -129,6 +130,7 @@ type applicationConfiguration struct { type shardCfg struct { compress bool + compressionLevel compression.Level estimateCompressibility bool estimateCompressibilityThreshold float64 @@ -273,6 +275,7 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *s target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() target.mode = source.Mode() target.compress = source.Compress() + target.compressionLevel = compression.Level(source.CompressionLevel()) target.estimateCompressibility = source.EstimateCompressibility() target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold() target.uncompressableContentType = source.UncompressableContentTypes() @@ -1027,6 +1030,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID blobstoreOpts := []blobstor.Option{ blobstor.WithCompressObjects(shCfg.compress), + blobstor.WithCompressionLevel(shCfg.compressionLevel), blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index 34613ad9e..afadb4c99 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -101,6 +101,7 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) require.Equal(t, true, sc.Compress()) + require.Equal(t, "fastest", sc.CompressionLevel()) require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) require.Equal(t, true, sc.EstimateCompressibility()) require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) @@ -237,6 +238,7 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) require.Equal(t, false, sc.Compress()) + require.Equal(t, "", sc.CompressionLevel()) require.Equal(t, []string(nil), 
sc.UncompressableContentTypes()) require.EqualValues(t, 102400, sc.SmallSizeLimit()) diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index e50d56b95..14e91f01f 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -37,6 +37,16 @@ func (x *Config) Compress() bool { ) } +// CompressionLevel returns the value of "compression_level" config parameter. +// +// Returns empty string if the value is not a valid string. +func (x *Config) CompressionLevel() string { + return config.StringSafe( + (*config.Config)(x), + "compression_level", + ) +} + // UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. // // Returns nil if a the value is missing or is invalid. diff --git a/config/example/node.env b/config/example/node.env index b501d3836..fb4fc69dd 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -123,6 +123,7 @@ FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config FROSTFS_STORAGE_SHARD_0_COMPRESS=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY_THRESHOLD=0.7 diff --git a/config/example/node.json b/config/example/node.json index b02f43f60..1e9dd7835 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -184,6 +184,7 @@ "max_batch_delay": "10ms" }, "compress": true, + "compression_level": "fastest", "compression_exclude_content_types": [ "audio/*", "video/*" ], diff --git a/config/example/node.yaml b/config/example/node.yaml index ba32adb82..26c6e248c 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -160,7 +160,7 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compress: false # turn on/off zstd(level 3) compression of stored objects + compress: false # turn on/off zstd compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: @@ -202,7 +202,8 @@ storage: max_batch_size: 100 max_batch_delay: 10ms - compress: true # turn on/off zstd(level 3) compression of stored objects + compress: true # turn on/off zstd compression of stored objects + compression_level: fastest compression_exclude_content_types: - audio/* - video/* diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 248b54ea4..19f6ee66d 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -188,6 +188,7 @@ The following table describes configuration for each shard. | Parameter | Type | Default value | Description | | ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `compress` | `bool` | `false` | Flag to enable compression. | +| `compression_level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. 
| | `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | | `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | | `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | @@ -199,7 +200,7 @@ The following table describes configuration for each shard. | `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | | `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | | `gc` | [GC config](#gc-subsection) | | GC configuration. | -| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | ### `blobstor` subsection diff --git a/internal/assert/cond.go b/internal/assert/cond.go index c6a034f94..113d2eba9 100644 --- a/internal/assert/cond.go +++ b/internal/assert/cond.go @@ -23,3 +23,7 @@ func NoError(err error, details ...string) { panic(content) } } + +func Fail(details ...string) { + panic(strings.Join(details, " ")) +} diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 3a3ceb150..dedc7e12c 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -516,4 +516,5 @@ const ( FailedToValidateIncomingIOTag = "failed to validate incoming IO tag, replaced with `client`" WriteCacheFailedToAcquireRPSQuota = "writecache failed to acquire RPS quota to flush object" FailedToUpdateNetmapCandidates = "update netmap candidates failed" + UnknownCompressionLevelDefaultWillBeUsed = "unknown compression level, 'optimal' will be used" ) diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index d7c333349..7a9568aff 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -109,6 +109,12 @@ func WithCompressObjects(comp bool) Option { } } +func WithCompressionLevel(level compression.Level) Option { + return func(c *cfg) { + c.compression.Level = level + } +} + // WithCompressibilityEstimate returns an option to use // normilized compressibility estimate to decide compress // data or not. diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index 85ab47692..8a86b6982 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -4,15 +4,26 @@ import ( "bytes" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "github.com/klauspost/compress" "github.com/klauspost/compress/zstd" ) +type Level string + +const ( + LevelDefault Level = "" + LevelOptimal Level = "optimal" + LevelFastest Level = "fastest" + LevelSmallestSize Level = "smallest_size" +) + // Config represents common compression-related configuration. 
type Config struct { Enabled bool UncompressableContentTypes []string + Level Level UseCompressEstimation bool CompressEstimationThreshold float64 @@ -30,7 +41,7 @@ func (c *Config) Init() error { var err error if c.Enabled { - c.encoder, err = zstd.NewWriter(nil) + c.encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(c.compressionLevel())) if err != nil { return err } @@ -116,3 +127,24 @@ func (c *Config) Close() error { } return err } + +func (c *Config) HasValidCompressionLevel() bool { + return c.Level == LevelDefault || + c.Level == LevelOptimal || + c.Level == LevelFastest || + c.Level == LevelSmallestSize +} + +func (c *Config) compressionLevel() zstd.EncoderLevel { + switch c.Level { + case LevelDefault, LevelOptimal: + return zstd.SpeedDefault + case LevelFastest: + return zstd.SpeedFastest + case LevelSmallestSize: + return zstd.SpeedBestCompression + default: + assert.Fail("unknown compression level", string(c.Level)) + return zstd.SpeedDefault + } +} diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 93316be02..0418eedd0 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -6,6 +6,7 @@ import ( "fmt" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "go.uber.org/zap" ) @@ -53,6 +54,10 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stag func (b *BlobStor) Init(ctx context.Context) error { b.log.Debug(ctx, logs.BlobstorInitializing) + if !b.compression.HasValidCompressionLevel() { + b.log.Warn(ctx, logs.UnknownCompressionLevelDefaultWillBeUsed, zap.String("level", string(b.compression.Level))) + b.compression.Level = compression.LevelDefault + } if err := b.compression.Init(); err != nil { return err } From 8c746a914ac917a16b0ab3098e70fda7e15dcbb0 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Apr 2025 16:18:08 +0300 Subject: [PATCH 553/591] [#1715] compression: Decouple Config and Compressor Refactoring. Change-Id: Ide2e1378f30c39045d4bacd13a902331bd4f764f Signed-off-by: Dmitrii Stepanov --- .../blobstor/blobovniczatree/blobovnicza.go | 4 ++-- .../blobstor/blobovniczatree/option.go | 2 +- pkg/local_object_storage/blobstor/blobstor.go | 4 ++-- .../blobstor/common/storage.go | 4 ++-- .../blobstor/compression/bench_test.go | 16 +++++++++----- .../blobstor/compression/compress.go | 22 +++++++++++-------- .../blobstor/fstree/fstree.go | 18 +++++++-------- .../blobstor/memstore/control.go | 4 ++-- .../blobstor/memstore/option.go | 2 +- .../blobstor/teststore/option.go | 8 +++---- .../blobstor/teststore/teststore.go | 4 ++-- .../writecache/writecache.go | 2 +- 12 files changed, 49 insertions(+), 41 deletions(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go index d9e99d0d1..3e8b9f07b 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go @@ -158,11 +158,11 @@ func (b *Blobovniczas) Path() string { } // SetCompressor implements common.Storage. 
-func (b *Blobovniczas) SetCompressor(cc *compression.Config) { +func (b *Blobovniczas) SetCompressor(cc *compression.Compressor) { b.compression = cc } -func (b *Blobovniczas) Compressor() *compression.Config { +func (b *Blobovniczas) Compressor() *compression.Compressor { return b.compression } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 0e1b2022e..2f6d31b4e 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -19,7 +19,7 @@ type cfg struct { openedCacheSize int blzShallowDepth uint64 blzShallowWidth uint64 - compression *compression.Config + compression *compression.Compressor blzOpts []blobovnicza.Option reportError func(context.Context, string, error) // reportError is the function called when encountering disk errors. metrics Metrics diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index 7a9568aff..cf67c6fe9 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -41,7 +41,7 @@ type SubStorageInfo struct { type Option func(*cfg) type cfg struct { - compression compression.Config + compression compression.Compressor log *logger.Logger storage []SubStorage metrics Metrics @@ -158,6 +158,6 @@ func WithMetrics(m Metrics) Option { } } -func (b *BlobStor) Compressor() *compression.Config { +func (b *BlobStor) Compressor() *compression.Compressor { return &b.compression } diff --git a/pkg/local_object_storage/blobstor/common/storage.go b/pkg/local_object_storage/blobstor/common/storage.go index 6ecef48cd..e35c35e60 100644 --- a/pkg/local_object_storage/blobstor/common/storage.go +++ b/pkg/local_object_storage/blobstor/common/storage.go @@ -18,8 +18,8 @@ type Storage interface { Path() string ObjectsCount(ctx context.Context) (uint64, error) - SetCompressor(cc *compression.Config) - Compressor() *compression.Config + SetCompressor(cc *compression.Compressor) + Compressor() *compression.Compressor // SetReportErrorFunc allows to provide a function to be called on disk errors. // This function MUST be called before Open. 
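For illustration only (this snippet is not part of the patch): a minimal sketch of how the decoupled types fit together after this change, based on the signatures visible in the surrounding hunks. Config keeps the user-facing settings, while Compressor embeds a Config and owns the zstd encoder/decoder that Init builds from it. The helper name roundTrip is invented.

package example

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
)

// roundTrip compresses and then decompresses a payload with an explicitly
// configured Compressor.
func roundTrip(payload []byte) ([]byte, error) {
	c := &compression.Compressor{
		Config: compression.Config{
			Enabled: true,
			Level:   compression.LevelFastest,
		},
	}
	if err := c.Init(); err != nil { // builds the zstd encoder/decoder from Config
		return nil, err
	}
	defer c.Close()

	compressed := c.Compress(payload) // returns the input unchanged if compression does not pay off
	fmt.Println(len(payload), "->", len(compressed))

	return c.Decompress(compressed) // no-op for data without the zstd frame magic
}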
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 9f70f8ec2..445a0494b 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -11,7 +11,7 @@ import ( ) func BenchmarkCompression(b *testing.B) { - c := Config{Enabled: true} + c := Compressor{Config: Config{Enabled: true}} require.NoError(b, c.Init()) for _, size := range []int{128, 1024, 32 * 1024, 32 * 1024 * 1024} { @@ -33,7 +33,7 @@ func BenchmarkCompression(b *testing.B) { } } -func benchWith(b *testing.B, c Config, data []byte) { +func benchWith(b *testing.B, c Compressor, data []byte) { b.ResetTimer() b.ReportAllocs() for range b.N { @@ -56,8 +56,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("estimate", func(b *testing.B) { b.ResetTimer() - c := &Config{ - Enabled: true, + c := &Compressor{ + Config: Config{ + Enabled: true, + }, } require.NoError(b, c.Init()) @@ -76,8 +78,10 @@ func BenchmarkCompressionRealVSEstimate(b *testing.B) { b.Run("compress", func(b *testing.B) { b.ResetTimer() - c := &Config{ - Enabled: true, + c := &Compressor{ + Config: Config{ + Enabled: true, + }, } require.NoError(b, c.Init()) diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index 8a86b6982..efe84ea2a 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -19,6 +19,13 @@ const ( LevelSmallestSize Level = "smallest_size" ) +type Compressor struct { + Config + + encoder *zstd.Encoder + decoder *zstd.Decoder +} + // Config represents common compression-related configuration. type Config struct { Enabled bool @@ -27,9 +34,6 @@ type Config struct { UseCompressEstimation bool CompressEstimationThreshold float64 - - encoder *zstd.Encoder - decoder *zstd.Decoder } // zstdFrameMagic contains first 4 bytes of any compressed object @@ -37,7 +41,7 @@ type Config struct { var zstdFrameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} // Init initializes compression routines. -func (c *Config) Init() error { +func (c *Compressor) Init() error { var err error if c.Enabled { @@ -84,7 +88,7 @@ func (c *Config) NeedsCompression(obj *objectSDK.Object) bool { // Decompress decompresses data if it starts with the magic // and returns data untouched otherwise. -func (c *Config) Decompress(data []byte) ([]byte, error) { +func (c *Compressor) Decompress(data []byte) ([]byte, error) { if len(data) < 4 || !bytes.Equal(data[:4], zstdFrameMagic) { return data, nil } @@ -93,7 +97,7 @@ func (c *Config) Decompress(data []byte) ([]byte, error) { // Compress compresses data if compression is enabled // and returns data untouched otherwise. -func (c *Config) Compress(data []byte) []byte { +func (c *Compressor) Compress(data []byte) []byte { if c == nil || !c.Enabled { return data } @@ -107,7 +111,7 @@ func (c *Config) Compress(data []byte) []byte { return c.compress(data) } -func (c *Config) compress(data []byte) []byte { +func (c *Compressor) compress(data []byte) []byte { maxSize := c.encoder.MaxEncodedSize(len(data)) compressed := c.encoder.EncodeAll(data, make([]byte, 0, maxSize)) if len(data) < len(compressed) { @@ -117,7 +121,7 @@ func (c *Config) compress(data []byte) []byte { } // Close closes encoder and decoder, returns any error occurred. 
-func (c *Config) Close() error { +func (c *Compressor) Close() error { var err error if c.encoder != nil { err = c.encoder.Close() @@ -135,7 +139,7 @@ func (c *Config) HasValidCompressionLevel() bool { c.Level == LevelSmallestSize } -func (c *Config) compressionLevel() zstd.EncoderLevel { +func (c *Compressor) compressionLevel() zstd.EncoderLevel { switch c.Level { case LevelDefault, LevelOptimal: return zstd.SpeedDefault diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go index 031b385b2..112741ab4 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree.go @@ -45,7 +45,7 @@ type FSTree struct { log *logger.Logger - *compression.Config + compressor *compression.Compressor Depth uint64 DirNameLen int @@ -82,7 +82,7 @@ func New(opts ...Option) *FSTree { Permissions: 0o700, RootPath: "./", }, - Config: nil, + compressor: nil, Depth: 4, DirNameLen: DirNameLen, metrics: &noopMetrics{}, @@ -196,7 +196,7 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr } if err == nil { - data, err = t.Decompress(data) + data, err = t.compressor.Decompress(data) } if err != nil { if prm.IgnoreErrors { @@ -405,7 +405,7 @@ func (t *FSTree) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, err return common.PutRes{}, err } if !prm.DontCompress { - prm.RawData = t.Compress(prm.RawData) + prm.RawData = t.compressor.Compress(prm.RawData) } size = len(prm.RawData) @@ -448,7 +448,7 @@ func (t *FSTree) Get(ctx context.Context, prm common.GetPrm) (common.GetRes, err } } - data, err = t.Decompress(data) + data, err = t.compressor.Decompress(data) if err != nil { return common.GetRes{}, err } @@ -597,12 +597,12 @@ func (t *FSTree) Path() string { } // SetCompressor implements common.Storage. -func (t *FSTree) SetCompressor(cc *compression.Config) { - t.Config = cc +func (t *FSTree) SetCompressor(cc *compression.Compressor) { + t.compressor = cc } -func (t *FSTree) Compressor() *compression.Config { - return t.Config +func (t *FSTree) Compressor() *compression.Compressor { + return t.compressor } // SetReportErrorFunc implements common.Storage. 
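The fstree hunk above and the memstore hunk below follow the same pattern, which a deliberately bare-bones, hypothetical substorage makes easier to see: the blobstor injects one shared *compression.Compressor through SetCompressor, and the substorage applies it on its write and read paths. Only the compression API calls come from this patch; toyStorage itself is invented for illustration.

package example

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
)

// toyStorage illustrates the compressor-related part of the common.Storage contract.
type toyStorage struct {
	data       map[string][]byte
	compressor *compression.Compressor
}

func newToyStorage() *toyStorage {
	return &toyStorage{data: make(map[string][]byte)}
}

// SetCompressor and Compressor mirror the corresponding common.Storage methods.
func (s *toyStorage) SetCompressor(cc *compression.Compressor) { s.compressor = cc }
func (s *toyStorage) Compressor() *compression.Compressor      { return s.compressor }

// put stores the payload, letting the shared compressor decide whether
// compression actually pays off.
func (s *toyStorage) put(key string, raw []byte) {
	s.data[key] = s.compressor.Compress(raw)
}

// get transparently decompresses: Decompress only touches data that starts
// with the zstd frame magic, so uncompressed payloads pass through untouched.
func (s *toyStorage) get(key string) ([]byte, error) {
	return s.compressor.Decompress(s.data[key])
}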
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go index 95a916662..3df96a1c3 100644 --- a/pkg/local_object_storage/blobstor/memstore/control.go +++ b/pkg/local_object_storage/blobstor/memstore/control.go @@ -16,7 +16,7 @@ func (s *memstoreImpl) Init() error func (s *memstoreImpl) Close(context.Context) error { return nil } func (s *memstoreImpl) Type() string { return Type } func (s *memstoreImpl) Path() string { return s.rootPath } -func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc } -func (s *memstoreImpl) Compressor() *compression.Config { return s.compression } +func (s *memstoreImpl) SetCompressor(cc *compression.Compressor) { s.compression = cc } +func (s *memstoreImpl) Compressor() *compression.Compressor { return s.compression } func (s *memstoreImpl) SetReportErrorFunc(func(context.Context, string, error)) {} func (s *memstoreImpl) SetParentID(string) {} diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go index 97a03993d..7605af4e5 100644 --- a/pkg/local_object_storage/blobstor/memstore/option.go +++ b/pkg/local_object_storage/blobstor/memstore/option.go @@ -7,7 +7,7 @@ import ( type cfg struct { rootPath string readOnly bool - compression *compression.Config + compression *compression.Compressor } func defaultConfig() *cfg { diff --git a/pkg/local_object_storage/blobstor/teststore/option.go b/pkg/local_object_storage/blobstor/teststore/option.go index fb1188751..3a38ecf82 100644 --- a/pkg/local_object_storage/blobstor/teststore/option.go +++ b/pkg/local_object_storage/blobstor/teststore/option.go @@ -17,8 +17,8 @@ type cfg struct { Type func() string Path func() string - SetCompressor func(cc *compression.Config) - Compressor func() *compression.Config + SetCompressor func(cc *compression.Compressor) + Compressor func() *compression.Compressor SetReportErrorFunc func(f func(context.Context, string, error)) Get func(common.GetPrm) (common.GetRes, error) @@ -45,11 +45,11 @@ func WithClose(f func() error) Option { return func(c *cfg) { c func WithType(f func() string) Option { return func(c *cfg) { c.overrides.Type = f } } func WithPath(f func() string) Option { return func(c *cfg) { c.overrides.Path = f } } -func WithSetCompressor(f func(*compression.Config)) Option { +func WithSetCompressor(f func(*compression.Compressor)) Option { return func(c *cfg) { c.overrides.SetCompressor = f } } -func WithCompressor(f func() *compression.Config) Option { +func WithCompressor(f func() *compression.Compressor) Option { return func(c *cfg) { c.overrides.Compressor = f } } diff --git a/pkg/local_object_storage/blobstor/teststore/teststore.go b/pkg/local_object_storage/blobstor/teststore/teststore.go index 626ba0023..190b6a876 100644 --- a/pkg/local_object_storage/blobstor/teststore/teststore.go +++ b/pkg/local_object_storage/blobstor/teststore/teststore.go @@ -116,7 +116,7 @@ func (s *TestStore) Path() string { } } -func (s *TestStore) SetCompressor(cc *compression.Config) { +func (s *TestStore) SetCompressor(cc *compression.Compressor) { s.mu.RLock() defer s.mu.RUnlock() switch { @@ -129,7 +129,7 @@ func (s *TestStore) SetCompressor(cc *compression.Config) { } } -func (s *TestStore) Compressor() *compression.Config { +func (s *TestStore) Compressor() *compression.Compressor { s.mu.RLock() defer s.mu.RUnlock() switch { diff --git a/pkg/local_object_storage/writecache/writecache.go 
b/pkg/local_object_storage/writecache/writecache.go index 70b17eb8e..7ed511318 100644 --- a/pkg/local_object_storage/writecache/writecache.go +++ b/pkg/local_object_storage/writecache/writecache.go @@ -52,7 +52,7 @@ type Cache interface { // MainStorage is the interface of the underlying storage of Cache implementations. type MainStorage interface { - Compressor() *compression.Config + Compressor() *compression.Compressor Exists(context.Context, common.ExistsPrm) (common.ExistsRes, error) Put(context.Context, common.PutPrm) (common.PutRes, error) } From 0ee7467da5c9cda8c398a011fa00c86657b7cbc3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 14 Apr 2025 17:36:51 +0300 Subject: [PATCH 554/591] [#1715] config: Add `compression` config section To group all `compression_*` parameters together. Change-Id: I11ad9600f731903753fef1adfbc0328ef75bbf87 Signed-off-by: Dmitrii Stepanov --- .../maintenance/zombie/storage_engine.go | 7 +-- cmd/frostfs-node/config.go | 18 +----- cmd/frostfs-node/config/engine/config_test.go | 17 +++--- .../config/engine/shard/config.go | 56 ++++++------------- config/example/node.env | 2 +- config/example/node.json | 20 ++++--- config/example/node.yaml | 18 +++--- docs/storage-node-configuration.md | 51 +++++++++++------ pkg/local_object_storage/blobstor/blobstor.go | 47 +--------------- .../blobstor/blobstor_test.go | 11 +++- .../blobstor/compression/compress.go | 8 +-- .../blobstor/iterate_test.go | 5 +- 12 files changed, 105 insertions(+), 155 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go index fe75a6f6f..c54b331f3 100644 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -13,7 +13,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" - "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine" meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" @@ -135,11 +134,7 @@ func getMetabaseOpts(sh *shardconfig.Config) []meta.Option { func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option { result := []blobstor.Option{ - blobstor.WithCompressObjects(sh.Compress()), - blobstor.WithCompressionLevel(compression.Level(sh.CompressionLevel())), - blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()), - blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()), - blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()), + blobstor.WithCompression(sh.Compression()), blobstor.WithStorages(getSubStorages(ctx, sh)), blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index f32953c58..f80401b5b 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -129,13 +129,9 @@ type applicationConfiguration struct { } type shardCfg struct { - compress bool - compressionLevel compression.Level - estimateCompressibility bool - estimateCompressibilityThreshold float64 + 
compression compression.Config smallSizeObjectLimit uint64 - uncompressableContentType []string refillMetabase bool refillMetabaseWorkersCount int mode shardmode.Mode @@ -274,11 +270,7 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, source *s target.refillMetabase = source.RefillMetabase() target.refillMetabaseWorkersCount = source.RefillMetabaseWorkersCount() target.mode = source.Mode() - target.compress = source.Compress() - target.compressionLevel = compression.Level(source.CompressionLevel()) - target.estimateCompressibility = source.EstimateCompressibility() - target.estimateCompressibilityThreshold = source.EstimateCompressibilityThreshold() - target.uncompressableContentType = source.UncompressableContentTypes() + target.compression = source.Compression() target.smallSizeObjectLimit = source.SmallSizeLimit() a.setShardWriteCacheConfig(&target, source) @@ -1029,11 +1021,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID ss := c.getSubstorageOpts(ctx, shCfg) blobstoreOpts := []blobstor.Option{ - blobstor.WithCompressObjects(shCfg.compress), - blobstor.WithCompressionLevel(shCfg.compressionLevel), - blobstor.WithUncompressableContentTypes(shCfg.uncompressableContentType), - blobstor.WithCompressibilityEstimate(shCfg.estimateCompressibility), - blobstor.WithCompressibilityEstimateThreshold(shCfg.estimateCompressibilityThreshold), + blobstor.WithCompression(shCfg.compression), blobstor.WithStorages(ss), blobstor.WithLogger(c.log), } diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go index afadb4c99..401c54edc 100644 --- a/cmd/frostfs-node/config/engine/config_test.go +++ b/cmd/frostfs-node/config/engine/config_test.go @@ -15,6 +15,7 @@ import ( writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" "github.com/stretchr/testify/require" ) @@ -100,11 +101,11 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 100, meta.BoltDB().MaxBatchSize()) require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, true, sc.Compress()) - require.Equal(t, "fastest", sc.CompressionLevel()) - require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes()) - require.Equal(t, true, sc.EstimateCompressibility()) - require.Equal(t, float64(0.7), sc.EstimateCompressibilityThreshold()) + require.Equal(t, true, sc.Compression().Enabled) + require.Equal(t, compression.LevelFastest, sc.Compression().Level) + require.Equal(t, []string{"audio/*", "video/*"}, sc.Compression().UncompressableContentTypes) + require.Equal(t, true, sc.Compression().EstimateCompressibility) + require.Equal(t, float64(0.7), sc.Compression().EstimateCompressibilityThreshold) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) @@ -237,9 +238,9 @@ func TestEngineSection(t *testing.T) { require.Equal(t, 200, meta.BoltDB().MaxBatchSize()) require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay()) - require.Equal(t, false, sc.Compress()) - require.Equal(t, "", sc.CompressionLevel()) - require.Equal(t, []string(nil), sc.UncompressableContentTypes()) + require.Equal(t, false, 
sc.Compression().Enabled) + require.Equal(t, compression.LevelDefault, sc.Compression().Level) + require.Equal(t, []string(nil), sc.Compression().UncompressableContentTypes) require.EqualValues(t, 102400, sc.SmallSizeLimit()) require.Equal(t, 2, len(ss)) diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go index 14e91f01f..d42646da7 100644 --- a/cmd/frostfs-node/config/engine/shard/config.go +++ b/cmd/frostfs-node/config/engine/shard/config.go @@ -8,6 +8,7 @@ import ( metabaseconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/metabase" piloramaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/pilorama" writecacheconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/writecache" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" ) @@ -27,52 +28,27 @@ func From(c *config.Config) *Config { return (*Config)(c) } -// Compress returns the value of "compress" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) Compress() bool { - return config.BoolSafe( - (*config.Config)(x), - "compress", - ) -} - -// CompressionLevel returns the value of "compression_level" config parameter. -// -// Returns empty string if the value is not a valid string. -func (x *Config) CompressionLevel() string { - return config.StringSafe( - (*config.Config)(x), - "compression_level", - ) -} - -// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter. -// -// Returns nil if a the value is missing or is invalid. -func (x *Config) UncompressableContentTypes() []string { - return config.StringSliceSafe( - (*config.Config)(x), - "compression_exclude_content_types") -} - -// EstimateCompressibility returns the value of "estimate_compressibility" config parameter. -// -// Returns false if the value is not a valid bool. -func (x *Config) EstimateCompressibility() bool { - return config.BoolSafe( - (*config.Config)(x), - "compression_estimate_compressibility", - ) +func (x *Config) Compression() compression.Config { + cc := (*config.Config)(x).Sub("compression") + if cc == nil { + return compression.Config{} + } + return compression.Config{ + Enabled: config.BoolSafe(cc, "enabled"), + UncompressableContentTypes: config.StringSliceSafe(cc, "exclude_content_types"), + Level: compression.Level(config.StringSafe(cc, "level")), + EstimateCompressibility: config.BoolSafe(cc, "estimate_compressibility"), + EstimateCompressibilityThreshold: estimateCompressibilityThreshold(cc), + } } // EstimateCompressibilityThreshold returns the value of "estimate_compressibility_threshold" config parameter. // // Returns EstimateCompressibilityThresholdDefault if the value is not defined, not valid float or not in range [0.0; 1.0]. 
-func (x *Config) EstimateCompressibilityThreshold() float64 { +func estimateCompressibilityThreshold(c *config.Config) float64 { v := config.FloatOrDefault( - (*config.Config)(x), - "compression_estimate_compressibility_threshold", + c, + "estimate_compressibility_threshold", EstimateCompressibilityThresholdDefault) if v < 0.0 || v > 1.0 { return EstimateCompressibilityThresholdDefault diff --git a/config/example/node.env b/config/example/node.env index fb4fc69dd..9d054fe78 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -122,7 +122,7 @@ FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100 FROSTFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms ### Blobstor config -FROSTFS_STORAGE_SHARD_0_COMPRESS=true +FROSTFS_STORAGE_SHARD_0_COMPRESSION_ENABLED=true FROSTFS_STORAGE_SHARD_0_COMPRESSION_LEVEL=fastest FROSTFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*" FROSTFS_STORAGE_SHARD_0_COMPRESSION_ESTIMATE_COMPRESSIBILITY=true diff --git a/config/example/node.json b/config/example/node.json index 1e9dd7835..110e99ee8 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -183,13 +183,15 @@ "max_batch_size": 100, "max_batch_delay": "10ms" }, - "compress": true, - "compression_level": "fastest", - "compression_exclude_content_types": [ - "audio/*", "video/*" - ], - "compression_estimate_compressibility": true, - "compression_estimate_compressibility_threshold": 0.7, + "compression": { + "enabled": true, + "level": "fastest", + "exclude_content_types": [ + "audio/*", "video/*" + ], + "estimate_compressibility": true, + "estimate_compressibility_threshold": 0.7 + }, "small_object_size": 102400, "blobstor": [ { @@ -323,7 +325,9 @@ "max_batch_size": 200, "max_batch_delay": "20ms" }, - "compress": false, + "compression": { + "enabled": false + }, "small_object_size": 102400, "blobstor": [ { diff --git a/config/example/node.yaml b/config/example/node.yaml index 26c6e248c..de5eaa133 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -160,7 +160,8 @@ storage: max_batch_delay: 5ms # maximum delay for a batch of operations to be executed max_batch_size: 100 # maximum amount of operations in a single batch - compress: false # turn on/off zstd compression of stored objects + compression: + enabled: false # turn on/off zstd compression of stored objects small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes blobstor: @@ -202,13 +203,14 @@ storage: max_batch_size: 100 max_batch_delay: 10ms - compress: true # turn on/off zstd compression of stored objects - compression_level: fastest - compression_exclude_content_types: - - audio/* - - video/* - compression_estimate_compressibility: true - compression_estimate_compressibility_threshold: 0.7 + compression: + enabled: true # turn on/off zstd compression of stored objects + level: fastest + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 blobstor: - type: blobovnicza diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 19f6ee66d..1494d2fca 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -185,22 +185,41 @@ Contains configuration for each shard. Keys must be consecutive numbers starting `default` subsection has the same format and specifies defaults for missing values. The following table describes configuration for each shard. 
-| Parameter | Type | Default value | Description | -| ------------------------------------------------ | ------------------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `compress` | `bool` | `false` | Flag to enable compression. | -| `compression_level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | -| `compression_exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | -| `compression_estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | -| `compression_estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | -| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | -| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | -| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | -| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | -| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | -| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | -| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | -| `gc` | [GC config](#gc-subsection) | | GC configuration. | -| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | +| Parameter | Type | Default value | Description | +| ------------------------------ | --------------------------------------------- | ------------- | --------------------------------------------------------------------------------------------------------- | +| `compression` | [Compression config](#compression-subsection) | | Compression config. | +| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` | +| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. | +| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. | +| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. | +| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. | +| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. | +| `small_object_size` | `size` | `1M` | Maximum size of an object stored in blobovnicza tree. | +| `gc` | [GC config](#gc-subsection) | | GC configuration. | +| `limits` | [Shard limits config](#limits-subsection) | | Shard limits configuration. | + +### `compression` subsection + +Contains compression config. + +```yaml +compression: + enabled: true + level: smallest_size + exclude_content_types: + - audio/* + - video/* + estimate_compressibility: true + estimate_compressibility_threshold: 0.7 +``` + +| Parameter | Type | Default value | Description | +| ------------------------------------ | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | `bool` | `false` | Flag to enable compression. | +| `level` | `string` | `optimal` | Compression level. Available values are `optimal`, `fastest`, `smallest_size`. | +| `exclude_content_types` | `[]string` | | List of content-types to disable compression for. Content-type is taken from `Content-Type` object attribute. Each element can contain a star `*` as a first (last) character, which matches any prefix (suffix). | +| `estimate_compressibility` | `bool` | `false` | If `true`, then noramalized compressibility estimation is used to decide compress data or not. | +| `estimate_compressibility_threshold` | `float` | `0.1` | Normilized compressibility estimate threshold: data will compress if estimation if greater than this value. | ### `blobstor` subsection diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index cf67c6fe9..edb2c1946 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -95,52 +95,9 @@ func WithLogger(l *logger.Logger) Option { } } -// WithCompressObjects returns option to toggle -// compression of the stored objects. -// -// If true, Zstandard algorithm is used for data compression. -// -// If compressor (decompressor) creation failed, -// the uncompressed option will be used, and the error -// is recorded in the provided log. -func WithCompressObjects(comp bool) Option { +func WithCompression(comp compression.Config) Option { return func(c *cfg) { - c.compression.Enabled = comp - } -} - -func WithCompressionLevel(level compression.Level) Option { - return func(c *cfg) { - c.compression.Level = level - } -} - -// WithCompressibilityEstimate returns an option to use -// normilized compressibility estimate to decide compress -// data or not. -// -// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 -func WithCompressibilityEstimate(v bool) Option { - return func(c *cfg) { - c.compression.UseCompressEstimation = v - } -} - -// WithCompressibilityEstimateThreshold returns an option to set -// normilized compressibility estimate threshold. 
-// -// See https://github.com/klauspost/compress/blob/v1.17.2/compressible.go#L5 -func WithCompressibilityEstimateThreshold(threshold float64) Option { - return func(c *cfg) { - c.compression.CompressEstimationThreshold = threshold - } -} - -// WithUncompressableContentTypes returns option to disable decompression -// for specific content types as seen by object.AttributeContentType attribute. -func WithUncompressableContentTypes(values []string) Option { - return func(c *cfg) { - c.compression.UncompressableContentTypes = values + c.compression.Config = comp } } diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index 6cc56fa3b..6ddeb6f00 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -9,6 +9,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -51,7 +52,9 @@ func TestCompression(t *testing.T) { newBlobStor := func(t *testing.T, compress bool) *BlobStor { bs := New( - WithCompressObjects(compress), + WithCompression(compression.Config{ + Enabled: compress, + }), WithStorages(defaultStorages(dir, smallSizeLimit))) require.NoError(t, bs.Open(context.Background(), mode.ReadWrite)) require.NoError(t, bs.Init(context.Background())) @@ -113,8 +116,10 @@ func TestBlobstor_needsCompression(t *testing.T) { dir := t.TempDir() bs := New( - WithCompressObjects(compress), - WithUncompressableContentTypes(ct), + WithCompression(compression.Config{ + Enabled: compress, + UncompressableContentTypes: ct, + }), WithStorages([]SubStorage{ { Storage: blobovniczatree.NewBlobovniczaTree( diff --git a/pkg/local_object_storage/blobstor/compression/compress.go b/pkg/local_object_storage/blobstor/compression/compress.go index efe84ea2a..c76cec9a1 100644 --- a/pkg/local_object_storage/blobstor/compression/compress.go +++ b/pkg/local_object_storage/blobstor/compression/compress.go @@ -32,8 +32,8 @@ type Config struct { UncompressableContentTypes []string Level Level - UseCompressEstimation bool - CompressEstimationThreshold float64 + EstimateCompressibility bool + EstimateCompressibilityThreshold float64 } // zstdFrameMagic contains first 4 bytes of any compressed object @@ -101,9 +101,9 @@ func (c *Compressor) Compress(data []byte) []byte { if c == nil || !c.Enabled { return data } - if c.UseCompressEstimation { + if c.EstimateCompressibility { estimated := compress.Estimate(data) - if estimated >= c.CompressEstimationThreshold { + if estimated >= c.EstimateCompressibilityThreshold { return c.compress(data) } return data diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go index ccfa510fe..2786321a8 100644 --- a/pkg/local_object_storage/blobstor/iterate_test.go +++ b/pkg/local_object_storage/blobstor/iterate_test.go @@ -8,6 +8,7 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common" + 
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode" @@ -24,7 +25,9 @@ func TestIterateObjects(t *testing.T) { // create BlobStor instance blobStor := New( WithStorages(defaultStorages(p, smalSz)), - WithCompressObjects(true), + WithCompression(compression.Config{ + Enabled: true, + }), ) defer os.RemoveAll(p) From 410b6f70bae0e44da29d328ec3ba532042617c8b Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Tue, 15 Apr 2025 17:56:47 +0300 Subject: [PATCH 555/591] [#1716] cli: Return trace ID on operation failure Close #1716 Change-Id: I293d0cc6b7331517e8cde42eae07d65384976da5 Signed-off-by: Aleksey Savchuk --- cmd/internal/common/exit.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cmd/internal/common/exit.go b/cmd/internal/common/exit.go index b8acf0143..13f447af4 100644 --- a/cmd/internal/common/exit.go +++ b/cmd/internal/common/exit.go @@ -51,8 +51,13 @@ func ExitOnErr(cmd *cobra.Command, errFmt string, err error) { } cmd.PrintErrln(err) - if cmd.PersistentPostRun != nil { - cmd.PersistentPostRun(cmd, nil) + for p := cmd; p != nil; p = p.Parent() { + if p.PersistentPostRun != nil { + p.PersistentPostRun(cmd, nil) + if !cobra.EnableTraverseRunHooks { + break + } + } } os.Exit(code) } From a285d8924f87da6af50ad3fc28b5ac5726fc92e8 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Mon, 14 Apr 2025 12:38:52 +0300 Subject: [PATCH 556/591] [#1693] node: Replace conditional panics with asserts Change-Id: I5024705fd1693d00cb9241235030a73984c2a7e1 Signed-off-by: Ekaterina Lebedeva --- cmd/frostfs-node/netmap.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 6d57edcce..82b799e4c 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -8,6 +8,7 @@ import ( "net" "sync/atomic" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap" @@ -104,9 +105,7 @@ func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) { v := s.nodeInfo.Load() if v != nil { res, ok = v.(netmapSDK.NodeInfo) - if !ok { - panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v)) - } + assert.True(ok, fmt.Sprintf("unexpected value in atomic node info state: %T", v)) } return From fc6abe30b892491fce0053492220e2ec91e11236 Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 8 Apr 2025 16:47:52 +0300 Subject: [PATCH 557/591] [#1693] storage: Replace conditional panics with asserts Change-Id: I9d8ccde3c71fca716856c7bfc53da20ee0542f20 Signed-off-by: Ekaterina Lebedeva --- .../blobstor/fstree/counter.go | 17 +++++++---------- pkg/local_object_storage/metabase/lock.go | 5 ++--- pkg/local_object_storage/metabase/util.go | 5 ++--- pkg/local_object_storage/writecache/limiter.go | 16 ++++++---------- 4 files changed, 17 insertions(+), 26 deletions(-) diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go index b5dbc9e40..3caee7ee1 100644 --- a/pkg/local_object_storage/blobstor/fstree/counter.go +++ 
b/pkg/local_object_storage/blobstor/fstree/counter.go @@ -2,6 +2,8 @@ package fstree import ( "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) // FileCounter used to count files in FSTree. The implementation must be thread-safe. @@ -52,16 +54,11 @@ func (c *SimpleCounter) Dec(size uint64) { c.mtx.Lock() defer c.mtx.Unlock() - if c.count > 0 { - c.count-- - } else { - panic("fstree.SimpleCounter: invalid count") - } - if c.size >= size { - c.size -= size - } else { - panic("fstree.SimpleCounter: invalid size") - } + assert.True(c.count > 0, "fstree.SimpleCounter: invalid count") + c.count-- + + assert.True(c.size >= size, "fstree.SimpleCounter: invalid size") + c.size -= size } func (c *SimpleCounter) CountSize() (uint64, uint64) { diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go index f73c2b4f6..f4cb9e53b 100644 --- a/pkg/local_object_storage/metabase/lock.go +++ b/pkg/local_object_storage/metabase/lock.go @@ -7,6 +7,7 @@ import ( "slices" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -63,9 +64,7 @@ func (db *DB) Lock(ctx context.Context, cnr cid.ID, locker oid.ID, locked []oid. return ErrReadOnlyMode } - if len(locked) == 0 { - panic("empty locked list") - } + assert.False(len(locked) == 0, "empty locked list") err := db.lockInternal(locked, cnr, locker) success = err == nil diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go index 80851f1c4..4ad83332b 100644 --- a/pkg/local_object_storage/metabase/util.go +++ b/pkg/local_object_storage/metabase/util.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -278,9 +279,7 @@ func objectKey(obj oid.ID, key []byte) []byte { // // firstIrregularObjectType(tx, cnr, obj) usage allows getting object type. 
func firstIrregularObjectType(tx *bbolt.Tx, idCnr cid.ID, objs ...[]byte) objectSDK.Type { - if len(objs) == 0 { - panic("empty object list in firstIrregularObjectType") - } + assert.False(len(objs) == 0, "empty object list in firstIrregularObjectType") var keys [2][1 + cidSize]byte diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go index ddc4101be..0e020b36e 100644 --- a/pkg/local_object_storage/writecache/limiter.go +++ b/pkg/local_object_storage/writecache/limiter.go @@ -3,6 +3,8 @@ package writecache import ( "errors" "sync" + + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" ) var errLimiterClosed = errors.New("acquire failed: limiter closed") @@ -45,17 +47,11 @@ func (l *flushLimiter) release(size uint64) { l.cond.L.Lock() defer l.cond.L.Unlock() - if l.size >= size { - l.size -= size - } else { - panic("flushLimiter: invalid size") - } + assert.True(l.size >= size, "flushLimiter: invalid size") + l.size -= size - if l.count > 0 { - l.count-- - } else { - panic("flushLimiter: invalid count") - } + assert.True(l.count > 0, "flushLimiter: invalid count") + l.count-- l.cond.Broadcast() } From bc045b29e21b458c9d3f244f05a16bce9869059e Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 8 Apr 2025 16:54:05 +0300 Subject: [PATCH 558/591] [#1693] services: Replace conditional panics with asserts Change-Id: Ic79609e6ad867caa88ad245b3014aa7fc32e05a8 Signed-off-by: Ekaterina Lebedeva --- pkg/services/netmap/executor.go | 11 +++++++---- pkg/services/object_manager/tombstone/constructor.go | 5 ++--- .../object_manager/tombstone/source/source.go | 5 ++--- pkg/services/policer/policer.go | 6 +++--- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go index 44101a153..1b92fdaad 100644 --- a/pkg/services/netmap/executor.go +++ b/pkg/services/netmap/executor.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/version" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/util/response" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/netmap" @@ -46,10 +47,12 @@ type NetworkInfo interface { } func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo, respSvc *response.Service) Server { - if s == nil || netInfo == nil || !version.IsValid(v) || respSvc == nil { - // this should never happen, otherwise it programmers bug - panic("can't create netmap execution service") - } + // this should never happen, otherwise it's a programmer's bug + msg := "BUG: can't create netmap execution service" + assert.False(s == nil, msg, "node state is nil") + assert.False(netInfo == nil, msg, "network info is nil") + assert.False(respSvc == nil, msg, "response service is nil") + assert.True(version.IsValid(v), msg, "invalid version") res := &executorSvc{ state: s, diff --git a/pkg/services/object_manager/tombstone/constructor.go b/pkg/services/object_manager/tombstone/constructor.go index 67ddf316f..2147a32fe 100644 --- a/pkg/services/object_manager/tombstone/constructor.go +++ b/pkg/services/object_manager/tombstone/constructor.go @@ -3,6 +3,7 @@ package tombstone import ( "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -49,9 +50,7 @@ func NewChecker(oo ...Option) *ExpirationChecker { 
panicOnNil(cfg.tsSource, "Tombstone source") cache, err := lru.New[string, uint64](cfg.cacheSize) - if err != nil { - panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err)) - } + assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", cfg.cacheSize)) return &ExpirationChecker{ cache: cache, diff --git a/pkg/services/object_manager/tombstone/source/source.go b/pkg/services/object_manager/tombstone/source/source.go index 1ff07b05a..975941847 100644 --- a/pkg/services/object_manager/tombstone/source/source.go +++ b/pkg/services/object_manager/tombstone/source/source.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" @@ -38,9 +39,7 @@ func (s *TombstoneSourcePrm) SetGetService(v *getsvc.Service) { // Panics if any of the provided options does not allow // constructing a valid tombstone local Source. func NewSource(p TombstoneSourcePrm) Source { - if p.s == nil { - panic("Tombstone source: nil object service") - } + assert.False(p.s == nil, "Tombstone source: nil object service") return Source(p) } diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index 4e8bacfec..d18b71a23 100644 --- a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -1,9 +1,11 @@ package policer import ( + "fmt" "sync" "time" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/zap" @@ -57,9 +59,7 @@ func New(opts ...Option) *Policer { c.log = c.log.With(zap.String("component", "Object Policer")) cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) - if err != nil { - panic(err) - } + assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) return &Policer{ cfg: c, From 5dd8d7e87ab20ff3d7e1c4c2d93982a1ae79381a Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 8 Apr 2025 16:54:51 +0300 Subject: [PATCH 559/591] [#1693] network: Replace conditional panics with asserts Change-Id: Icba39aa2ed0048d63c6efed398273627e1e4fbbe Signed-off-by: Ekaterina Lebedeva --- pkg/network/address.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/network/address.go b/pkg/network/address.go index cb83a813d..4643eef15 100644 --- a/pkg/network/address.go +++ b/pkg/network/address.go @@ -2,11 +2,11 @@ package network import ( "errors" - "fmt" "net" "net/url" "strings" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/rpc/client" "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -44,11 +44,9 @@ func (a Address) equal(addr Address) bool { // See also FromString. 
func (a Address) URIAddr() string { _, host, err := manet.DialArgs(a.ma) - if err != nil { - // the only correct way to construct Address is AddressFromString - // which makes this error appear unexpected - panic(fmt.Errorf("could not get host addr: %w", err)) - } + // the only correct way to construct Address is AddressFromString + // which makes this error appear unexpected + assert.NoError(err, "could not get host addr") if !a.IsTLSEnabled() { return host From e45382b0c18ef6f5b116df4cd89f7813d271d54d Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Tue, 8 Apr 2025 16:55:56 +0300 Subject: [PATCH 560/591] [#1693] util: Replace conditional panics with asserts Change-Id: I13b566cde3e6d43d8a75aa2e9b28e63b597adff9 Signed-off-by: Ekaterina Lebedeva --- pkg/util/keyer/dashboard.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/util/keyer/dashboard.go b/pkg/util/keyer/dashboard.go index b2942b52a..6337039a9 100644 --- a/pkg/util/keyer/dashboard.go +++ b/pkg/util/keyer/dashboard.go @@ -6,6 +6,7 @@ import ( "os" "text/tabwriter" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" "github.com/mr-tron/base58" "github.com/nspcc-dev/neo-go/pkg/crypto/hash" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" @@ -104,9 +105,7 @@ func (d Dashboard) PrettyPrint(uncompressed, useHex bool) { func base58ToHex(data string) string { val, err := base58.Decode(data) - if err != nil { - panic("produced incorrect base58 value") - } + assert.NoError(err, "produced incorrect base58 value") return hex.EncodeToString(val) } From 36fb15b9a4b1b7771b2011c745222326ee6e9299 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Wed, 16 Apr 2025 11:55:00 +0300 Subject: [PATCH 561/591] [#1689] engine: Return error if object is locked during inhume Return `object is locked` error if object doesn't exists but is locked, since the locked index may be populated even when the object itself doesn't exist. Change-Id: If1a145c6efead9873acd33bb4fd22cf6175cbabd Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/inhume.go | 11 ++-- .../engine/inhume_test.go | 55 +++++++++++++++++++ 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index 6ec9a4ef0..e5a2396e1 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -186,10 +186,6 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL default: } - if !objectExists { - return - } - if checkLocked { if isLocked, err := sh.IsLocked(ctx, addr); err != nil { e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck, @@ -202,6 +198,13 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL } } + // This exit point must come after checking if the object is locked, + // since the locked index may be populated even if the object doesn't + // exist. + if !objectExists { + return + } + ids = append(ids, sh.ID().String()) // Continue if it's a root object. 
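A condensed sketch of the behaviour this reordering guarantees, mirroring the table-driven test added below. The helper lockThenInhume and the exact SDK import paths are assumptions; the engine, oid and apistatus calls follow the ones used in that test.

package example

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// lockThenInhume locks an object that was never stored and then tries to
// inhume it. se is assumed to be an opened, initialized engine; addr refers
// to the missing object and lockID to the lock object.
func lockThenInhume(ctx context.Context, se *engine.StorageEngine, addr oid.Address, lockID oid.ID) {
	// Locking populates the locked index even though the object itself is absent.
	if err := se.Lock(ctx, addr.Container(), lockID, []oid.ID{addr.Object()}); err != nil {
		panic(err)
	}

	var prm engine.InhumePrm
	prm.MarkAsGarbage(addr)

	// Before this change the "object does not exist" branch returned early and
	// skipped the lock check; now Inhume reports that the object is locked.
	var errLocked *apistatus.ObjectLocked
	err := se.Inhume(ctx, prm)
	fmt.Println(errors.As(err, &errLocked)) // expected: true
}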
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 10cebfb52..0601a43f2 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -242,3 +242,58 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { b.StopTimer() } } + +func TestInhumeIfObjectDoesntExist(t *testing.T) { + t.Run("object is locked", func(t *testing.T) { + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, true, false) + }) + t.Run("force inhume without tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, false, true) + }) + t.Run("force inhume with tombstone", func(t *testing.T) { + testInhumeLockedIfObjectDoesntExist(t, true, true) + }) + }) +} + +func testInhumeLockedIfObjectDoesntExist(t *testing.T, withTombstone, withForce bool) { + t.Parallel() + + var ( + errLocked *apistatus.ObjectLocked + inhumePrm InhumePrm + ctx = context.Background() + container = cidtest.ID() + object = oidtest.Address() + lock = oidtest.ID() + tombstone = oidtest.Address() + ) + object.SetContainer(container) + tombstone.SetContainer(container) + + engine := testNewEngine(t).setShardsNum(t, 4).prepare(t).engine + defer func() { require.NoError(t, engine.Close(ctx)) }() + + err := engine.Lock(ctx, container, lock, []oid.ID{object.Object()}) + require.NoError(t, err) + + if withTombstone { + inhumePrm.WithTarget(tombstone, object) + } else { + inhumePrm.MarkAsGarbage(object) + } + if withForce { + inhumePrm.WithForceRemoval() + } + + err = engine.Inhume(ctx, inhumePrm) + if withForce { + require.NoError(t, err) + } else { + require.ErrorAs(t, err, &errLocked) + } +} From 100eb8b654b55286caa5bf4aa18c358eab12fd26 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Wed, 29 Jan 2025 15:39:28 +0300 Subject: [PATCH 562/591] [#1619] logger: Set tags for `node` components Change-Id: I55ffcce9d2a74fdd47621674739b07f2e20199e3 Signed-off-by: Anton Nikiforov --- .../maintenance/zombie/storage_engine.go | 3 ++- cmd/frostfs-node/config.go | 13 ++++----- cmd/frostfs-node/config/logger/config_test.go | 3 +++ cmd/frostfs-node/morph.go | 14 +++++----- cmd/frostfs-node/object.go | 14 +++++----- cmd/frostfs-node/session.go | 3 ++- cmd/frostfs-node/tree.go | 3 ++- config/example/node.env | 2 ++ config/example/node.json | 8 +++++- config/example/node.yaml | 3 +++ docs/storage-node-configuration.md | 16 ++++++++--- .../blobovnicza/blobovnicza.go | 2 +- .../blobovniczatree/concurrency_test.go | 3 ++- .../blobstor/blobovniczatree/exists_test.go | 3 ++- .../blobstor/blobovniczatree/generic_test.go | 6 +++-- .../blobstor/blobovniczatree/option.go | 11 +++++--- .../blobovniczatree/rebuild_failover_test.go | 3 ++- .../blobstor/blobovniczatree/rebuild_test.go | 27 ++++++++++++------- pkg/local_object_storage/blobstor/blobstor.go | 2 +- .../blobstor/fstree/option.go | 3 +-- .../engine/engine_test.go | 3 ++- .../shard/gc_internal_test.go | 3 ++- pkg/local_object_storage/shard/lock_test.go | 3 ++- pkg/local_object_storage/shard/range_test.go | 3 ++- pkg/local_object_storage/shard/shard.go | 2 +- pkg/local_object_storage/shard/shard_test.go | 3 ++- .../writecache/options.go | 3 +-- pkg/services/object/delete/service.go | 2 +- pkg/services/object/get/service.go | 2 +- pkg/services/object/get/v2/service.go | 2 +- pkg/services/object/search/service.go | 2 +- 
pkg/services/policer/policer.go | 3 --- pkg/services/replicator/replicator.go | 3 --- pkg/services/session/executor.go | 5 +--- pkg/util/logger/logger.go | 1 + pkg/util/logger/tag_string.go | 19 +++++++++++-- pkg/util/logger/tags.go | 27 ++++++++++++++----- 37 files changed, 151 insertions(+), 77 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go index c54b331f3..5be34d502 100644 --- a/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go +++ b/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go @@ -159,7 +159,8 @@ func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubS blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()), blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()), blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()), - blobovniczatree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithBlobovniczaLogger(logger.NewLoggerWrapper(zap.NewNop())), + blobovniczatree.WithBlobovniczaTreeLogger(logger.NewLoggerWrapper(zap.NewNop())), blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()), } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index f80401b5b..fae1ca1ca 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -891,7 +891,7 @@ func (c *cfg) engineOpts() []engine.Option { opts = append(opts, engine.WithErrorThreshold(c.EngineCfg.errorThreshold), - engine.WithLogger(c.log), + engine.WithLogger(c.log.WithTag(logger.TagEngine)), engine.WithLowMemoryConsumption(c.EngineCfg.lowMem), ) @@ -928,7 +928,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option { writecache.WithMaxCacheSize(wcRead.sizeLimit), writecache.WithMaxCacheCount(wcRead.countLimit), writecache.WithNoSync(wcRead.noSync), - writecache.WithLogger(c.log), + writecache.WithLogger(c.log.WithTag(logger.TagWriteCache)), writecache.WithQoSLimiter(shCfg.limiter), ) } @@ -968,7 +968,8 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval), blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount), blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout), - blobovniczatree.WithLogger(c.log), + blobovniczatree.WithBlobovniczaLogger(c.log.WithTag(logger.TagBlobovnicza)), + blobovniczatree.WithBlobovniczaTreeLogger(c.log.WithTag(logger.TagBlobovniczaTree)), blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit), } @@ -991,7 +992,7 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor. 
fstree.WithPerm(sRead.perm), fstree.WithDepth(sRead.depth), fstree.WithNoSync(sRead.noSync), - fstree.WithLogger(c.log), + fstree.WithLogger(c.log.WithTag(logger.TagFSTree)), } if c.metricsCollector != nil { fstreeOpts = append(fstreeOpts, @@ -1023,7 +1024,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID blobstoreOpts := []blobstor.Option{ blobstor.WithCompression(shCfg.compression), blobstor.WithStorages(ss), - blobstor.WithLogger(c.log), + blobstor.WithLogger(c.log.WithTag(logger.TagBlobstor)), } if c.metricsCollector != nil { blobstoreOpts = append(blobstoreOpts, blobstor.WithMetrics(lsmetrics.NewBlobstoreMetrics(c.metricsCollector.Blobstore()))) @@ -1048,7 +1049,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID var sh shardOptsWithID sh.configID = shCfg.id() sh.shOpts = []shard.Option{ - shard.WithLogger(c.log), + shard.WithLogger(c.log.WithTag(logger.TagShard)), shard.WithRefillMetabase(shCfg.refillMetabase), shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount), shard.WithMode(shCfg.mode), diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go index ffe8ac693..796ad529e 100644 --- a/cmd/frostfs-node/config/logger/config_test.go +++ b/cmd/frostfs-node/config/logger/config_test.go @@ -22,6 +22,9 @@ func TestLoggerSection_Level(t *testing.T) { require.Equal(t, "debug", loggerconfig.Level(c)) require.Equal(t, "journald", loggerconfig.Destination(c)) require.Equal(t, true, loggerconfig.Timestamp(c)) + tags := loggerconfig.Tags(c) + require.Equal(t, "main, morph", tags[0][0]) + require.Equal(t, "debug", tags[0][1]) } configtest.ForEachFileType(path, fileConfigTest) diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index d3c0f7b81..917cf6fc0 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" netmapEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/netmap" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/subscriber" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/rand" "github.com/nspcc-dev/neo-go/pkg/core/block" "github.com/nspcc-dev/neo-go/pkg/core/state" @@ -84,7 +85,7 @@ func initMorphClient(ctx context.Context, c *cfg) { cli, err := client.New(ctx, c.key, client.WithDialTimeout(morphconfig.DialTimeout(c.appCfg)), - client.WithLogger(c.log), + client.WithLogger(c.log.WithTag(logger.TagMorph)), client.WithMetrics(c.metricsCollector.MorphClientMetrics()), client.WithEndpoints(addresses...), client.WithConnLostCallback(func() { @@ -165,6 +166,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { err error subs subscriber.Subscriber ) + log := c.log.WithTag(logger.TagMorph) fromSideChainBlock, err := c.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { @@ -173,14 +175,14 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { } subs, err = subscriber.New(ctx, &subscriber.Params{ - Log: c.log, + Log: log, StartFromBlock: fromSideChainBlock, Client: c.cfgMorph.client, }) fatalOnErr(err) lis, err := event.NewListener(event.ListenerParams{ - Logger: c.log, + Logger: log, Subscriber: subs, }) fatalOnErr(err) @@ -198,7 +200,7 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { setNetmapNotificationParser(c, newEpochNotification, func(src *state.ContainedNotificationEvent) (event.Event, error) { res, err 
:= netmapEvent.ParseNewEpoch(src) if err == nil { - c.log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, + log.Info(ctx, logs.FrostFSNodeNewEpochEventFromSidechain, zap.Uint64("number", res.(netmapEvent.NewEpoch).EpochNumber()), ) } @@ -209,11 +211,11 @@ func listenMorphNotifications(ctx context.Context, c *cfg) { registerNotificationHandlers(c.cfgContainer.scriptHash, lis, c.cfgContainer.parsers, c.cfgContainer.subscribers) registerBlockHandler(lis, func(ctx context.Context, block *block.Block) { - c.log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) + log.Debug(ctx, logs.FrostFSNodeNewBlock, zap.Uint32("index", block.Index)) err = c.persistate.SetUInt32(persistateSideChainLastBlockKey, block.Index) if err != nil { - c.log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, + log.Warn(ctx, logs.FrostFSNodeCantUpdatePersistentState, zap.String("chain", "side"), zap.Uint32("block_index", block.Index)) } diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go index 527746d26..c33c02b3f 100644 --- a/cmd/frostfs-node/object.go +++ b/cmd/frostfs-node/object.go @@ -31,6 +31,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/policer" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" objectGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object/grpc" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" @@ -217,9 +218,8 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl } remoteReader := objectService.NewRemoteReader(keyStorage, clientConstructor) - pol := policer.New( - policer.WithLogger(c.log), + policer.WithLogger(c.log.WithTag(logger.TagPolicer)), policer.WithKeySpaceIterator(&keySpaceIterator{ng: ls}), policer.WithBuryFunc(buryFn), policer.WithContainerSource(c.cfgObject.cnrSource), @@ -291,7 +291,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa ls := c.cfgObject.cfgLocalStorage.localStorage return replicator.New( - replicator.WithLogger(c.log), + replicator.WithLogger(c.log.WithTag(logger.TagReplicator)), replicator.WithPutTimeout( replicatorconfig.PutTimeout(c.appCfg), ), @@ -348,7 +348,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav c.netMapSource, keyStorage, containerSource, - searchsvc.WithLogger(c.log), + searchsvc.WithLogger(c.log.WithTag(logger.TagSearchSvc)), ) } @@ -374,7 +374,7 @@ func createGetService(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Tra ), coreConstructor, containerSource, - getsvc.WithLogger(c.log)) + getsvc.WithLogger(c.log.WithTag(logger.TagGetSvc))) } func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorage) *getsvcV2.Service { @@ -385,7 +385,7 @@ func createGetServiceV2(c *cfg, sGet *getsvc.Service, keyStorage *util.KeyStorag c.netMapSource, c, c.cfgObject.cnrSource, - getsvcV2.WithLogger(c.log), + getsvcV2.WithLogger(c.log.WithTag(logger.TagGetSvc)), ) } @@ -402,7 +402,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi cfg: c, }, keyStorage, - deletesvc.WithLogger(c.log), + deletesvc.WithLogger(c.log.WithTag(logger.TagDeleteSvc)), ) } diff --git a/cmd/frostfs-node/session.go b/cmd/frostfs-node/session.go index 2f3c9cbfe..fbfe3f5e6 100644 --- a/cmd/frostfs-node/session.go +++ 
b/cmd/frostfs-node/session.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/persistent" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/session/storage/temporary" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session" sessionGRPC "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/session/grpc" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" @@ -55,7 +56,7 @@ func initSessionService(c *cfg) { server := sessionTransportGRPC.New( sessionSvc.NewSignService( &c.key.PrivateKey, - sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log), + sessionSvc.NewExecutionService(c.privateTokenStore, c.respSvc, c.log.WithTag(logger.TagSessionSvc)), ), ) diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go index 67d9c9df0..62af45389 100644 --- a/cmd/frostfs-node/tree.go +++ b/cmd/frostfs-node/tree.go @@ -14,6 +14,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event" containerEvent "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree" + "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "go.uber.org/zap" "google.golang.org/grpc" @@ -56,7 +57,7 @@ func initTreeService(c *cfg) { tree.WithFrostfsidSubjectProvider(c.frostfsidClient), tree.WithNetmapSource(c.netMapSource), tree.WithPrivateKey(&c.key.PrivateKey), - tree.WithLogger(c.log), + tree.WithLogger(c.log.WithTag(logger.TagTreeSvc)), tree.WithStorage(c.cfgObject.cfgLocalStorage.localStorage), tree.WithContainerCacheSize(treeConfig.CacheSize()), tree.WithReplicationTimeout(treeConfig.ReplicationTimeout()), diff --git a/config/example/node.env b/config/example/node.env index 9d054fe78..e7d7a6cc8 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -1,6 +1,8 @@ FROSTFS_LOGGER_LEVEL=debug FROSTFS_LOGGER_DESTINATION=journald FROSTFS_LOGGER_TIMESTAMP=true +FROSTFS_LOGGER_TAGS_0_NAMES="main, morph" +FROSTFS_LOGGER_TAGS_0_LEVEL="debug" FROSTFS_PPROF_ENABLED=true FROSTFS_PPROF_ADDRESS=localhost:6060 diff --git a/config/example/node.json b/config/example/node.json index 110e99ee8..3f7854d98 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -2,7 +2,13 @@ "logger": { "level": "debug", "destination": "journald", - "timestamp": true + "timestamp": true, + "tags": [ + { + "names": "main, morph", + "level": "debug" + } + ] }, "pprof": { "enabled": true, diff --git a/config/example/node.yaml b/config/example/node.yaml index de5eaa133..32f0cba67 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -2,6 +2,9 @@ logger: level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" destination: journald # logger destination: one of "stdout" (default), "journald" timestamp: true + tags: + - names: "main, morph" + level: debug systemdnotify: enabled: true diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 1494d2fca..3e770457b 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -112,11 +112,21 @@ Contains logger parameters. 
```yaml logger: level: info + tags: + - names: "main, morph" + level: debug ``` -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|---------------------------------------------------------------------------------------------------| -| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| Parameter | Type | Default value | Description | +|-----------|-----------------------------------------------|---------------|---------------------------------------------------------------------------------------------------| +| `level` | `string` | `info` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | +| `tags` | list of [tags descriptions](#tags-subsection) | | Array of tags description. | + +## `tags` subsection +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`, `engine`, `blobovnicza`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | +| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract diff --git a/pkg/local_object_storage/blobovnicza/blobovnicza.go b/pkg/local_object_storage/blobovnicza/blobovnicza.go index 08ef8b86c..a6c40f9fa 100644 --- a/pkg/local_object_storage/blobovnicza/blobovnicza.go +++ b/pkg/local_object_storage/blobovnicza/blobovnicza.go @@ -110,7 +110,7 @@ func WithFullSizeLimit(lim uint64) Option { // WithLogger returns an option to specify Blobovnicza's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Blobovnicza")) + c.log = l } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index ec9743b57..f87f4a144 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -19,7 +19,8 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { st := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(10), WithBlobovniczaShallowDepth(1), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go index 5414140f0..df2b4ffe5 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/exists_test.go @@ -19,7 +19,8 @@ func TestExistsInvalidStorageID(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(1024), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go index d390ecf1d..9244d765c 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/generic_test.go @@ -15,7 +15,8 @@ func TestGeneric(t *testing.T) { helper := func(t *testing.T, dir string) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), @@ -43,7 +44,8 @@ func TestControl(t *testing.T) { newTree := func(t *testing.T) common.Storage { return NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(maxObjectSize), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/option.go b/pkg/local_object_storage/blobstor/blobovniczatree/option.go index 
2f6d31b4e..5f268b0f2 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/option.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/option.go @@ -63,10 +63,15 @@ func initConfig(c *cfg) { } } -func WithLogger(l *logger.Logger) Option { +func WithBlobovniczaTreeLogger(log *logger.Logger) Option { return func(c *cfg) { - c.log = l - c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(l)) + c.log = log + } +} + +func WithBlobovniczaLogger(log *logger.Logger) Option { + return func(c *cfg) { + c.blzOpts = append(c.blzOpts, blobovnicza.WithLogger(log)) } } diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go index 8832603c4..4146ef260 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go @@ -140,7 +140,8 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) { func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) { b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(2), WithBlobovniczaShallowDepth(2), diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index 9c971bfb6..a7a99fec3 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -50,7 +50,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -106,7 +107,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -160,7 +162,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -231,7 +234,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), // single directory WithBlobovniczaShallowDepth(1), @@ -262,7 +266,8 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) { require.NoError(t, b.Close(context.Background())) b = NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), 
WithObjectSizeLimit(64*1024), WithBlobovniczaShallowWidth(1), WithBlobovniczaShallowDepth(1), @@ -304,7 +309,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(64*1024), // 64KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(2), // depth = 2 @@ -332,7 +338,8 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) { b = NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(32*1024), // 32KB object size limit WithBlobovniczaShallowWidth(5), WithBlobovniczaShallowDepth(3), // depth = 3 @@ -374,7 +381,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta dir := t.TempDir() b := NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(sourceWidth), WithBlobovniczaShallowDepth(sourceDepth), @@ -415,7 +423,8 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta b = NewBlobovniczaTree( context.Background(), - WithLogger(test.NewLogger(t)), + WithBlobovniczaLogger(test.NewLogger(t)), + WithBlobovniczaTreeLogger(test.NewLogger(t)), WithObjectSizeLimit(2048), WithBlobovniczaShallowWidth(targetWidth), WithBlobovniczaShallowDepth(targetDepth), diff --git a/pkg/local_object_storage/blobstor/blobstor.go b/pkg/local_object_storage/blobstor/blobstor.go index edb2c1946..ceaf2538a 100644 --- a/pkg/local_object_storage/blobstor/blobstor.go +++ b/pkg/local_object_storage/blobstor/blobstor.go @@ -91,7 +91,7 @@ func WithStorages(st []SubStorage) Option { // WithLogger returns option to specify BlobStor's logger. 
func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "BlobStor")) + c.log = l } } diff --git a/pkg/local_object_storage/blobstor/fstree/option.go b/pkg/local_object_storage/blobstor/fstree/option.go index 7155ddcbb..6f2ac87e1 100644 --- a/pkg/local_object_storage/blobstor/fstree/option.go +++ b/pkg/local_object_storage/blobstor/fstree/option.go @@ -4,7 +4,6 @@ import ( "io/fs" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) type Option func(*FSTree) @@ -53,6 +52,6 @@ func WithFileCounter(c FileCounter) Option { func WithLogger(l *logger.Logger) Option { return func(f *FSTree) { - f.log = l.With(zap.String("component", "FSTree")) + f.log = l } } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 711a76100..fc6d9ee9c 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -116,7 +116,8 @@ func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStor blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1), blobovniczatree.WithPermissions(0o700), - blobovniczatree.WithLogger(test.NewLogger(t))), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t))), Policy: func(_ *objectSDK.Object, data []byte) bool { return uint64(len(data)) < smallSize }, diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go index 9998bbae2..54d2f1510 100644 --- a/pkg/local_object_storage/shard/gc_internal_test.go +++ b/pkg/local_object_storage/shard/gc_internal_test.go @@ -37,7 +37,8 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 5caf3641f..3878a65cd 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -28,9 +28,10 @@ func TestShard_Lock(t *testing.T) { var sh *Shard rootPath := t.TempDir() + l := logger.NewLoggerWrapper(zap.NewNop()) opts := []Option{ WithID(NewIDFromBytes([]byte{})), - WithLogger(logger.NewLoggerWrapper(zap.NewNop())), + WithLogger(l), WithBlobStorOptions( blobstor.WithStorages([]blobstor.SubStorage{ { diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 146e834cc..06fe9f511 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ b/pkg/local_object_storage/shard/range_test.go @@ -79,7 +79,8 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), 
blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go index 304a6bf9d..d89b56266 100644 --- a/pkg/local_object_storage/shard/shard.go +++ b/pkg/local_object_storage/shard/shard.go @@ -205,7 +205,7 @@ func WithPiloramaOptions(opts ...pilorama.Option) Option { func WithLogger(l *logger.Logger) Option { return func(c *cfg) { c.log = l - c.gcCfg.log = l + c.gcCfg.log = l.WithTag(logger.TagGC) } } diff --git a/pkg/local_object_storage/shard/shard_test.go b/pkg/local_object_storage/shard/shard_test.go index f9ee34488..84be71c4d 100644 --- a/pkg/local_object_storage/shard/shard_test.go +++ b/pkg/local_object_storage/shard/shard_test.go @@ -60,7 +60,8 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard { Storage: blobovniczatree.NewBlobovniczaTree( context.Background(), - blobovniczatree.WithLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaLogger(test.NewLogger(t)), + blobovniczatree.WithBlobovniczaTreeLogger(test.NewLogger(t)), blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")), blobovniczatree.WithBlobovniczaShallowDepth(1), blobovniczatree.WithBlobovniczaShallowWidth(1)), diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go index dbbe66c19..a4f98ad06 100644 --- a/pkg/local_object_storage/writecache/options.go +++ b/pkg/local_object_storage/writecache/options.go @@ -5,7 +5,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Option represents write-cache configuration option. @@ -46,7 +45,7 @@ type options struct { // WithLogger sets logger. func WithLogger(log *logger.Logger) Option { return func(o *options) { - o.log = log.With(zap.String("component", "WriteCache")) + o.log = log } } diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go index 867d3f4ef..1c4d7d585 100644 --- a/pkg/services/object/delete/service.go +++ b/pkg/services/object/delete/service.go @@ -92,6 +92,6 @@ func New(gs *getsvc.Service, // WithLogger returns option to specify Delete service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "objectSDK.Delete service")) + c.log = l } } diff --git a/pkg/services/object/get/service.go b/pkg/services/object/get/service.go index 9ec10b5f2..a103f5a7f 100644 --- a/pkg/services/object/get/service.go +++ b/pkg/services/object/get/service.go @@ -53,6 +53,6 @@ func New( // WithLogger returns option to specify Get service's logger. 
func WithLogger(l *logger.Logger) Option { return func(s *Service) { - s.log = l.With(zap.String("component", "Object.Get service")) + s.log = l } } diff --git a/pkg/services/object/get/v2/service.go b/pkg/services/object/get/v2/service.go index fc483b74b..0ec8912fd 100644 --- a/pkg/services/object/get/v2/service.go +++ b/pkg/services/object/get/v2/service.go @@ -145,6 +145,6 @@ func (s *Service) Head(ctx context.Context, req *objectV2.HeadRequest) (*objectV func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Object.Get V2 service")) + c.log = l } } diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go index e1aeca957..56fe56468 100644 --- a/pkg/services/object/search/service.go +++ b/pkg/services/object/search/service.go @@ -94,6 +94,6 @@ func New(e *engine.StorageEngine, // WithLogger returns option to specify Get service's logger. func WithLogger(l *logger.Logger) Option { return func(c *cfg) { - c.log = l.With(zap.String("component", "Object.Search service")) + c.log = l } } diff --git a/pkg/services/policer/policer.go b/pkg/services/policer/policer.go index d18b71a23..c91e7cc7c 100644 --- a/pkg/services/policer/policer.go +++ b/pkg/services/policer/policer.go @@ -8,7 +8,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-node/internal/assert" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" lru "github.com/hashicorp/golang-lru/v2" - "go.uber.org/zap" ) type objectsInWork struct { @@ -56,8 +55,6 @@ func New(opts ...Option) *Policer { opts[i](c) } - c.log = c.log.With(zap.String("component", "Object Policer")) - cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize)) assert.NoError(err, fmt.Sprintf("could not create LRU cache with %d size", c.cacheSize)) diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go index 6910fa5af..a940cef37 100644 --- a/pkg/services/replicator/replicator.go +++ b/pkg/services/replicator/replicator.go @@ -7,7 +7,6 @@ import ( objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer" getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger" - "go.uber.org/zap" ) // Replicator represents the utility that replicates @@ -45,8 +44,6 @@ func New(opts ...Option) *Replicator { opts[i](c) } - c.log = c.log.With(zap.String("component", "Object Replicator")) - return &Replicator{ cfg: c, } diff --git a/pkg/services/session/executor.go b/pkg/services/session/executor.go index 12b221613..f0591de71 100644 --- a/pkg/services/session/executor.go +++ b/pkg/services/session/executor.go @@ -33,10 +33,7 @@ func NewExecutionService(exec ServiceExecutor, respSvc *response.Service, l *log } func (s *executorSvc) Create(ctx context.Context, req *session.CreateRequest) (*session.CreateResponse, error) { - s.log.Debug(ctx, logs.ServingRequest, - zap.String("component", "SessionService"), - zap.String("request", "Create"), - ) + s.log.Debug(ctx, logs.ServingRequest, zap.String("request", "Create")) respBody, err := s.exec.Create(ctx, req.GetBody()) if err != nil { diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index 276847be1..a1998cb1a 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -238,5 +238,6 @@ func NewLoggerWrapper(z *zap.Logger) *Logger { return &Logger{ z: z.WithOptions(zap.AddCallerSkip(1)), t: TagMain, + c: z.Core(), } } diff --git a/pkg/util/logger/tag_string.go 
b/pkg/util/logger/tag_string.go index a25b3c445..1b98f2e62 100644 --- a/pkg/util/logger/tag_string.go +++ b/pkg/util/logger/tag_string.go @@ -13,11 +13,26 @@ func _() { _ = x[TagGrpcSvc-3] _ = x[TagIr-4] _ = x[TagProcessor-5] + _ = x[TagEngine-6] + _ = x[TagBlobovnicza-7] + _ = x[TagBlobovniczaTree-8] + _ = x[TagBlobstor-9] + _ = x[TagFSTree-10] + _ = x[TagGC-11] + _ = x[TagShard-12] + _ = x[TagWriteCache-13] + _ = x[TagDeleteSvc-14] + _ = x[TagGetSvc-15] + _ = x[TagSearchSvc-16] + _ = x[TagSessionSvc-17] + _ = x[TagTreeSvc-18] + _ = x[TagPolicer-19] + _ = x[TagReplicator-20] } -const _Tag_name = "mainmorphgrpc_svcirprocessor" +const _Tag_name = "mainmorphgrpcsvcirprocessorengineblobovniczablobovniczatreeblobstorfstreegcshardwritecachedeletesvcgetsvcsearchsvcsessionsvctreesvcpolicerreplicator" -var _Tag_index = [...]uint8{0, 4, 9, 17, 19, 28} +var _Tag_index = [...]uint8{0, 4, 9, 16, 18, 27, 33, 44, 59, 67, 73, 75, 80, 90, 99, 105, 114, 124, 131, 138, 148} func (i Tag) String() string { i -= 1 diff --git a/pkg/util/logger/tags.go b/pkg/util/logger/tags.go index 51a6f62da..a5386707e 100644 --- a/pkg/util/logger/tags.go +++ b/pkg/util/logger/tags.go @@ -13,12 +13,27 @@ import ( type Tag uint8 const ( - _ Tag = iota // - TagMain // main - TagMorph // morph - TagGrpcSvc // grpc_svc - TagIr // ir - TagProcessor // processor + _ Tag = iota // + TagMain // main + TagMorph // morph + TagGrpcSvc // grpcsvc + TagIr // ir + TagProcessor // processor + TagEngine // engine + TagBlobovnicza // blobovnicza + TagBlobovniczaTree // blobovniczatree + TagBlobstor // blobstor + TagFSTree // fstree + TagGC // gc + TagShard // shard + TagWriteCache // writecache + TagDeleteSvc // deletesvc + TagGetSvc // getsvc + TagSearchSvc // searchsvc + TagSessionSvc // sessionsvc + TagTreeSvc // treesvc + TagPolicer // policer + TagReplicator // replicator defaultLevel = zapcore.InfoLevel ) From 86264e4e20af3eb9f90ae65487a5b5e91eb8a28d Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Tue, 4 Feb 2025 09:11:36 +0300 Subject: [PATCH 563/591] [#1619] logger: Add benchmark Change-Id: I49e90e8a3689a755755afd0638b327a6b1884795 Signed-off-by: Anton Nikiforov --- pkg/util/logger/logger_test.go | 118 +++++++++++++++++++++++++++++ pkg/util/logger/logger_test.result | 46 +++++++++++ 2 files changed, 164 insertions(+) create mode 100644 pkg/util/logger/logger_test.go create mode 100644 pkg/util/logger/logger_test.result diff --git a/pkg/util/logger/logger_test.go b/pkg/util/logger/logger_test.go new file mode 100644 index 000000000..b867ee6cc --- /dev/null +++ b/pkg/util/logger/logger_test.go @@ -0,0 +1,118 @@ +package logger + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "go.uber.org/zap/zaptest/observer" +) + +func BenchmarkLogger(b *testing.B) { + ctx := context.Background() + m := map[string]Prm{} + + prm := Prm{} + require.NoError(b, prm.SetLevelString("debug")) + m["logging enabled"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("error")) + m["logging disabled"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("error")) + require.NoError(b, prm.SetTags([][]string{{"main", "debug"}, {"morph", "debug"}})) + m["logging enabled via tags"] = prm + + prm = Prm{} + require.NoError(b, prm.SetLevelString("debug")) + require.NoError(b, prm.SetTags([][]string{{"main", "error"}, {"morph", "debug"}})) + m["logging disabled via tags"] = prm + + for k, v := range m { + b.Run(k, func(b *testing.B) { + logger, err := createLogger(v) + 
require.NoError(b, err) + UpdateLevelForTags(v) + b.ResetTimer() + b.ReportAllocs() + for range b.N { + logger.Info(ctx, "test info") + } + }) + } +} + +type testCore struct { + core zapcore.Core +} + +func (c *testCore) Enabled(lvl zapcore.Level) bool { + return c.core.Enabled(lvl) +} + +func (c *testCore) With(fields []zapcore.Field) zapcore.Core { + c.core = c.core.With(fields) + return c +} + +func (c *testCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + return ce.AddCore(e, c) +} + +func (c *testCore) Write(zapcore.Entry, []zapcore.Field) error { + return nil +} + +func (c *testCore) Sync() error { + return c.core.Sync() +} + +func createLogger(prm Prm) (*Logger, error) { + prm.Options = []zap.Option{zap.WrapCore(func(core zapcore.Core) zapcore.Core { + tc := testCore{core: core} + return &tc + })} + return NewLogger(prm) +} + +func TestLoggerOutput(t *testing.T) { + obs, logs := observer.New(zap.NewAtomicLevelAt(zap.DebugLevel)) + + prm := Prm{} + require.NoError(t, prm.SetLevelString("debug")) + prm.Options = []zap.Option{zap.WrapCore(func(zapcore.Core) zapcore.Core { + return obs + })} + loggerMain, err := NewLogger(prm) + require.NoError(t, err) + UpdateLevelForTags(prm) + + loggerMainWith := loggerMain.With(zap.String("key", "value")) + + require.Panics(t, func() { + loggerMainWith.WithTag(TagShard) + }) + loggerShard := loggerMain.WithTag(TagShard) + loggerShard = loggerShard.With(zap.String("key1", "value1")) + + loggerMorph := loggerMain.WithTag(TagMorph) + loggerMorph = loggerMorph.With(zap.String("key2", "value2")) + + ctx := context.Background() + loggerMain.Debug(ctx, "main") + loggerMainWith.Debug(ctx, "main with") + loggerShard.Debug(ctx, "shard") + loggerMorph.Debug(ctx, "morph") + + require.Len(t, logs.All(), 4) + require.Len(t, logs.FilterFieldKey("key").All(), 1) + require.Len(t, logs.FilterFieldKey("key1").All(), 1) + require.Len(t, logs.FilterFieldKey("key2").All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMain.String())).All(), 2) + require.Len(t, logs.FilterField(zap.String("tag", TagShard.String())).All(), 1) + require.Len(t, logs.FilterField(zap.String("tag", TagMorph.String())).All(), 1) +} diff --git a/pkg/util/logger/logger_test.result b/pkg/util/logger/logger_test.result new file mode 100644 index 000000000..612fa2967 --- /dev/null +++ b/pkg/util/logger/logger_test.result @@ -0,0 +1,46 @@ +goos: linux +goarch: amd64 +pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger +cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz +BenchmarkLogger/logging_enabled-8 10000 1156 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1124 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1106 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1096 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1071 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1081 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1074 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1134 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1123 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled-8 10000 1144 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.15 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.54 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.22 
ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 17.01 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.31 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.61 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.17 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 16.26 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled-8 10000 21.02 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1146 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1086 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1113 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1157 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1069 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1073 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1096 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1092 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1060 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_enabled_via_tags-8 10000 1153 ns/op 240 B/op 1 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.23 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.39 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.47 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.62 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.53 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.74 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.20 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 17.06 ns/op 0 B/op 0 allocs/op +BenchmarkLogger/logging_disabled_via_tags-8 10000 16.60 ns/op 0 B/op 0 allocs/op +PASS +ok git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger 0.260s From cf48069fd8a12df6c0013d60cc0ce0a89420241b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:26:20 +0300 Subject: [PATCH 564/591] [#1718] linter: Resolve gocritic's appendAssign linter See https://go-critic.com/overview#appendassign for details. 
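In short, appendAssign flags code where the result of append on one slice is assigned to a different slice variable, which is usually either a bug or needlessly confusing to read. A small illustration of the flagged pattern and the preferred form (hypothetical names, not taken from this repository):

```go
package main

import "fmt"

func main() {
	base := []int{1, 2, 3}
	extra := []int{4}

	// Flagged by gocritic's appendAssign: appending to `base` but storing into `extra`.
	// extra = append(base, extra...)

	// Preferred: keep the append source and destination in the same variable.
	merged := append([]int(nil), base...)
	merged = append(merged, extra...)
	fmt.Println(merged) // [1 2 3 4]
}
```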
Change-Id: I991979ea680af25e2cec9097fa12b1c4eebc6c1d Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/modules/object/util.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-cli/modules/object/util.go b/cmd/frostfs-cli/modules/object/util.go index 3955f8ee1..8e4e8b287 100644 --- a/cmd/frostfs-cli/modules/object/util.go +++ b/cmd/frostfs-cli/modules/object/util.go @@ -262,13 +262,8 @@ func OpenSessionViaClient(cmd *cobra.Command, dst SessionPrm, cli *client.Client if _, ok := dst.(*internal.DeleteObjectPrm); ok { common.PrintVerbose(cmd, "Collecting relatives of the removal object...") - rels := collectObjectRelatives(cmd, cli, cnr, *obj) - - if len(rels) == 0 { - objs = []oid.ID{*obj} - } else { - objs = append(rels, *obj) - } + objs = collectObjectRelatives(cmd, cli, cnr, *obj) + objs = append(objs, *obj) } } From 2075e09cedad3150a91216524c014846edb3cb71 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:28:21 +0300 Subject: [PATCH 565/591] [#1718] linter: Resolve gocritic's unslice linter See https://go-critic.com/overview#unslice for details. Change-Id: I6d21e8ce1c9bae56099dc203f5080b0e3ea0c1ef Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-adm/internal/modules/morph/config/config.go | 2 +- cmd/frostfs-lens/internal/schema/writecache/parsers.go | 2 +- pkg/local_object_storage/metabase/delete.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/config/config.go b/cmd/frostfs-adm/internal/modules/morph/config/config.go index f64cb4817..c17fb62ff 100644 --- a/cmd/frostfs-adm/internal/modules/morph/config/config.go +++ b/cmd/frostfs-adm/internal/modules/morph/config/config.go @@ -63,7 +63,7 @@ func dumpNetworkConfig(cmd *cobra.Command, _ []string) error { netmap.MaxObjectSizeConfig, netmap.WithdrawFeeConfig, netmap.MaxECDataCountConfig, netmap.MaxECParityCountConfig: nbuf := make([]byte, 8) - copy(nbuf[:], v) + copy(nbuf, v) n := binary.LittleEndian.Uint64(nbuf) _, _ = tw.Write(fmt.Appendf(nil, "%s:\t%d (int)\n", k, n)) case netmap.HomomorphicHashingDisabledKey, netmap.MaintenanceModeAllowedConfig: diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go index 7d70b27b2..3bfe2608b 100644 --- a/cmd/frostfs-lens/internal/schema/writecache/parsers.go +++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go @@ -57,7 +57,7 @@ func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, r.addr.SetContainer(cnr) r.addr.SetObject(obj) - r.data = value[:] + r.data = value return &r, nil, nil } diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go index cf1d563e9..9a5a6e574 100644 --- a/pkg/local_object_storage/metabase/delete.go +++ b/pkg/local_object_storage/metabase/delete.go @@ -363,12 +363,12 @@ func (db *DB) deleteObject( func parentLength(tx *bbolt.Tx, addr oid.Address) int { bucketName := make([]byte, bucketKeySize) - bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName[:])) + bkt := tx.Bucket(parentBucketName(addr.Container(), bucketName)) if bkt == nil { return 0 } - lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName[:]))) + lst, err := decodeList(bkt.Get(objectKey(addr.Object(), bucketName))) if err != nil { return 0 } From d2114759aaaf4e99aa83c1185c0a5d184a83b97a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:38:06 +0300 Subject: [PATCH 566/591] [#1718] linter: Resolve 
gocritic's typeSwitchVar linter See https://go-critic.com/overview#typeswitchvar for details Change-Id: Ic29db32c9b080576ab51dd484b4376114e9e775c Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-lens/internal/tui/ui.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go index bd7540b01..cc6b7859e 100644 --- a/cmd/frostfs-lens/internal/tui/ui.go +++ b/cmd/frostfs-lens/internal/tui/ui.go @@ -460,11 +460,11 @@ func (ui *UI) handleInputOnSearching(event *tcell.EventKey) { return } - switch ui.mountedPage.(type) { + switch v := ui.mountedPage.(type) { case *BucketsView: ui.moveNextPage(NewBucketsView(ui, res)) case *RecordsView: - bucket := ui.mountedPage.(*RecordsView).bucket + bucket := v.bucket ui.moveNextPage(NewRecordsView(ui, bucket, res)) } From 8d499f03fee98825d183d18a7ef492a6a019b25a Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:40:31 +0300 Subject: [PATCH 567/591] [#1718] linter: Resolve gocritic's elseif linter See https://go-critic.com/overview#elseif for details. Change-Id: I8fd3edfacaeea2b0a83917575d545af7e7ab4d13 Signed-off-by: Dmitrii Stepanov --- pkg/services/container/ape.go | 6 ++---- pkg/services/object/common/target/target.go | 6 ++---- pkg/services/object_manager/tombstone/checker.go | 6 ++---- pkg/services/tree/service.go | 6 ++---- 4 files changed, 8 insertions(+), 16 deletions(-) diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go index e1fbe3960..01bd825d7 100644 --- a/pkg/services/container/ape.go +++ b/pkg/services/container/ape.go @@ -655,10 +655,8 @@ func (ac *apeChecker) namespaceByOwner(ctx context.Context, owner *refs.OwnerID) subject, err := ac.frostFSIDClient.GetSubject(ctx, addr) if err == nil { namespace = subject.Namespace - } else { - if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { - return "", fmt.Errorf("get subject error: %w", err) - } + } else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) { + return "", fmt.Errorf("get subject error: %w", err) } return namespace, nil } diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go index b2ae79dbc..f2bd907db 100644 --- a/pkg/services/object/common/target/target.go +++ b/pkg/services/object/common/target/target.go @@ -89,10 +89,8 @@ func newTrustedTarget(ctx context.Context, prm *objectwriter.Params) (transforme if !ownerObj.Equals(ownerSession) { return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession) } - } else { - if !ownerObj.Equals(sessionInfo.Owner) { - return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) - } + } else if !ownerObj.Equals(sessionInfo.Owner) { + return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj) } if prm.SignRequestPrivateKey == nil { diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go index a4e36c2dc..e5f001d5a 100644 --- a/pkg/services/object_manager/tombstone/checker.go +++ b/pkg/services/object_manager/tombstone/checker.go @@ -61,10 +61,8 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr logs.TombstoneCouldNotGetTheTombstoneTheSource, zap.Error(err), ) - } else { - if ts != nil { - return g.handleTS(ctx, addrStr, ts, epoch) - } + } else if ts != 
nil { + return g.handleTS(ctx, addrStr, ts, epoch) } // requested tombstone not diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index a00c8c1cd..5349dd13e 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -436,10 +436,8 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid } if ms == nil { ms = m.Items - } else { - if len(m.Items) != 1 { - return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") - } + } else if len(m.Items) != 1 { + return status.Error(codes.InvalidArgument, "multiple non-internal nodes provided") } ts = append(ts, m.Time) ps = append(ps, p) From 64900f87e1872dea2794b02ddbc888f171a24a77 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:44:00 +0300 Subject: [PATCH 568/591] [#1718] linter: Resolve gocritic's singleCaseSwitch linter See https://go-critic.com/overview#singlecaseswitch for details. Change-Id: Ied7885f83b4116969771de6f91bc5e1e3b2a4f1e Signed-off-by: Dmitrii Stepanov --- .../blobstor/fstree/fstree_write_generic.go | 9 +++------ pkg/services/control/ir/server/server.go | 3 +-- pkg/util/http/server.go | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go index 07a618b0a..6d633dad6 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go @@ -67,12 +67,9 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error { err := w.writeFile(tmpPath, data) if err != nil { var pe *fs.PathError - if errors.As(err, &pe) { - switch pe.Err { - case syscall.ENOSPC: - err = common.ErrNoSpace - _ = os.RemoveAll(tmpPath) - } + if errors.As(err, &pe) && errors.Is(pe.Err, syscall.ENOSPC) { + err = common.ErrNoSpace + _ = os.RemoveAll(tmpPath) } return err } diff --git a/pkg/services/control/ir/server/server.go b/pkg/services/control/ir/server/server.go index c2a4f88a6..0cfca71c1 100644 --- a/pkg/services/control/ir/server/server.go +++ b/pkg/services/control/ir/server/server.go @@ -35,8 +35,7 @@ func panicOnPrmValue(n string, v any) { // the parameterized private key. func New(prm Prm, netmapClient *netmap.Client, containerClient *container.Client, opts ...Option) *Server { // verify required parameters - switch { - case prm.healthChecker == nil: + if prm.healthChecker == nil { panicOnPrmValue("health checker", prm.healthChecker) } diff --git a/pkg/util/http/server.go b/pkg/util/http/server.go index 923412a7f..2589ab786 100644 --- a/pkg/util/http/server.go +++ b/pkg/util/http/server.go @@ -76,8 +76,7 @@ func New(prm HTTPSrvPrm, opts ...Option) *Server { o(c) } - switch { - case c.shutdownTimeout <= 0: + if c.shutdownTimeout <= 0 { panicOnOptValue("shutdown timeout", c.shutdownTimeout) } From b88fe8c4a7f842c65d98a1edf6b83e54a7f2c3b3 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:45:04 +0300 Subject: [PATCH 569/591] [#1718] linter: Resolve gocritic's assignOp linter See https://go-critic.com/overview#assignop for details. 
Change-Id: I839446846437c8c74c119d8b5669f5b866c247dc Signed-off-by: Dmitrii Stepanov --- pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go index d2eef2074..a840275b8 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go @@ -226,7 +226,7 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M func (b *Blobovniczas) addRebuildTempFile(ctx context.Context, path string) (func(), error) { sysPath := filepath.Join(b.rootPath, path) - sysPath = sysPath + rebuildSuffix + sysPath += rebuildSuffix _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm) if err != nil { return nil, err From ca8b01667fac7107a8451843a4b21c656dfc7527 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:46:31 +0300 Subject: [PATCH 570/591] [#1718] linter: Resolve gocritic's unlambda linter See https://go-critic.com/overview#unlambda for details. Change-Id: Iccb2d293ce31a302fcbb2c3f9c55c9b3fa554db5 Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/v2/util.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index bfa7fd619..4b7dcc530 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -182,9 +182,7 @@ func (s *Service) toHashRangePrm(req *objectV2.GetRangeHashRequest) (*getsvc.Ran default: return nil, errUnknownChechsumType(t) case refs.SHA256: - p.SetHashGenerator(func() hash.Hash { - return sha256.New() - }) + p.SetHashGenerator(sha256.New) case refs.TillichZemor: p.SetHashGenerator(func() hash.Hash { return tz.New() From 3cbff575351a9a8ffff384e920d06da56a6c9703 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 14:47:32 +0300 Subject: [PATCH 571/591] [#1718] linter: Enable gocritic linter See https://go-critic.com/overview#checkers-from-the-diagnostic-group for list of default enabled checkers. `ifElseChain` disabled as it generates doubtful issues. 
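For context, ifElseChain suggests rewriting longer if-else-if chains as switch statements; on short, readable chains the suggestion adds little value, hence the exclusion. A rough, hypothetical illustration of the kind of code it targets:

```go
package main

import "fmt"

func describe(n int) string {
	// gocritic's ifElseChain checker pushes chains like this toward a switch
	// statement, even when the if-else form is already perfectly readable.
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	} else if n < 10 {
		return "small"
	}
	return "large"
}

func main() {
	fmt.Println(describe(7)) // small
}
```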
Change-Id: I5937b116d9af8b3cdf8b06451c4904d0b3f67f68 Signed-off-by: Dmitrii Stepanov --- .golangci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 3ac4eb651..e3ec09f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,6 +18,7 @@ linters: - exhaustive - funlen - gocognit + - gocritic - godot - importas - ineffassign @@ -44,6 +45,9 @@ linters: statements: 60 gocognit: min-complexity: 40 + gocritic: + disabled-checks: + - ifElseChain importas: alias: - pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object From c84013a85408a5403c2cc8f6f9cf98fb7bf79a7b Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Wed, 16 Apr 2025 15:09:43 +0300 Subject: [PATCH 572/591] [#1719] cli: Fix TTL description Change-Id: I051a27af57a74304713c1f832dc31dbaeb10cbc6 Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-cli/internal/commonflags/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/frostfs-cli/internal/commonflags/api.go b/cmd/frostfs-cli/internal/commonflags/api.go index 88321176f..6ed21e107 100644 --- a/cmd/frostfs-cli/internal/commonflags/api.go +++ b/cmd/frostfs-cli/internal/commonflags/api.go @@ -9,7 +9,7 @@ const ( TTL = "ttl" TTLShorthand = "" TTLDefault = 2 - TTLUsage = "TTL value in request meta header" + TTLUsage = "The maximum number of intermediate nodes in the request route" XHeadersKey = "xhdr" XHeadersShorthand = "x" From 1a738792c00518eff82b8643135c4bd4a40e0710 Mon Sep 17 00:00:00 2001 From: Anton Nikiforov Date: Thu, 17 Apr 2025 16:15:12 +0300 Subject: [PATCH 573/591] [#1722] node, ir: Fix documentation for logger tags Close #1722. Change-Id: Iee9b3d96a31353622e1680d2d596ca6a9ee104b4 Signed-off-by: Anton Nikiforov --- config/example/ir.yaml | 2 +- docs/storage-node-configuration.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/config/example/ir.yaml b/config/example/ir.yaml index a4a006550..ed53f014b 100644 --- a/config/example/ir.yaml +++ b/config/example/ir.yaml @@ -4,7 +4,7 @@ logger: level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal" timestamp: true tags: - - names: "main, morph" # Possible values: `main`, `morph`, `grpc_svc`, `ir`, `processor`. + - names: "main, morph" # Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`. level: debug wallet: diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 3e770457b..14ebb53b3 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -123,10 +123,10 @@ logger: | `tags` | list of [tags descriptions](#tags-subsection) | | Array of tags description. | ## `tags` subsection -| Parameter | Type | Default value | Description | -|-----------|----------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `morph`, `grpcsvc`, `ir`, `processor`, `engine`, `blobovnicza`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | -| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | +| Parameter | Type | Default value | Description | +|-----------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `names` | `string` | | List of components divided by `,`.
Possible values: `main`, `engine`, `blobovnicza`, `blobovniczatree`, `blobstor`, `fstree`, `gc`, `shard`, `writecache`, `deletesvc`, `getsvc`, `searchsvc`, `sessionsvc`, `treesvc`, `policer`, `replicator`. | +| `level` | `string` | | Logging level for the components from `names`, overrides default logging level. | # `contracts` section Contains override values for FrostFS side-chain contract hashes. Most of the time contract From 6567ceaf132c3295b8f011920d3718c989168b2f Mon Sep 17 00:00:00 2001 From: Alexander Chuprov Date: Thu, 17 Apr 2025 13:20:18 +0300 Subject: [PATCH 574/591] [#1702] tree: Support reloading 'tree.authorized_keys' on SIGHUP Allows updating the list of public keys authorized to invoke 'Tree service' operations without restarting the node. Change-Id: I01b6e05875b7ae3f3218062eb12bf9755e87f2a3 Signed-off-by: Alexander Chuprov --- cmd/frostfs-node/config.go | 7 +++++++ pkg/services/tree/options.go | 8 +++----- pkg/services/tree/service.go | 14 ++++++++++++++ pkg/services/tree/signature.go | 4 ++-- pkg/services/tree/signature_test.go | 1 + 5 files changed, 27 insertions(+), 7 deletions(-) diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index fae1ca1ca..b688acfde 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -30,6 +30,7 @@ import ( objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object" replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator" tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing" + treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics" internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -1403,6 +1404,12 @@ func (c *cfg) getComponents(ctx context.Context) []dCmp { } return err }}) + if c.treeService != nil { + components = append(components, dCmp{"tree", func() error { + c.treeService.ReloadAuthorizedKeys(treeconfig.Tree(c.appCfg).AuthorizedKeys()) + return nil + }}) + } if cmp, updated := metricsComponent(c); updated { if cmp.enabled { cmp.preReload = enableMetricsSvc diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go index 4ad760846..56cbcc081 100644 --- a/pkg/services/tree/options.go +++ b/pkg/services/tree/options.go @@ -3,6 +3,7 @@ package tree import ( "context" "crypto/ecdsa" + "sync/atomic" "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" @@ -41,7 +42,7 @@ type cfg struct { replicatorWorkerCount int replicatorTimeout time.Duration containerCacheSize int - authorizedKeys [][]byte + authorizedKeys atomic.Pointer[[][]byte] syncBatchSize int localOverrideStorage policyengine.LocalOverrideStorage @@ -147,10 +148,7 @@ func WithMetrics(v MetricsRegister) Option { // keys that have rights to use Tree service. 
func WithAuthorizedKeys(keys keys.PublicKeys) Option { return func(c *cfg) { - c.authorizedKeys = nil - for _, key := range keys { - c.authorizedKeys = append(c.authorizedKeys, key.Bytes()) - } + c.authorizedKeys.Store(fromPublicKeys(keys)) } } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index 5349dd13e..a4bc0c97c 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -17,6 +17,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" "go.uber.org/zap" "google.golang.org/grpc/codes" @@ -59,6 +60,7 @@ func New(opts ...Option) *Service { s.replicatorTimeout = defaultReplicatorSendTimeout s.syncBatchSize = defaultSyncBatchSize s.metrics = defaultMetricsRegister{} + s.authorizedKeys.Store(&[][]byte{}) for i := range opts { opts[i](&s.cfg) @@ -782,3 +784,15 @@ func (s *Service) Healthcheck(context.Context, *HealthcheckRequest) (*Healthchec return new(HealthcheckResponse), nil } + +func (s *Service) ReloadAuthorizedKeys(newKeys keys.PublicKeys) { + s.authorizedKeys.Store(fromPublicKeys(newKeys)) +} + +func fromPublicKeys(keys keys.PublicKeys) *[][]byte { + buff := make([][]byte, len(keys)) + for i, k := range keys { + buff[i] = k.Bytes() + } + return &buff +} diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go index 89056056d..8221a4546 100644 --- a/pkg/services/tree/signature.go +++ b/pkg/services/tree/signature.go @@ -95,8 +95,8 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) { } key := sign.GetKey() - for i := range s.authorizedKeys { - if bytes.Equal(s.authorizedKeys[i], key) { + for _, currentKey := range *s.authorizedKeys.Load() { + if bytes.Equal(currentKey, key) { return true, nil } } diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 947de8e58..f5659d5e2 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -152,6 +152,7 @@ func TestMessageSign(t *testing.T) { apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}), } + s.cfg.authorizedKeys.Store(&[][]byte{}) rawCID1 := make([]byte, sha256.Size) cid1.Encode(rawCID1) From 4bcb67263076baebb2192c49c4d9c6e8d46627cb Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 18 Apr 2025 18:28:36 +0300 Subject: [PATCH 575/591] [#1707] tree: Pass tracing context in `forEachNode` Change-Id: I884dcd215bd3934f9b4ea43dcc77e38f9dadcf10 Signed-off-by: Dmitrii Stepanov --- pkg/services/tree/redirect.go | 14 +++++++------- pkg/services/tree/service.go | 8 ++++---- pkg/services/tree/sync.go | 4 ++-- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/services/tree/redirect.go b/pkg/services/tree/redirect.go index 3dcdc4fc7..647f8cb30 100644 --- a/pkg/services/tree/redirect.go +++ b/pkg/services/tree/redirect.go @@ -19,8 +19,8 @@ var errNoSuitableNode = errors.New("no node was found to execute the request") func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapSDK.NodeInfo, req *Req, callback func(TreeServiceClient, context.Context, *Req, ...grpc.CallOption) (*Resp, error)) (*Resp, error) { var resp *Resp var outErr error - err := s.forEachNode(ctx, ns, func(c TreeServiceClient) bool { - resp, outErr = callback(c, ctx, req) + err := s.forEachNode(ctx, ns, 
func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = callback(c, fCtx, req) return true }) if err != nil { @@ -31,7 +31,7 @@ func relayUnary[Req any, Resp any](ctx context.Context, s *Service, ns []netmapS // forEachNode executes callback for each node in the container until true is returned. // Returns errNoSuitableNode if there was no successful attempt to dial any node. -func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(c TreeServiceClient) bool) error { +func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo, f func(context.Context, TreeServiceClient) bool) error { for _, n := range cntNodes { if bytes.Equal(n.PublicKey(), s.rawPub) { return nil @@ -42,9 +42,9 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo for _, n := range cntNodes { var stop bool for endpoint := range n.NetworkEndpoints() { - stop = s.execOnClient(ctx, endpoint, func(c TreeServiceClient) bool { + stop = s.execOnClient(ctx, endpoint, func(fCtx context.Context, c TreeServiceClient) bool { called = true - return f(c) + return f(fCtx, c) }) if called { break @@ -60,7 +60,7 @@ func (s *Service) forEachNode(ctx context.Context, cntNodes []netmapSDK.NodeInfo return nil } -func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(TreeServiceClient) bool) bool { +func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(context.Context, TreeServiceClient) bool) bool { ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.IterateNetworkEndpoints", trace.WithAttributes( attribute.String("endpoint", endpoint), @@ -73,5 +73,5 @@ func (s *Service) execOnClient(ctx context.Context, endpoint string, f func(Tree } s.log.Debug(ctx, logs.TreeRedirectingTreeServiceQuery, zap.String("endpoint", endpoint)) - return f(c) + return f(ctx, c) } diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go index a4bc0c97c..3994d6973 100644 --- a/pkg/services/tree/service.go +++ b/pkg/services/tree/service.go @@ -391,8 +391,8 @@ func (s *Service) GetSubTree(req *GetSubTreeRequest, srv TreeService_GetSubTreeS if pos < 0 { var cli TreeService_GetSubTreeClient var outErr error - err = s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetSubTree(srv.Context(), req) + err = s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetSubTree(fCtx, req) return true }) if err != nil { @@ -655,8 +655,8 @@ func (s *Service) GetOpLog(req *GetOpLogRequest, srv TreeService_GetOpLogServer) if pos < 0 { var cli TreeService_GetOpLogClient var outErr error - err := s.forEachNode(srv.Context(), ns, func(c TreeServiceClient) bool { - cli, outErr = c.GetOpLog(srv.Context(), req) + err := s.forEachNode(srv.Context(), ns, func(fCtx context.Context, c TreeServiceClient) bool { + cli, outErr = c.GetOpLog(fCtx, req) return true }) if err != nil { diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index 32297f9d7..d4040337d 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -76,8 +76,8 @@ func (s *Service) synchronizeAllTrees(ctx context.Context, cid cid.ID) error { var treesToSync []string var outErr error - err = s.forEachNode(ctx, nodes, func(c TreeServiceClient) bool { - resp, outErr = c.TreeList(ctx, req) + err = s.forEachNode(ctx, nodes, func(fCtx context.Context, c TreeServiceClient) bool { + resp, outErr = c.TreeList(fCtx, req) if outErr != nil { return false } From 
a27e0035083b8e66ee446c949f98405c1ac2791d Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Apr 2025 10:53:51 +0300 Subject: [PATCH 576/591] [#1709] qos: Add context.Done check before schedule request Do not push request to schedule queue, if context is already cancelled. Change-Id: Ieef837a7d423158e3dbb0c3b4efecaa20744c845 Signed-off-by: Dmitrii Stepanov --- internal/qos/limiter.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/qos/limiter.go b/internal/qos/limiter.go index 5851d7626..2d7de32fc 100644 --- a/internal/qos/limiter.go +++ b/internal/qos/limiter.go @@ -149,6 +149,11 @@ func (n *mClockLimiter) WriteRequest(ctx context.Context) (ReleaseFunc, error) { } func requestArrival(ctx context.Context, s scheduler, stats map[string]*stat) (ReleaseFunc, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } tag, ok := tagging.IOTagFromContext(ctx) if !ok { tag = IOTagClient.String() From 3a441f072ffb955c6513eb7dd7733da311590ad7 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 21 Apr 2025 12:13:32 +0300 Subject: [PATCH 577/591] [#1709] shard: Check if context canceled for shard iteration If context has already been canceled, then there is no need to check other shards. At the same time, it is necessary to avoid handling context cancellation in each handler. Therefore, the context check has been moved to the shard iteration method, which now returns an error. Change-Id: I70030ace36593ce7d2b8376bee39fe82e9dbf88f Signed-off-by: Dmitrii Stepanov --- internal/logs/logs.go | 1 + pkg/local_object_storage/engine/container.go | 27 ++++--- pkg/local_object_storage/engine/delete.go | 12 +-- pkg/local_object_storage/engine/exists.go | 6 +- pkg/local_object_storage/engine/get.go | 16 ++-- pkg/local_object_storage/engine/head.go | 6 +- pkg/local_object_storage/engine/inhume.go | 77 ++++++++++++-------- pkg/local_object_storage/engine/lock.go | 16 +++- pkg/local_object_storage/engine/put.go | 6 +- pkg/local_object_storage/engine/range.go | 16 ++-- pkg/local_object_storage/engine/select.go | 30 +++++--- pkg/local_object_storage/engine/shards.go | 22 +++++- 12 files changed, 149 insertions(+), 86 deletions(-) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index dedc7e12c..626372f43 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -198,6 +198,7 @@ const ( EngineInterruptProcessingTheExpiredLocks = "interrupt processing the expired locks" EngineInterruptGettingLockers = "can't get object's lockers" EngineInterruptProcessingTheDeletedLocks = "interrupt processing the deleted locks" + EngineInterruptProcessingTheExpiredTombstones = "interrupt processing the expired tombstones" EngineFailedToMoveShardInDegradedreadonlyModeMovingToReadonly = "failed to move shard in degraded-read-only mode, moving to read-only" EngineFailedToMoveShardInReadonlyMode = "failed to move shard in read-only mode" EngineShardIsMovedInReadonlyModeDueToErrorThreshold = "shard is moved in read-only mode due to error threshold" diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 03a299b93..e0617a832 100644 --- a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -48,8 +48,9 @@ func (e *StorageEngine) ContainerSize(ctx context.Context, prm ContainerSizePrm) defer elapsed("ContainerSize", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e.containerSize(ctx, prm) - return nil + var csErr error + res, csErr = 
e.containerSize(ctx, prm) + return csErr }) return @@ -69,8 +70,9 @@ func ContainerSize(ctx context.Context, e *StorageEngine, id cid.ID) (uint64, er return res.Size(), nil } -func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (res ContainerSizeRes) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { +func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) (ContainerSizeRes, error) { + var res ContainerSizeRes + err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { var csPrm shard.ContainerSizePrm csPrm.SetContainerID(prm.cnr) @@ -86,7 +88,7 @@ func (e *StorageEngine) containerSize(ctx context.Context, prm ContainerSizePrm) return false }) - return + return res, err } // ListContainers returns a unique container IDs presented in the engine objects. @@ -96,8 +98,9 @@ func (e *StorageEngine) ListContainers(ctx context.Context, _ ListContainersPrm) defer elapsed("ListContainers", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e.listContainers(ctx) - return nil + var lcErr error + res, lcErr = e.listContainers(ctx) + return lcErr }) return @@ -115,10 +118,10 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) { return res.Containers(), nil } -func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { +func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) { uniqueIDs := make(map[string]cid.ID) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.ListContainers(ctx, shard.ListContainersPrm{}) if err != nil { e.reportShardError(ctx, sh, "can't get list of containers", err) @@ -133,7 +136,9 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { } return false - }) + }); err != nil { + return ListContainersRes{}, err + } result := make([]cid.ID, 0, len(uniqueIDs)) for _, v := range uniqueIDs { @@ -142,5 +147,5 @@ func (e *StorageEngine) listContainers(ctx context.Context) ListContainersRes { return ListContainersRes{ containers: result, - } + }, nil } diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go index 5e5f65fa2..223cdbc48 100644 --- a/pkg/local_object_storage/engine/delete.go +++ b/pkg/local_object_storage/engine/delete.go @@ -71,7 +71,7 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // Removal of a big object is done in multiple stages: // 1. Remove the parent object. If it is locked or already removed, return immediately. // 2. Otherwise, search for all objects with a particular SplitID and delete them too. - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { var existsPrm shard.ExistsPrm existsPrm.Address = prm.addr @@ -116,20 +116,22 @@ func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) error { // If a parent object is removed we should set GC mark on each shard. 
return splitInfo == nil - }) + }); err != nil { + return err + } if locked.is { return new(apistatus.ObjectLocked) } if splitInfo != nil { - e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) + return e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID()) } return nil } -func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) { +func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) error { var fs objectSDK.SearchFilters fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID) @@ -142,7 +144,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo inhumePrm.ForceRemoval() } - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + return e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Select(ctx, selectPrm) if err != nil { e.log.Warn(ctx, logs.EngineErrorDuringSearchingForObjectChildren, diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 9d2b1c1b7..7dac9eb97 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -18,7 +18,7 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool exists := false locked := false - e.iterateOverSortedShards(shPrm.Address, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, shPrm.Address, func(_ int, sh hashedShard) (stop bool) { res, err := sh.Exists(ctx, shPrm) if err != nil { if client.IsErrObjectAlreadyRemoved(err) { @@ -50,7 +50,9 @@ func (e *StorageEngine) exists(ctx context.Context, shPrm shard.ExistsPrm) (bool } return false - }) + }); err != nil { + return false, false, err + } if alreadyRemoved { return false, false, new(apistatus.ObjectAlreadyRemoved) diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index 74c64bbb6..0694c53f3 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -78,7 +78,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return GetRes{}, err + } if it.SplitInfo != nil { return GetRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -97,7 +99,9 @@ func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) { return GetRes{}, it.OutError } - it.tryGetFromBlobstore(ctx) + if err := it.tryGetFromBlobstore(ctx); err != nil { + return GetRes{}, err + } if it.Object == nil { return GetRes{}, it.OutError @@ -133,8 +137,8 @@ type getShardIterator struct { ecInfoErr *objectSDK.ECInfoError } -func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.ShardPrm.SetIgnoreMeta(noMeta) @@ -187,13 +191,13 @@ func (i *getShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) { +func (i *getShardIterator) tryGetFromBlobstore(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it 
from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already visited. return false diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index d6892f129..d436dd411 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -82,7 +82,7 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) shPrm.SetAddress(prm.addr) shPrm.SetRaw(prm.raw) - e.iterateOverSortedShards(prm.addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, prm.addr, func(_ int, sh hashedShard) (stop bool) { shPrm.ShardLooksBad = sh.errorCount.Load() >= e.errorsThreshold res, err := sh.Head(ctx, shPrm) if err != nil { @@ -123,7 +123,9 @@ func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) } head = res.Object() return true - }) + }); err != nil { + return HeadRes{}, err + } if head != nil { return HeadRes{head: head}, nil diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e5a2396e1..e13f04927 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -158,7 +158,7 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL objectExists bool ) - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { objectExists = false prm.Address = addr @@ -209,7 +209,9 @@ func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkL // Continue if it's a root object. return !isRootObject - }) + }); err != nil { + return nil, err + } if retErr != nil { return nil, retErr @@ -229,7 +231,7 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e var err error var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { locked, err = h.IsLocked(ctx, addr) if err != nil { e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr)) @@ -238,7 +240,9 @@ func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, e } return locked - }) + }); err != nil { + return false, err + } if locked { return locked, nil @@ -258,7 +262,7 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I var allLocks []oid.ID var outErr error - e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(h hashedShard) (stop bool) { locks, err := h.GetLocks(ctx, addr) if err != nil { e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr)) @@ -266,7 +270,9 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I } allLocks = append(allLocks, locks...) 
return false - }) + }); err != nil { + return nil, err + } if len(allLocks) > 0 { return allLocks, nil } @@ -274,20 +280,23 @@ func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.I } func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredTombstones(ctx, addrs) select { case <-ctx.Done(): + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(ctx.Err())) return true default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredTombstones, zap.Error(err)) + } } func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleExpiredLocks(ctx, epoch, lockers) select { @@ -297,11 +306,13 @@ func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, l default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(err)) + } } func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) { - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { sh.HandleDeletedLocks(ctx, lockers) select { @@ -311,26 +322,25 @@ func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.A default: return false } - }) + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(err)) + } } func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) { if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerSizePrm - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -357,13 +367,15 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err())) @@ -381,12 +393,13 @@ func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerSize(id.EncodeToString()) } @@ -396,19 +409,16 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci if len(ids) == 0 { return } - idMap, err := e.selectNonExistentIDs(ctx, ids) if err != nil { return } - if len(idMap) == 0 { return } - var failed bool var prm shard.ContainerCountPrm - e.iterateOverUnsortedShards(func(sh 
hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -435,13 +445,15 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return len(idMap) == 0 - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed || len(idMap) == 0 { return } - e.iterateOverUnsortedShards(func(sh hashedShard) bool { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) bool { select { case <-ctx.Done(): e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err())) @@ -459,12 +471,13 @@ func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []ci } return false - }) - + }); err != nil { + e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(err)) + return + } if failed { return } - for id := range idMap { e.metrics.DeleteContainerCount(id.EncodeToString()) } diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 18fff9cad..3b0cf74f9 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -41,11 +41,19 @@ func (e *StorageEngine) Lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, locked []oid.ID) error { for i := range locked { - switch e.lockSingle(ctx, idCnr, locker, locked[i], true) { + st, err := e.lockSingle(ctx, idCnr, locker, locked[i], true) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: - switch e.lockSingle(ctx, idCnr, locker, locked[i], false) { + st, err = e.lockSingle(ctx, idCnr, locker, locked[i], false) + if err != nil { + return err + } + switch st { case 1: return logicerr.Wrap(new(apistatus.LockNonRegularObject)) case 0: @@ -61,13 +69,13 @@ func (e *StorageEngine) lock(ctx context.Context, idCnr cid.ID, locker oid.ID, l // - 0: fail // - 1: locking irregular object // - 2: ok -func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8) { +func (e *StorageEngine) lockSingle(ctx context.Context, idCnr cid.ID, locker, locked oid.ID, checkExists bool) (status uint8, retErr error) { // code is pretty similar to inhumeAddr, maybe unify? 
root := false var addrLocked oid.Address addrLocked.SetContainer(idCnr) addrLocked.SetObject(locked) - e.iterateOverSortedShards(addrLocked, func(_ int, sh hashedShard) (stop bool) { + retErr = e.iterateOverSortedShards(ctx, addrLocked, func(_ int, sh hashedShard) (stop bool) { defer func() { // if object is root we continue since information about it // can be presented in other shards diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index b348d13a2..10cf5ffd5 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -96,7 +96,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } var shRes putToShardRes - e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) { + if err := e.iterateOverSortedShards(ctx, addr, func(_ int, sh hashedShard) (stop bool) { e.mtx.RLock() _, ok := e.shards[sh.ID().String()] e.mtx.RUnlock() @@ -106,7 +106,9 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error { } shRes = e.putToShard(ctx, sh, addr, prm.Object, prm.IsIndexedContainer) return shRes.status != putToShardUnknown - }) + }); err != nil { + return err + } switch shRes.status { case putToShardUnknown: return errPutShard diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index a468cf594..7ec4742d8 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -93,7 +93,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error Engine: e, } - it.tryGetWithMeta(ctx) + if err := it.tryGetWithMeta(ctx); err != nil { + return RngRes{}, err + } if it.SplitInfo != nil { return RngRes{}, logicerr.Wrap(objectSDK.NewSplitInfoError(it.SplitInfo)) @@ -109,7 +111,9 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error return RngRes{}, it.OutError } - it.tryGetFromBlobstor(ctx) + if err := it.tryGetFromBlobstor(ctx); err != nil { + return RngRes{}, err + } if it.Object == nil { return RngRes{}, it.OutError @@ -157,8 +161,8 @@ type getRangeShardIterator struct { Engine *StorageEngine } -func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { +func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) error { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { noMeta := sh.GetMode().NoMetabase() i.HasDegraded = i.HasDegraded || noMeta i.ShardPrm.SetIgnoreMeta(noMeta) @@ -209,13 +213,13 @@ func (i *getRangeShardIterator) tryGetWithMeta(ctx context.Context) { }) } -func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) { +func (i *getRangeShardIterator) tryGetFromBlobstor(ctx context.Context) error { // If the object is not found but is present in metabase, // try to fetch it from blobstor directly. If it is found in any // blobstor, increase the error counter for the shard which contains the meta. i.ShardPrm.SetIgnoreMeta(true) - i.Engine.iterateOverSortedShards(i.Address, func(_ int, sh hashedShard) (stop bool) { + return i.Engine.iterateOverSortedShards(ctx, i.Address, func(_ int, sh hashedShard) (stop bool) { if sh.GetMode().NoMetabase() { // Already processed it without a metabase. 
return false diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index fc8b4a9a7..4243a5481 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -54,14 +54,15 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe defer elapsed("Select", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e._select(ctx, prm) - return nil + var sErr error + res, sErr = e._select(ctx, prm) + return sErr }) return } -func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { +func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) { addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) @@ -69,7 +70,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { shPrm.SetContainerID(prm.cnr, prm.indexedContainer) shPrm.SetFilters(prm.filters) - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.Select(ctx, shPrm) if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -84,11 +85,13 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - } + }, nil } // List returns `limit` available physically storage object addresses in engine. @@ -98,20 +101,21 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) SelectRes { func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes, err error) { defer elapsed("List", e.metrics.AddMethodDuration)() err = e.execIfNotBlocked(func() error { - res = e.list(ctx, limit) - return nil + var lErr error + res, lErr = e.list(ctx, limit) + return lErr }) return } -func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { +func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) { addrList := make([]oid.Address, 0, limit) uniqueMap := make(map[string]struct{}) ln := uint64(0) // consider iterating over shuffled shards - e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) { + if err := e.iterateOverUnsortedShards(ctx, func(sh hashedShard) (stop bool) { res, err := sh.List(ctx) // consider limit result of shard iterator if err != nil { e.reportShardError(ctx, sh, "could not select objects from shard", err) @@ -130,11 +134,13 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) SelectRes { } return false - }) + }); err != nil { + return SelectRes{}, err + } return SelectRes{ addrList: addrList, - } + }, nil } // Select selects objects from local storage using provided filters. 
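The shards.go hunk below carries the core of this patch: both iteration helpers now check ctx.Done() on every pass and return ctx.Err() to the caller. A simplified, self-contained sketch of the same pattern (a generic helper invented for illustration, not the engine's actual code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// forEach bails out with ctx.Err() as soon as the context is cancelled, so
// callers no longer need to repeat the cancellation check in every handler.
func forEach[T any](ctx context.Context, items []T, handler func(T) (stop bool)) error {
	for _, item := range items {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		if handler(item) {
			break
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already cancelled: no item is visited

	err := forEach(ctx, []int{1, 2, 3}, func(i int) bool {
		fmt.Println("visited", i) // never reached
		return false
	})
	fmt.Println(errors.Is(err, context.Canceled)) // true
}
```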
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go index 6e6c08bb5..69067c500 100644 --- a/pkg/local_object_storage/engine/shards.go +++ b/pkg/local_object_storage/engine/shards.go @@ -280,20 +280,32 @@ func (e *StorageEngine) unsortedShards() []hashedShard { return shards } -func (e *StorageEngine) iterateOverSortedShards(addr oid.Address, handler func(int, hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverSortedShards(ctx context.Context, addr oid.Address, handler func(int, hashedShard) (stop bool)) error { for i, sh := range e.sortShards(addr) { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(i, sh) { break } } + return nil } -func (e *StorageEngine) iterateOverUnsortedShards(handler func(hashedShard) (stop bool)) { +func (e *StorageEngine) iterateOverUnsortedShards(ctx context.Context, handler func(hashedShard) (stop bool)) error { for _, sh := range e.unsortedShards() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } if handler(sh) { break } } + return nil } // SetShardMode sets mode of the shard with provided identifier. @@ -433,7 +445,7 @@ func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address var siErr *objectSDK.SplitInfoError var ecErr *objectSDK.ECInfoError - e.iterateOverUnsortedShards(func(hs hashedShard) (stop bool) { + if itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { res, exErr := hs.Exists(ctx, prm) if exErr != nil { if client.IsErrObjectAlreadyRemoved(exErr) { @@ -463,6 +475,8 @@ func (e *StorageEngine) ListShardsForObject(ctx context.Context, obj oid.Address info = append(info, hs.DumpInfo()) } return false - }) + }); itErr != nil { + return nil, itErr + } return info, err } From 908b08108d7781bad2054d449c427389f2dfe9f3 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 21 Apr 2025 11:59:21 +0300 Subject: [PATCH 578/591] [#1689] engine/test: Fix `TestInhumeIfObjectDoesntExist` test Removed an invalid test case which used exclusive options, added object status check after removal. 
Change-Id: I4551c0e4532fb669ee6c72871dc4bd34707d8469 Signed-off-by: Aleksey Savchuk --- .../engine/inhume_test.go | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 0601a43f2..fa73dcad5 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -11,6 +11,7 @@ import ( meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -251,21 +252,23 @@ func TestInhumeIfObjectDoesntExist(t *testing.T) { t.Run("inhume with tombstone", func(t *testing.T) { testInhumeLockedIfObjectDoesntExist(t, true, false) }) - t.Run("force inhume without tombstone", func(t *testing.T) { + t.Run("force inhume", func(t *testing.T) { testInhumeLockedIfObjectDoesntExist(t, false, true) }) - t.Run("force inhume with tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, true, true) - }) }) } func testInhumeLockedIfObjectDoesntExist(t *testing.T, withTombstone, withForce bool) { t.Parallel() + // Due to the tests design it is possible to set both the options, + // however removal with tombstone and force removal are exclusive. + require.False(t, withTombstone && withForce) + var ( errLocked *apistatus.ObjectLocked inhumePrm InhumePrm + headPrm HeadPrm ctx = context.Background() container = cidtest.ID() object = oidtest.Address() @@ -291,9 +294,17 @@ func testInhumeLockedIfObjectDoesntExist(t *testing.T, withTombstone, withForce } err = engine.Inhume(ctx, inhumePrm) - if withForce { - require.NoError(t, err) - } else { + if !withForce { require.ErrorAs(t, err, &errLocked) + return + } + require.NoError(t, err) + + headPrm.WithAddress(object) + _, err = engine.Head(ctx, headPrm) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) } } From 487cb34c5dcf21bf385d8ddf8a397652260101a8 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 21 Apr 2025 13:26:32 +0300 Subject: [PATCH 579/591] [#1689] engine/test: Refactor `TestInhumeIfObjectDoesntExist` test - Use the same storage engine in multiple parallel tests - Move `Lock`, `Inhume`, `Head` calls to separate functions Change-Id: I00849c1f068f0ab8d92061719d67d6fe786200db Signed-off-by: Aleksey Savchuk --- .../engine/inhume_test.go | 84 ++++++++++--------- 1 file changed, 46 insertions(+), 38 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index fa73dcad5..813c329f2 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -245,66 +245,74 @@ func benchmarkInhumeMultipart(b *testing.B, numShards, numObjects int) { } func TestInhumeIfObjectDoesntExist(t *testing.T) { + const numShards = 4 + + engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine + t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) + t.Run("object is locked", func(t *testing.T) { t.Run("inhume without tombstone", 
func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, false, false) + testInhumeLockedIfObjectDoesntExist(t, engine, false, false) }) t.Run("inhume with tombstone", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, true, false) + testInhumeLockedIfObjectDoesntExist(t, engine, true, false) }) t.Run("force inhume", func(t *testing.T) { - testInhumeLockedIfObjectDoesntExist(t, false, true) + testInhumeLockedIfObjectDoesntExist(t, engine, false, true) }) }) } -func testInhumeLockedIfObjectDoesntExist(t *testing.T, withTombstone, withForce bool) { +func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { t.Parallel() - // Due to the tests design it is possible to set both the options, - // however removal with tombstone and force removal are exclusive. - require.False(t, withTombstone && withForce) + object := oidtest.Address() + require.NoError(t, testLockObject(e, object)) - var ( - errLocked *apistatus.ObjectLocked - inhumePrm InhumePrm - headPrm HeadPrm - ctx = context.Background() - container = cidtest.ID() - object = oidtest.Address() - lock = oidtest.ID() - tombstone = oidtest.Address() - ) - object.SetContainer(container) - tombstone.SetContainer(container) - - engine := testNewEngine(t).setShardsNum(t, 4).prepare(t).engine - defer func() { require.NoError(t, engine.Close(ctx)) }() - - err := engine.Lock(ctx, container, lock, []oid.ID{object.Object()}) - require.NoError(t, err) - - if withTombstone { - inhumePrm.WithTarget(tombstone, object) - } else { - inhumePrm.MarkAsGarbage(object) - } - if withForce { - inhumePrm.WithForceRemoval() - } - - err = engine.Inhume(ctx, inhumePrm) + err := testInhumeObject(t, e, object, withTombstone, withForce) if !withForce { + var errLocked *apistatus.ObjectLocked require.ErrorAs(t, err, &errLocked) return } require.NoError(t, err) - headPrm.WithAddress(object) - _, err = engine.Head(ctx, headPrm) + err = testHeadObject(e, object) if withTombstone { require.True(t, client.IsErrObjectAlreadyRemoved(err)) } else { require.True(t, client.IsErrObjectNotFound(err)) } } + +func testLockObject(e *StorageEngine, obj oid.Address) error { + return e.Lock(context.Background(), obj.Container(), oidtest.ID(), []oid.ID{obj.Object()}) +} + +func testInhumeObject(t testing.TB, e *StorageEngine, obj oid.Address, withTombstone, withForce bool) error { + tombstone := oidtest.Address() + tombstone.SetContainer(obj.Container()) + + // Due to the tests design it is possible to set both the options, + // however removal with tombstone and force removal are exclusive. + require.False(t, withTombstone && withForce) + + var inhumePrm InhumePrm + if withTombstone { + inhumePrm.WithTarget(tombstone, obj) + } else { + inhumePrm.MarkAsGarbage(obj) + } + if withForce { + inhumePrm.WithForceRemoval() + } + return e.Inhume(context.Background(), inhumePrm) +} + +func testHeadObject(e *StorageEngine, obj oid.Address) error { + var headPrm HeadPrm + headPrm.WithAddress(obj) + + _, err := e.Head(context.Background(), headPrm) + return err +} From 77b8545601f13ba4e596542be58b7e6f9c8e36d1 Mon Sep 17 00:00:00 2001 From: Aleksey Savchuk Date: Mon, 21 Apr 2025 13:31:54 +0300 Subject: [PATCH 580/591] [#1689] engine: Fix removal of objects not found on node Ensured correct object status if the object is not found on a node. Fixed regression introduced in #1450. 
Besides an object not being found on any shard, it also important to remove it anyway in order to populate the metabase indexes because they are responsible for the correct object status, i.e., the status will be `object not found` without the indexes, the status will be `object is already removed` with the indexes. Change-Id: I6237fbc0f8bb0c4f2a51ada3a68f52950050e660 Signed-off-by: Aleksey Savchuk --- pkg/local_object_storage/engine/inhume.go | 100 +++++++++++++++--- .../engine/inhume_test.go | 24 +++++ 2 files changed, 107 insertions(+), 17 deletions(-) diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e13f04927..e5f7072e2 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -74,7 +74,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) error { } func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { - addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) + addrsPerShard, notFoundObjects, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval) if err != nil { return err } @@ -84,8 +84,6 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { shPrm.ForceRemoval() } - var errLocked *apistatus.ObjectLocked - for shardID, addrs := range addrsPerShard { if prm.tombstone != nil { shPrm.SetTarget(*prm.tombstone, addrs...) @@ -103,39 +101,107 @@ func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) error { } if _, err := sh.Inhume(ctx, shPrm); err != nil { - switch { - case errors.As(err, &errLocked): - case errors.Is(err, shard.ErrLockObjectRemoval): - case errors.Is(err, shard.ErrReadOnlyMode): - case errors.Is(err, shard.ErrDegradedMode): - default: - e.reportShardError(ctx, sh, "couldn't inhume object in shard", err) - } + e.reportInhumeError(ctx, err, sh) return err } } - return nil + return e.inhumeNotFoundObjects(ctx, notFoundObjects, prm) +} + +func (e *StorageEngine) reportInhumeError(ctx context.Context, err error, hs hashedShard) { + if err == nil { + return + } + + var errLocked *apistatus.ObjectLocked + switch { + case errors.As(err, &errLocked): + case errors.Is(err, shard.ErrLockObjectRemoval): + case errors.Is(err, shard.ErrReadOnlyMode): + case errors.Is(err, shard.ErrDegradedMode): + default: + e.reportShardError(ctx, hs, "couldn't inhume object in shard", err) + } +} + +// inhumeNotFoundObjects removes object which are not found on any shard. +// +// Besides an object not being found on any shard, it is also important to +// remove it anyway in order to populate the metabase indexes because they are +// responsible for the correct object status, i.e., the status will be `object +// not found` without the indexes, the status will be `object is already +// removed` with the indexes. +// +// It is suggested to evenly remove those objects on each shard with the batch +// size equal to 1 + floor(number of objects / number of shards). 
+func (e *StorageEngine) inhumeNotFoundObjects(ctx context.Context, addrs []oid.Address, prm InhumePrm) error { + if len(addrs) == 0 { + return nil + } + + var shPrm shard.InhumePrm + if prm.forceRemoval { + shPrm.ForceRemoval() + } + + numObjectsPerShard := 1 + len(addrs)/len(e.shards) + + var inhumeErr error + itErr := e.iterateOverUnsortedShards(ctx, func(hs hashedShard) (stop bool) { + numObjects := min(numObjectsPerShard, len(addrs)) + + if numObjects == 0 { + return true + } + + if prm.tombstone != nil { + shPrm.SetTarget(*prm.tombstone, addrs[:numObjects]...) + } else { + shPrm.MarkAsGarbage(addrs[:numObjects]...) + } + addrs = addrs[numObjects:] + + _, inhumeErr = hs.Inhume(ctx, shPrm) + e.reportInhumeError(ctx, inhumeErr, hs) + return inhumeErr != nil + }) + if inhumeErr != nil { + return inhumeErr + } + return itErr } // groupObjectsByShard groups objects based on the shard(s) they are stored on. // // If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of // the objects are locked. -func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) { - groups := make(map[string][]oid.Address) +// +// Returns two sets of objects: found objects which are grouped per shard and +// not found object. Not found objects are objects which are not found on any +// shard. This can happen if a node is a container node but doesn't participate +// in a replica group of the object. +func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (groups map[string][]oid.Address, notFoundObjects []oid.Address, err error) { + groups = make(map[string][]oid.Address) + var ids []string for _, addr := range addrs { - ids, err := e.findShards(ctx, addr, checkLocked) + ids, err = e.findShards(ctx, addr, checkLocked) if err != nil { - return nil, err + return } + + if len(ids) == 0 { + notFoundObjects = append(notFoundObjects, addr) + continue + } + for _, id := range ids { groups[id] = append(groups[id], addr) } } - return groups, nil + return } // findShards determines the shard(s) where the object is stored. 
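To make the batch-size rule from the comment above concrete (batch = 1 + floor(number of objects / number of shards)): with 10 not-found objects spread over 4 shards the batch is 1 + 10/4 = 3, so the shards remove 3, 3, 3 and 1 objects respectively. A self-contained sketch with placeholder names instead of real addresses (not the engine's types):

```go
package main

import "fmt"

func main() {
	// 10 objects that were not found on any shard (placeholder identifiers).
	addrs := make([]string, 10)
	for i := range addrs {
		addrs[i] = fmt.Sprintf("obj-%d", i)
	}
	const numShards = 4

	batch := 1 + len(addrs)/numShards // 1 + 10/4 = 3
	for shard := 0; shard < numShards && len(addrs) > 0; shard++ {
		n := min(batch, len(addrs))
		fmt.Printf("shard %d inhumes %d objects\n", shard, n) // 3, 3, 3, 1
		addrs = addrs[n:]
	}
}
```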
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 813c329f2..0e268cd23 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -250,6 +250,16 @@ func TestInhumeIfObjectDoesntExist(t *testing.T) { engine := testNewEngine(t).setShardsNum(t, numShards).prepare(t).engine t.Cleanup(func() { require.NoError(t, engine.Close(context.Background())) }) + t.Run("inhume without tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, false) + }) + t.Run("inhume with tombstone", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, true, false) + }) + t.Run("force inhume", func(t *testing.T) { + testInhumeIfObjectDoesntExist(t, engine, false, true) + }) + t.Run("object is locked", func(t *testing.T) { t.Run("inhume without tombstone", func(t *testing.T) { testInhumeLockedIfObjectDoesntExist(t, engine, false, false) @@ -263,6 +273,20 @@ func TestInhumeIfObjectDoesntExist(t *testing.T) { }) } +func testInhumeIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { + t.Parallel() + + object := oidtest.Address() + require.NoError(t, testInhumeObject(t, e, object, withTombstone, withForce)) + + err := testHeadObject(e, object) + if withTombstone { + require.True(t, client.IsErrObjectAlreadyRemoved(err)) + } else { + require.True(t, client.IsErrObjectNotFound(err)) + } +} + func testInhumeLockedIfObjectDoesntExist(t *testing.T, e *StorageEngine, withTombstone, withForce bool) { t.Parallel() From 6bdbe6a18b2fc666af67837a3b5ce6434ddbea96 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 22 Apr 2025 17:37:29 +0300 Subject: [PATCH 581/591] [#1689] adm: Fix `printSubjectInfo` method * Fix print format for primary and additional subject keys from frostfsid contract. Since the format corresponds to `neo-go wallet dump-keys` output format. Change-Id: I9ae9fd43bfb378970786b97bd3d9d7f739466ae6 Signed-off-by: Airat Arifullin --- .../internal/modules/morph/frostfsid/frostfsid.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go index 8ae606f1a..7f777db98 100644 --- a/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go +++ b/cmd/frostfs-adm/internal/modules/morph/frostfsid/frostfsid.go @@ -1,6 +1,7 @@ package frostfsid import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -604,7 +605,7 @@ func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclie cmd.Printf("Address: %s\n", address.Uint160ToString(addr)) pk := "" if subj.PrimaryKey != nil { - pk = subj.PrimaryKey.String() + pk = hex.EncodeToString(subj.PrimaryKey.Bytes()) } cmd.Printf("Primary key: %s\n", pk) cmd.Printf("Name: %s\n", subj.Name) @@ -614,7 +615,7 @@ func printSubjectInfo(cmd *cobra.Command, addr util.Uint160, subj *frostfsidclie for _, key := range subj.AdditionalKeys { k := "" if key != nil { - k = key.String() + k = hex.EncodeToString(key.Bytes()) } cmd.Printf("- %s\n", k) } From b0f39dca16c9564c6a68da874ceab705288dde9a Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Tue, 22 Apr 2025 18:14:00 +0300 Subject: [PATCH 582/591] [#1721] object: Make `CheckAPE` always validate bearer token * The bearer token must always be validated, regardless of whether it has been impersonated; * Fix unit-tests for tree service which check verification with bearer token. 
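As a rough, hypothetical sketch of the resulting control flow (the stub types below are invented for illustration and stand in for the real bearer and policy-engine types): the token is now validated unconditionally, and impersonation only decides which chain router is used afterwards.

```go
package main

import (
	"errors"
	"fmt"
)

// Stub bearer token: just enough state to show the branching.
type bearerToken struct {
	valid       bool
	impersonate bool
}

func validate(bt bearerToken) error {
	if !bt.valid {
		return errors.New("invalid signature")
	}
	return nil
}

// selectRouter mirrors the branching in CheckAPE after this patch: a nil token
// and an impersonation token both end up with the default router, while a
// regular token feeds its APE override chains into a bearer-aware router.
func selectRouter(bt *bearerToken) (string, error) {
	if bt == nil {
		return "default router with local overrides", nil
	}
	if err := validate(*bt); err != nil { // always validated now
		return "", fmt.Errorf("bearer validation error: %w", err)
	}
	if bt.impersonate {
		return "default router with local overrides", nil
	}
	return "bearer chain feed router", nil
}

func main() {
	r, _ := selectRouter(&bearerToken{valid: true, impersonate: true})
	fmt.Println(r) // default router with local overrides

	_, err := selectRouter(&bearerToken{valid: false, impersonate: true})
	fmt.Println(err) // bearer validation error: invalid signature
}
```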
Close #1721 Change-Id: I5f715c498ae10b2e758244e60b8f21849328a04f Signed-off-by: Airat Arifullin --- pkg/services/common/ape/checker.go | 12 ++++++++---- pkg/services/tree/signature_test.go | 28 +++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index c9b0b7363..a2e628144 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -73,14 +73,18 @@ func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStora // CheckAPE performs the common policy-engine check logic on a prepared request. func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { var cr policyengine.ChainRouter - if prm.BearerToken != nil && !prm.BearerToken.Impersonate() { + if prm.BearerToken != nil { var err error if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil { return fmt.Errorf("bearer validation error: %w", err) } - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) - if err != nil { - return fmt.Errorf("create chain router error: %w", err) + if prm.BearerToken.Impersonate() { + cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) + } else { + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) + if err != nil { + return fmt.Errorf("create chain router error: %w", err) + } } } else { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index f5659d5e2..dd37b4191 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -238,14 +238,40 @@ func TestMessageSign(t *testing.T) { t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token + bt.SetExp(10) bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + + t.Run("impersonate but invalid signer", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) require.NoError(t, bt.Sign(privs[1].PrivateKey)) req.Body.BearerToken = bt.Marshal() require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) - require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) bt := 
testBearerToken(cid1, privs[1].PublicKey(), privs[2].PublicKey()) From 5b6cba04cb5c3fb0589caa6e26bdc67c8ef183e2 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Fri, 18 Apr 2025 18:17:00 +0300 Subject: [PATCH 583/591] [#1689] node: Drop `node.relay` config It is not used and not tested properly, so drop it. Change-Id: I7c90c7391ecb4be17459415d209811ba1a693f7a Signed-off-by: Dmitrii Stepanov --- cmd/frostfs-node/attributes.go | 4 --- cmd/frostfs-node/config.go | 27 +++++---------------- cmd/frostfs-node/config/node/config.go | 8 ------ cmd/frostfs-node/config/node/config_test.go | 5 ---- cmd/frostfs-node/netmap.go | 20 +++++---------- config/example/node.env | 1 - config/example/node.json | 1 - config/example/node.yaml | 2 -- docs/storage-node-configuration.md | 2 -- 9 files changed, 12 insertions(+), 58 deletions(-) diff --git a/cmd/frostfs-node/attributes.go b/cmd/frostfs-node/attributes.go index 64c3beba7..ce8ae9662 100644 --- a/cmd/frostfs-node/attributes.go +++ b/cmd/frostfs-node/attributes.go @@ -6,9 +6,5 @@ import ( ) func parseAttributes(c *cfg) { - if nodeconfig.Relay(c.appCfg) { - return - } - fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg))) } diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go index b688acfde..96274e625 100644 --- a/cmd/frostfs-node/config.go +++ b/cmd/frostfs-node/config.go @@ -651,7 +651,6 @@ type cfgNetmap struct { state *networkState - needBootstrap bool reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime } @@ -710,11 +709,9 @@ func initCfg(appCfg *config.Config) *cfg { key := nodeconfig.Key(appCfg) - relayOnly := nodeconfig.Relay(appCfg) - netState := newNetworkState() - c.shared = initShared(appCfg, key, netState, relayOnly) + c.shared = initShared(appCfg, key, netState) netState.metrics = c.metricsCollector @@ -734,7 +731,7 @@ func initCfg(appCfg *config.Config) *cfg { c.cfgFrostfsID = initFrostfsID(appCfg) - c.cfgNetmap = initNetmap(appCfg, netState, relayOnly) + c.cfgNetmap = initNetmap(appCfg, netState) c.cfgGRPC = initCfgGRPC() @@ -780,12 +777,8 @@ func initSdNotify(appCfg *config.Config) bool { return false } -func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState, relayOnly bool) shared { - var netAddr network.AddressGroup - - if !relayOnly { - netAddr = nodeconfig.BootstrapAddresses(appCfg) - } +func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkState) shared { + netAddr := nodeconfig.BootstrapAddresses(appCfg) persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path()) fatalOnErr(err) @@ -836,18 +829,15 @@ func internalNetConfig(appCfg *config.Config, m metrics.MultinetMetrics) interna return result } -func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap { +func initNetmap(appCfg *config.Config, netState *networkState) cfgNetmap { netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize) fatalOnErr(err) - var reBootstrapTurnedOff atomic.Bool - reBootstrapTurnedOff.Store(relayOnly) return cfgNetmap{ scriptHash: contractsconfig.Netmap(appCfg), state: netState, workerPool: netmapWorkerPool, - needBootstrap: !relayOnly, - reBoostrapTurnedOff: &reBootstrapTurnedOff, + reBoostrapTurnedOff: &atomic.Bool{}, } } @@ -1256,11 +1246,6 @@ func (c *cfg) bootstrap(ctx context.Context) error { return bootstrapOnline(ctx, c) } -// needBootstrap checks if local node should be registered in network on bootup. 
-func (c *cfg) needBootstrap() bool { - return c.cfgNetmap.needBootstrap -} - type dCmp struct { name string reloadFunc func() error diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 81b191e96..c50718c5f 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -131,14 +131,6 @@ func Attributes(c *config.Config) (attrs []string) { return } -// Relay returns the value of "relay" config parameter -// from "node" section. -// -// Returns false if the value is not set. -func Relay(c *config.Config) bool { - return config.BoolSafe(c.Sub(subsection), "relay") -} - // PersistentSessions returns structure that provides access to "persistent_sessions" // subsection of "node" section. func PersistentSessions(c *config.Config) PersistentSessionsConfig { diff --git a/cmd/frostfs-node/config/node/config_test.go b/cmd/frostfs-node/config/node/config_test.go index 7b9adecf4..9af1dc038 100644 --- a/cmd/frostfs-node/config/node/config_test.go +++ b/cmd/frostfs-node/config/node/config_test.go @@ -29,12 +29,10 @@ func TestNodeSection(t *testing.T) { ) attribute := Attributes(empty) - relay := Relay(empty) persisessionsPath := PersistentSessions(empty).Path() persistatePath := PersistentState(empty).Path() require.Empty(t, attribute) - require.Equal(t, false, relay) require.Equal(t, "", persisessionsPath) require.Equal(t, PersistentStatePathDefault, persistatePath) }) @@ -45,7 +43,6 @@ func TestNodeSection(t *testing.T) { key := Key(c) addrs := BootstrapAddresses(c) attributes := Attributes(c) - relay := Relay(c) wKey := Wallet(c) persisessionsPath := PersistentSessions(c).Path() persistatePath := PersistentState(c).Path() @@ -87,8 +84,6 @@ func TestNodeSection(t *testing.T) { return false }) - require.Equal(t, true, relay) - require.Len(t, attributes, 2) require.Equal(t, "Price:11", attributes[0]) require.Equal(t, "UN-LOCODE:RU MSK", attributes[1]) diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go index 82b799e4c..7dfb4fe12 100644 --- a/cmd/frostfs-node/netmap.go +++ b/cmd/frostfs-node/netmap.go @@ -187,7 +187,7 @@ func addNewEpochNotificationHandlers(c *cfg) { c.updateContractNodeInfo(ctx, e) - if !c.needBootstrap() || c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 + if c.cfgNetmap.reBoostrapTurnedOff.Load() { // fixes #470 return } @@ -209,14 +209,12 @@ func addNewEpochNotificationHandlers(c *cfg) { // bootstrapNode adds current node to the Network map. // Must be called after initNetmapService. 
func bootstrapNode(ctx context.Context, c *cfg) { - if c.needBootstrap() { - if c.IsMaintenance() { - c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) - return - } - err := c.bootstrap(ctx) - fatalOnErrDetails("bootstrap error", err) + if c.IsMaintenance() { + c.log.Info(ctx, logs.FrostFSNodeNodeIsUnderMaintenanceSkipInitialBootstrap) + return } + err := c.bootstrap(ctx) + fatalOnErrDetails("bootstrap error", err) } func addNetmapNotificationHandler(c *cfg, sTyp string, h event.Handler) { @@ -352,8 +350,6 @@ func addNewEpochAsyncNotificationHandler(c *cfg, h event.Handler) { ) } -var errRelayBootstrap = errors.New("setting netmap status is forbidden in relay mode") - func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) error { switch st { default: @@ -365,10 +361,6 @@ func (c *cfg) SetNetmapStatus(ctx context.Context, st control.NetmapStatus) erro c.stopMaintenance(ctx) - if !c.needBootstrap() { - return errRelayBootstrap - } - if st == control.NetmapStatus_ONLINE { c.cfgNetmap.reBoostrapTurnedOff.Store(false) return bootstrapOnline(ctx, c) diff --git a/config/example/node.env b/config/example/node.env index e7d7a6cc8..9a2426358 100644 --- a/config/example/node.env +++ b/config/example/node.env @@ -22,7 +22,6 @@ FROSTFS_NODE_WALLET_PASSWORD=password FROSTFS_NODE_ADDRESSES="s01.frostfs.devenv:8080 /dns4/s02.frostfs.devenv/tcp/8081 grpc://127.0.0.1:8082 grpcs://localhost:8083" FROSTFS_NODE_ATTRIBUTE_0=Price:11 FROSTFS_NODE_ATTRIBUTE_1="UN-LOCODE:RU MSK" -FROSTFS_NODE_RELAY=true FROSTFS_NODE_PERSISTENT_SESSIONS_PATH=/sessions FROSTFS_NODE_PERSISTENT_STATE_PATH=/state FROSTFS_NODE_LOCODE_DB_PATH=/path/to/locode/db diff --git a/config/example/node.json b/config/example/node.json index 3f7854d98..6b7a9c2c6 100644 --- a/config/example/node.json +++ b/config/example/node.json @@ -37,7 +37,6 @@ ], "attribute_0": "Price:11", "attribute_1": "UN-LOCODE:RU MSK", - "relay": true, "persistent_sessions": { "path": "/sessions" }, diff --git a/config/example/node.yaml b/config/example/node.yaml index 32f0cba67..2d4bc90fb 100644 --- a/config/example/node.yaml +++ b/config/example/node.yaml @@ -34,7 +34,6 @@ node: - grpcs://localhost:8083 attribute_0: "Price:11" attribute_1: UN-LOCODE:RU MSK - relay: true # start Storage node in relay mode without bootstrapping into the Network map persistent_sessions: path: /sessions # path to persistent session tokens file of Storage node (default: in-memory sessions) persistent_state: @@ -141,7 +140,6 @@ rpc: max_ops: 10000 storage: - # note: shard configuration can be omitted for relay node (see `node.relay`) shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors) shard: diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md index 14ebb53b3..da9fdfed0 100644 --- a/docs/storage-node-configuration.md +++ b/docs/storage-node-configuration.md @@ -410,7 +410,6 @@ node: - "Price:11" - "UN-LOCODE:RU MSK" - "key:value" - relay: false persistent_sessions: path: /sessions persistent_state: @@ -424,7 +423,6 @@ node: | `wallet` | [Wallet config](#wallet-subsection) | | Wallet configuration. Has no effect if `key` is provided. | | `addresses` | `[]string` | | Addresses advertised in the netmap. | | `attribute` | `[]string` | | Node attributes as a list of key-value pairs in `:` format. | -| `relay` | `bool` | | Enable relay mode. 
| | `persistent_sessions` | [Persistent sessions config](#persistent_sessions-subsection) | | Persistent session token store configuration. | | `persistent_state` | [Persistent state config](#persistent_state-subsection) | | Persistent state configuration. | | `locode_db_path` | `string` | empty | Path to UN/LOCODE [database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/) for FrostFS. | From 652237500fb9e1618eb66310d3f2a51c599a033e Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Thu, 24 Apr 2025 16:54:47 +0300 Subject: [PATCH 584/591] [#1689] tree: Dial tree service after create connection on sync Background trees sync creates grpc connection with `grpc.WithDefaultCallOptions(grpc.WaitForReady(true))` option. When grpc connection created with this option, client will wait until a connection becomes available or the RPC's deadline is reached. As background sync has no timeout in context, so in case of client is in TRANSIENT_FAILURE RPC call will hang forever. Change-Id: I17c8c1d2779bb81c541f47dd0e558e0b8ed2e7c1 Signed-off-by: Dmitrii Stepanov --- pkg/services/tree/cache.go | 36 ++++++------------------------------ pkg/services/tree/sync.go | 28 +++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go index 462c8554f..a11700771 100644 --- a/pkg/services/tree/cache.go +++ b/pkg/services/tree/cache.go @@ -66,7 +66,12 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl } } - cc, err := c.dialTreeService(ctx, netmapAddr) + var netAddr network.Address + if err := netAddr.FromString(netmapAddr); err != nil { + return nil, err + } + + cc, err := dialTreeService(ctx, netAddr, c.key, c.ds) lastTry := time.Now() c.Lock() @@ -83,32 +88,3 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl return NewTreeServiceClient(cc), nil } - -func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) { - var netAddr network.Address - if err := netAddr.FromString(netmapAddr); err != nil { - return nil, err - } - - cc, err := createConnection(netAddr, grpc.WithContextDialer(c.ds.GrpcContextDialer())) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) - defer cancel() - - req := &HealthcheckRequest{ - Body: &HealthcheckRequest_Body{}, - } - if err := SignMessage(req, c.key); err != nil { - return nil, err - } - - // perform some request to check connection - if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { - _ = cc.Close() - return nil, err - } - return cc, nil -} diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go index d4040337d..af355639f 100644 --- a/pkg/services/tree/sync.go +++ b/pkg/services/tree/sync.go @@ -2,6 +2,7 @@ package tree import ( "context" + "crypto/ecdsa" "crypto/sha256" "crypto/tls" "errors" @@ -14,6 +15,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net" "git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos" containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama" @@ -304,7 +306,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, continue } - cc, err := createConnection(a, grpc.WithContextDialer(s.ds.GrpcContextDialer())) + cc, err := dialTreeService(ctx, a, 
s.key, s.ds) if err != nil { s.log.Warn(ctx, logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr)) continue @@ -342,6 +344,30 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64, return from } +func dialTreeService(ctx context.Context, netAddr network.Address, key *ecdsa.PrivateKey, ds *net.DialerSource) (*grpc.ClientConn, error) { + cc, err := createConnection(netAddr, grpc.WithContextDialer(ds.GrpcContextDialer())) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout) + defer cancel() + + req := &HealthcheckRequest{ + Body: &HealthcheckRequest_Body{}, + } + if err := SignMessage(req, key); err != nil { + return nil, err + } + + // perform some request to check connection + if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil { + _ = cc.Close() + return nil, err + } + return cc, nil +} + func createConnection(a network.Address, opts ...grpc.DialOption) (*grpc.ClientConn, error) { host, isTLS, err := client.ParseURI(a.URIAddr()) if err != nil { From 8e2f919df0fe4525bf25fcbe1fa11f6af642637e Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 28 Apr 2025 20:07:56 +0300 Subject: [PATCH 585/591] [#1689] go.mod: Bump SDK version * Fix `APEOverride` method usage in ape checker. * Fix linter errors: factor out deprecated methods and packages. Change-Id: I8c939f4c58c2a4e3c4e795c7224d935d40ce6f24 Signed-off-by: Airat Arifullin --- cmd/frostfs-cli/modules/container/get.go | 4 +- cmd/frostfs-cli/modules/container/list.go | 4 +- go.mod | 24 ++++----- go.sum | 52 +++++++++---------- pkg/core/container/util.go | 4 +- .../internal/testutil/generators.go | 7 +-- .../internal/testutil/object.go | 2 +- pkg/services/common/ape/checker.go | 5 +- .../populate-metabase/internal/generate.go | 5 +- 9 files changed, 55 insertions(+), 52 deletions(-) diff --git a/cmd/frostfs-cli/modules/container/get.go b/cmd/frostfs-cli/modules/container/get.go index 8c4ab14f8..fac6eb2cd 100644 --- a/cmd/frostfs-cli/modules/container/get.go +++ b/cmd/frostfs-cli/modules/container/get.go @@ -93,9 +93,9 @@ func prettyPrintContainer(cmd *cobra.Command, cnr container.Container, jsonEncod cmd.Println("created:", container.CreatedAt(cnr)) cmd.Println("attributes:") - cnr.IterateAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf("\t%s=%s\n", key, val) - }) + } cmd.Println("placement policy:") commonCmd.ExitOnErr(cmd, "write policy: %w", cnr.PlacementPolicy().WriteStringTo((*stringWriter)(cmd))) diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go index bbb8da840..e4a023d91 100644 --- a/cmd/frostfs-cli/modules/container/list.go +++ b/cmd/frostfs-cli/modules/container/list.go @@ -102,9 +102,9 @@ func printContainer(cmd *cobra.Command, prmGet internalclient.GetContainerPrm, i cmd.Println(id.String()) if flagVarListPrintAttr { - cnr.IterateUserAttributes(func(key, val string) { + for key, val := range cnr.Attributes() { cmd.Printf(" %s: %s\n", key, val) - }) + } } } diff --git a/go.mod b/go.mod index 5ed4a90be..fb45c3874 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module git.frostfs.info/TrueCloudLab/frostfs-node -go 1.23 +go 1.23.0 require ( code.gitea.io/sdk/gitea v0.17.1 @@ -9,7 +9,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.5.2 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248 git.frostfs.info/TrueCloudLab/frostfs-qos 
v0.0.0-20250331080422-b5ed0b6eff47 - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d + git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 @@ -28,7 +28,7 @@ require ( github.com/klauspost/compress v1.17.4 github.com/mailru/easyjson v0.7.7 github.com/mr-tron/base58 v1.2.0 - github.com/multiformats/go-multiaddr v0.14.0 + github.com/multiformats/go-multiaddr v0.15.0 github.com/nspcc-dev/neo-go v0.106.3 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.9.0 @@ -44,10 +44,9 @@ require ( go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 - golang.org/x/term v0.27.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 + golang.org/x/term v0.30.0 google.golang.org/grpc v1.69.2 google.golang.org/protobuf v1.36.1 gopkg.in/yaml.v3 v3.0.1 @@ -86,9 +85,9 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.4.1 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/klauspost/reedsolomon v1.12.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -124,13 +123,14 @@ require ( go.opentelemetry.io/otel/sdk v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/net v0.30.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/text v0.23.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 38aba9bde..acc26af36 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20250321063246-93b681a20248/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47 h1:O2c3VOlaGZ862hf2ZPLBMdTG6vGJzhIgDvFEFGfntzU= git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250331080422-b5ed0b6eff47/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d h1:ZLKDupw362Ciing7kdIZhDYGMyo2QZyJ6sS/8X9QWJ0= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250404152210-6458c11e833d/go.mod h1:2PWt5GwJTnhjHp+mankcfCeAJBMn7puxPm+RS+lliVk= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa h1:ttJxiw5+Wti3outhaPFaLGwCinmUTQgyVQfD/sIU5sg= +git.frostfs.info/TrueCloudLab/frostfs-sdk-go 
v0.0.0-20250428134706-8822aedbbbaa/go.mod h1:mimnb6yQUBLLQ8PboNc5ZP8iz4VMhFRKrfZcjfR9CVs= git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= @@ -145,14 +145,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= -github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -192,8 +192,8 @@ github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aG github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= -github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= +github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= +github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= @@ -324,15 +324,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -353,8 +353,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -381,16 +381,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -398,15 +398,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -445,7 +445,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod 
h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go index a24b36944..61c568052 100644 --- a/pkg/core/container/util.go +++ b/pkg/core/container/util.go @@ -26,10 +26,10 @@ func WasRemoved(ctx context.Context, s Source, cid cid.ID) (bool, error) { // IsIndexedContainer returns True if container attributes should be indexed. func IsIndexedContainer(cnr containerSDK.Container) bool { var isS3Container bool - cnr.IterateAttributes(func(key, _ string) { + for key := range cnr.Attributes() { if key == ".s3-location-constraint" { isS3Container = true } - }) + } return !isS3Container } diff --git a/pkg/local_object_storage/internal/testutil/generators.go b/pkg/local_object_storage/internal/testutil/generators.go index 383c596af..52b199b0b 100644 --- a/pkg/local_object_storage/internal/testutil/generators.go +++ b/pkg/local_object_storage/internal/testutil/generators.go @@ -1,7 +1,9 @@ package testutil import ( + cryptorand "crypto/rand" "encoding/binary" + "math/rand" "sync/atomic" "testing" @@ -9,7 +11,6 @@ import ( objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" "github.com/stretchr/testify/require" - "golang.org/x/exp/rand" ) // AddressGenerator is the interface of types that generate object addresses. @@ -61,7 +62,7 @@ var _ ObjectGenerator = &SeqObjGenerator{} func generateObjectWithOIDWithCIDWithSize(oid oid.ID, cid cid.ID, sz uint64) *objectSDK.Object { data := make([]byte, sz) - _, _ = rand.Read(data) + _, _ = cryptorand.Read(data) obj := GenerateObjectWithCIDWithPayload(cid, data) obj.SetID(oid) return obj @@ -82,7 +83,7 @@ var _ ObjectGenerator = &RandObjGenerator{} func (g *RandObjGenerator) Next() *objectSDK.Object { var id oid.ID - _, _ = rand.Read(id[:]) + _, _ = cryptorand.Read(id[:]) return generateObjectWithOIDWithCIDWithSize(id, cid.ID{}, g.ObjSize) } diff --git a/pkg/local_object_storage/internal/testutil/object.go b/pkg/local_object_storage/internal/testutil/object.go index 60e9211d5..1087e40be 100644 --- a/pkg/local_object_storage/internal/testutil/object.go +++ b/pkg/local_object_storage/internal/testutil/object.go @@ -1,6 +1,7 @@ package testutil import ( + "crypto/rand" "crypto/sha256" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" @@ -11,7 +12,6 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) const defaultDataSize = 32 diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index a2e628144..02c1e2ee1 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -81,7 +81,8 @@ func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { if prm.BearerToken.Impersonate() { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) } else { - cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride()) + override, _ := prm.BearerToken.APEOverride() + cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) if err != nil { return fmt.Errorf("create chain router error: %w", err) } @@ -130,7 +131,7 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. 
- apeOverride := token.APEOverride() + apeOverride, _ := token.APEOverride() if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) } diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go index f2f8881cf..39a420358 100644 --- a/scripts/populate-metabase/internal/generate.go +++ b/scripts/populate-metabase/internal/generate.go @@ -1,8 +1,10 @@ package internal import ( + cryptorand "crypto/rand" "crypto/sha256" "fmt" + "math/rand" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -14,14 +16,13 @@ import ( usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version" "git.frostfs.info/TrueCloudLab/tzhash/tz" - "golang.org/x/exp/rand" ) func GeneratePayloadPool(count uint, size uint) [][]byte { var pool [][]byte for range count { payload := make([]byte, size) - _, _ = rand.Read(payload) + _, _ = cryptorand.Read(payload) pool = append(pool, payload) } From 64b46746e409a40a93a41c030a646d69fbf45282 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 28 Apr 2025 20:10:09 +0300 Subject: [PATCH 586/591] [#1689] ape: Fix validation for overrides in bearer * APE-overrides are optional for bearer. So, it should validate only set override; * Bearer can set overrides for containers, not only the one container - validation expects for any target type for set override. Basically, APE-overrides for all container must be set for namespace target; * Add unit-test cases to check bearer token validation. Change-Id: I6b8e19eb73d24f8cd8799bf99b6c551287da67d9 Signed-off-by: Airat Arifullin --- pkg/services/common/ape/checker.go | 32 ++++++++------- pkg/services/tree/signature_test.go | 61 +++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 15 deletions(-) diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index 02c1e2ee1..fcd3efa44 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -20,7 +20,6 @@ import ( ) var ( - errInvalidTargetType = errors.New("bearer token defines non-container target override") errBearerExpired = errors.New("bearer token has expired") errBearerInvalidSignature = errors.New("bearer token has invalid signature") errBearerInvalidContainerID = errors.New("bearer token was created for another container") @@ -81,7 +80,10 @@ func (c *checkerCoreImpl) CheckAPE(ctx context.Context, prm CheckPrm) error { if prm.BearerToken.Impersonate() { cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage) } else { - override, _ := prm.BearerToken.APEOverride() + override, isSet := prm.BearerToken.APEOverride() + if !isSet { + return errors.New("expected for override within bearer") + } cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, override) if err != nil { return fmt.Errorf("create chain router error: %w", err) @@ -131,19 +133,19 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe } // Check for ape overrides defined in the bearer token. 
- apeOverride, _ := token.APEOverride() - if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer { - return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String()) - } - - // Then check if container is either empty or equal to the container in the request. - var targetCnr cid.ID - err := targetCnr.DecodeString(apeOverride.Target.Name) - if err != nil { - return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) - } - if !cntID.Equals(targetCnr) { - return errBearerInvalidContainerID + if apeOverride, isSet := token.APEOverride(); isSet { + switch apeOverride.Target.TargetType { + case ape.TargetTypeContainer: + var targetCnr cid.ID + err := targetCnr.DecodeString(apeOverride.Target.Name) + if err != nil { + return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name) + } + if !cntID.Equals(targetCnr) { + return errBearerInvalidContainerID + } + default: + } } // Then check if container owner signed this token. diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index dd37b4191..13a5c1395 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -235,6 +235,48 @@ func TestMessageSign(t *testing.T) { require.Error(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) }) + t.Run("omit override within bt", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "expected for override") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenNoOverride() + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + }) + }) + + t.Run("invalid override within bearer token", func(t *testing.T) { + t.Run("personated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) + + t.Run("impersonated", func(t *testing.T) { + bt := testBearerTokenCorruptOverride(privs[1].PublicKey(), privs[2].PublicKey()) + bt.SetImpersonate(true) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[0].PrivateKey)) + require.ErrorContains(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut), "invalid cid") + }) + }) + t.Run("impersonate", func(t *testing.T) { cnr.Value.SetBasicACL(acl.PublicRWExtended) var bt bearer.Token @@ -311,6 +353,25 @@ func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token return b } +func testBearerTokenCorruptOverride(forPutGet, forGet *keys.PublicKey) 
bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + b.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + }, + Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}}, + }) + + return b +} + +func testBearerTokenNoOverride() bearer.Token { + var b bearer.Token + b.SetExp(currentEpoch + 1) + return b +} + func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain { ruleGet := chain.Rule{ Status: chain.Allow, From e0a4835ea3eb1e2dfda921319c86f4de69b686fe Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 24 Apr 2025 18:48:24 +0300 Subject: [PATCH 587/591] [#1689] go.mod: Bump `policy-engine` package version * The bumped version of `policy-engine` package introduces new native schema properties for resource. Change-Id: If392c51415cff66c46798a788a2f0944f15900d3 Signed-off-by: Airat Arifullin --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fb45c3874..6f1950936 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250428134706-8822aedbbbaa git.frostfs.info/TrueCloudLab/hrw v1.2.1 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 + git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 github.com/VictoriaMetrics/easyproto v0.1.4 diff --git a/go.sum b/go.sum index acc26af36..5b075f60a 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/96 git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07 h1:gPaqGsk6gSWQyNVjaStydfUz6Z/loHc9XyvGrJ5qSPY= git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20241015133823-8aee80dbdc07/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88 h1:V0a7ia84ZpSM2YxpJq1SKLQfeYmsqFWqcxwweBHJIzc= -git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250402100642-acd94d200f88/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991 h1:eTefR8y2y9cg7X5kybIcXDdmABfk/3A2awdmFD3zOsA= +git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20250425083815-09ff3bf14991/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= From 6cedfbc17a871f6d96110b63071760488dbccb19 Mon Sep 17 00:00:00 2001 From: Dmitrii Stepanov Date: Mon, 5 May 2025 16:20:13 +0300 Subject: [PATCH 588/591] [#1689] getSvc: Stop node addresses iteratation on logical errors Before this fix: If first node address returns non-logical error (context deadline exceeded, context canceled, etc), then this error will override logical error from second node address. 
Change-Id: Ib30d0209828fdc83b55308ca2e33a361aa4caee6 Signed-off-by: Dmitrii Stepanov --- pkg/services/object/get/v2/util.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/pkg/services/object/get/v2/util.go b/pkg/services/object/get/v2/util.go index 4b7dcc530..e699a3779 100644 --- a/pkg/services/object/get/v2/util.go +++ b/pkg/services/object/get/v2/util.go @@ -3,6 +3,7 @@ package getsvc import ( "context" "crypto/sha256" + "errors" "hash" "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client" @@ -358,19 +359,20 @@ func groupAddressRequestForwarder(f func(context.Context, network.Address, clien info.AddressGroup().IterateAddresses(func(addr network.Address) (stop bool) { var err error - - defer func() { - stop = err == nil - - if stop || firstErr == nil { - firstErr = err - } - - // would be nice to log otherwise - }() - res, err = f(ctx, addr, c, key) + // non-status logic error that could be returned + // from the SDK client; should not be considered + // as a connection error + var siErr *objectSDK.SplitInfoError + var eiErr *objectSDK.ECInfoError + + stop = err == nil || errors.As(err, &siErr) || errors.As(err, &eiErr) + + if stop || firstErr == nil { + firstErr = err + } + return }) From a5f76a609deb8a414685015fb7a39d5c99ad085c Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Mon, 5 May 2025 16:33:07 +0300 Subject: [PATCH 589/591] [#1689] ape: Fix bearer token validation * Request's sender is set to the token's issuer's public key if it's impersonated. Thus, token's user assertion must be fixed; * Add unit-test: check impersonated token but set user with `ForUser`. Change-Id: I5e299947761e237b1b4b339cf2d1278ef518239d Signed-off-by: Airat Arifullin --- pkg/services/common/ape/checker.go | 12 ++++++++++-- pkg/services/tree/signature_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go index fcd3efa44..eb6263320 100644 --- a/pkg/services/common/ape/checker.go +++ b/pkg/services/common/ape/checker.go @@ -157,8 +157,16 @@ func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKe var usrSender user.ID user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey)) - if !token.AssertUser(usrSender) { - return errBearerInvalidOwner + // Then check if sender is valid. If it is an impersonated token, the sender is set to the token's issuer's + // public key, but not the actual sender. 
+ if !token.Impersonate() { + if !token.AssertUser(usrSender) { + return errBearerInvalidOwner + } + } else { + if !bearer.ResolveIssuer(*token).Equals(usrSender) { + return errBearerInvalidOwner + } } return nil diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go index 13a5c1395..8815c227f 100644 --- a/pkg/services/tree/signature_test.go +++ b/pkg/services/tree/signature_test.go @@ -297,6 +297,30 @@ func TestMessageSign(t *testing.T) { require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) }) + t.Run("impersonate, but target user is still set", func(t *testing.T) { + var bt bearer.Token + bt.SetExp(10) + bt.SetImpersonate(true) + + var reqSigner user.ID + user.IDFromKey(&reqSigner, (ecdsa.PublicKey)(*privs[1].PublicKey())) + + bt.ForUser(reqSigner) + bt.SetAPEOverride(bearer.APEOverride{ + Target: ape.ChainTarget{ + TargetType: ape.TargetTypeContainer, + Name: cid1.EncodeToString(), + }, + Chains: []ape.Chain{}, + }) + require.NoError(t, bt.Sign(privs[0].PrivateKey)) + req.Body.BearerToken = bt.Marshal() + + require.NoError(t, SignMessage(req, &privs[1].PrivateKey)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectPut)) + require.NoError(t, s.verifyClient(context.Background(), req, cid1, versionTreeID, req.GetBody().GetBearerToken(), acl.OpObjectGet)) + }) + t.Run("impersonate but invalid signer", func(t *testing.T) { var bt bearer.Token bt.SetExp(10) From c2a495814f32b9ccfe3efedff76937ba03a61858 Mon Sep 17 00:00:00 2001 From: Airat Arifullin Date: Thu, 24 Apr 2025 18:57:23 +0300 Subject: [PATCH 590/591] [#1689] object: Make APE middleware form container system attributes * Extract container system attributes into request info; * Form APE-resource proprties from the extracted attributes; * Fix unit-test. Change-Id: I8fbd9a167ad05af0e75df350ac882b866da7bdcb Signed-off-by: Airat Arifullin --- pkg/services/object/ape/checker.go | 3 + pkg/services/object/ape/metadata.go | 7 + pkg/services/object/ape/request.go | 9 +- pkg/services/object/ape/request_test.go | 20 ++- pkg/services/object/ape/service.go | 177 +++++++++++++----------- 5 files changed, 128 insertions(+), 88 deletions(-) diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go index b96757def..bb6067a37 100644 --- a/pkg/services/object/ape/checker.go +++ b/pkg/services/object/ape/checker.go @@ -64,6 +64,9 @@ type Prm struct { // An encoded container's owner user ID. ContainerOwner user.ID + // Attributes defined for the container. + ContainerAttributes map[string]string + // The request's bearer token. It is used in order to check APE overrides with the token. BearerToken *bearer.Token diff --git a/pkg/services/object/ape/metadata.go b/pkg/services/object/ape/metadata.go index b37c3b6f8..102985aa6 100644 --- a/pkg/services/object/ape/metadata.go +++ b/pkg/services/object/ape/metadata.go @@ -63,6 +63,8 @@ type RequestInfo struct { ContainerOwner user.ID + ContainerAttributes map[string]string + // Namespace defines to which namespace a container is belonged. 
Namespace string @@ -131,6 +133,11 @@ func (e *extractor) GetRequestInfo(ctx context.Context, m Metadata, method strin ri.Role = nativeSchemaRole(res.Role) ri.ContainerOwner = cnr.Value.Owner() + ri.ContainerAttributes = map[string]string{} + for key, val := range cnr.Value.Attributes() { + ri.ContainerAttributes[key] = val + } + cnrNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr.Value).Zone(), ".ns") if hasNamespace { ri.Namespace = cnrNamespace diff --git a/pkg/services/object/ape/request.go b/pkg/services/object/ape/request.go index 001a5f71e..39dd7f476 100644 --- a/pkg/services/object/ape/request.go +++ b/pkg/services/object/ape/request.go @@ -57,11 +57,16 @@ func resourceName(cid cid.ID, oid *oid.ID, namespace string) string { } // objectProperties collects object properties from address parameters and a header if it is passed. -func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV2.Header) map[string]string { +func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, cnrAttrs map[string]string, header *objectV2.Header) map[string]string { objectProps := map[string]string{ nativeschema.PropertyKeyObjectContainerID: cnr.EncodeToString(), } + for attrName, attrValue := range cnrAttrs { + prop := fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, attrName) + objectProps[prop] = attrValue + } + objectProps[nativeschema.PropertyKeyContainerOwnerID] = cnrOwner.EncodeToString() if oid != nil { @@ -155,7 +160,7 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Re prm.Method, aperequest.NewResource( resourceName(prm.Container, prm.Object, prm.Namespace), - objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header), + objectProperties(prm.Container, prm.Object, prm.ContainerOwner, prm.ContainerAttributes, header), ), reqProps, ), nil diff --git a/pkg/services/object/ape/request_test.go b/pkg/services/object/ape/request_test.go index f270bf97d..fcf7c4c40 100644 --- a/pkg/services/object/ape/request_test.go +++ b/pkg/services/object/ape/request_test.go @@ -7,6 +7,7 @@ import ( "testing" aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request" + cnrV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" objectV2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/object" checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test" objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -22,8 +23,17 @@ const ( testOwnerID = "NURFM8PWbLA2aLt2vrD8q4FyfAdgESwM8y" incomingIP = "192.92.33.1" + + testSysAttrName = "unittest" + + testSysAttrZone = "eggplant" ) +var containerAttrs = map[string]string{ + cnrV2.SysAttributeName: testSysAttrName, + cnrV2.SysAttributeZone: testSysAttrZone, +} + func ctxWithPeerInfo() context.Context { return peer.NewContext(context.Background(), &peer.Peer{ Addr: &net.TCPAddr{ @@ -105,7 +115,7 @@ func TestObjectProperties(t *testing.T) { var testCnrOwner user.ID require.NoError(t, testCnrOwner.DecodeString(testOwnerID)) - props := objectProperties(cnr, obj, testCnrOwner, header.ToV2().GetHeader()) + props := objectProperties(cnr, obj, testCnrOwner, containerAttrs, header.ToV2().GetHeader()) require.Equal(t, test.container, props[nativeschema.PropertyKeyObjectContainerID]) require.Equal(t, testOwnerID, props[nativeschema.PropertyKeyContainerOwnerID]) @@ -124,6 +134,8 @@ func TestObjectProperties(t *testing.T) { require.Equal(t, test.header.typ.String(), props[nativeschema.PropertyKeyObjectType]) require.Equal(t, 
test.header.payloadChecksum.String(), props[nativeschema.PropertyKeyObjectPayloadHash]) require.Equal(t, test.header.payloadHomomorphicHash.String(), props[nativeschema.PropertyKeyObjectHomomorphicHash]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeName], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeName)]) + require.Equal(t, containerAttrs[cnrV2.SysAttributeZone], props[fmt.Sprintf(nativeschema.PropertyKeyFormatObjectContainerAttribute, cnrV2.SysAttributeZone)]) for _, attr := range test.header.attributes { require.Equal(t, attr.val, props[attr.key]) @@ -245,6 +257,10 @@ func TestNewAPERequest(t *testing.T) { Role: role, SenderKey: senderKey, ContainerOwner: testCnrOwner, + ContainerAttributes: map[string]string{ + cnrV2.SysAttributeZone: testSysAttrZone, + cnrV2.SysAttributeName: testSysAttrName, + }, } headerSource := newHeaderProviderMock() @@ -277,7 +293,7 @@ func TestNewAPERequest(t *testing.T) { method, aperequest.NewResource( resourceName(cnr, obj, prm.Namespace), - objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header { + objectProperties(cnr, obj, testCnrOwner, containerAttrs, func() *objectV2.Header { if headerObjSDK != nil { return headerObjSDK.ToV2().GetHeader() } diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go index e199e2638..5e04843f3 100644 --- a/pkg/services/object/ape/service.go +++ b/pkg/services/object/ape/service.go @@ -86,16 +86,17 @@ func (g *getStreamBasicChecker) Send(resp *objectV2.GetResponse) error { } prm := Prm{ - Namespace: g.reqInfo.Namespace, - Container: cnrID, - Object: objID, - Header: partInit.GetHeader(), - Method: nativeschema.MethodGetObject, - SenderKey: g.reqInfo.SenderKey, - ContainerOwner: g.reqInfo.ContainerOwner, - Role: g.reqInfo.Role, - BearerToken: g.metadata.BearerToken, - XHeaders: resp.GetMetaHeader().GetXHeaders(), + Namespace: g.reqInfo.Namespace, + Container: cnrID, + Object: objID, + Header: partInit.GetHeader(), + Method: nativeschema.MethodGetObject, + SenderKey: g.reqInfo.SenderKey, + ContainerOwner: g.reqInfo.ContainerOwner, + ContainerAttributes: g.reqInfo.ContainerAttributes, + Role: g.reqInfo.Role, + BearerToken: g.metadata.BearerToken, + XHeaders: resp.GetMetaHeader().GetXHeaders(), } if err := g.apeChecker.CheckAPE(g.Context(), prm); err != nil { @@ -142,16 +143,17 @@ func (p *putStreamBasicChecker) Send(ctx context.Context, request *objectV2.PutR } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Header: partInit.GetHeader(), - Method: nativeschema.MethodPutObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - Role: reqInfo.Role, - BearerToken: md.BearerToken, - XHeaders: md.MetaHeader.GetXHeaders(), + Namespace: reqInfo.Namespace, + Container: md.Container, + Object: md.Object, + Header: partInit.GetHeader(), + Method: nativeschema.MethodPutObject, + SenderKey: reqInfo.SenderKey, + ContainerOwner: reqInfo.ContainerOwner, + ContainerAttributes: reqInfo.ContainerAttributes, + Role: reqInfo.Role, + BearerToken: md.BearerToken, + XHeaders: md.MetaHeader.GetXHeaders(), } if err := p.apeChecker.CheckAPE(ctx, prm); err != nil { @@ -200,15 +202,16 @@ func (p *patchStreamBasicChecker) Send(ctx context.Context, request *objectV2.Pa } prm := Prm{ - Namespace: reqInfo.Namespace, - Container: md.Container, - Object: md.Object, - Method: nativeschema.MethodPatchObject, - SenderKey: reqInfo.SenderKey, - ContainerOwner: reqInfo.ContainerOwner, - Role: reqInfo.Role, 
-			BearerToken:    md.BearerToken,
-			XHeaders:       md.MetaHeader.GetXHeaders(),
+			Namespace:           reqInfo.Namespace,
+			Container:           md.Container,
+			Object:              md.Object,
+			Method:              nativeschema.MethodPatchObject,
+			SenderKey:           reqInfo.SenderKey,
+			ContainerOwner:      reqInfo.ContainerOwner,
+			ContainerAttributes: reqInfo.ContainerAttributes,
+			Role:                reqInfo.Role,
+			BearerToken:         md.BearerToken,
+			XHeaders:            md.MetaHeader.GetXHeaders(),
 		}
 
 		if err := p.apeChecker.CheckAPE(ctx, prm); err != nil {
@@ -268,16 +271,17 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
 	}
 
 	err = c.apeChecker.CheckAPE(ctx, Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Object:         md.Object,
-		Header:         header,
-		Method:         nativeschema.MethodHeadObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Object:              md.Object,
+		Header:              header,
+		Method:              nativeschema.MethodHeadObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	})
 	if err != nil {
 		return nil, toStatusErr(err)
@@ -296,14 +300,15 @@ func (c *Service) Search(request *objectV2.SearchRequest, stream objectSvc.Searc
 	}
 
 	err = c.apeChecker.CheckAPE(stream.Context(), Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Method:         nativeschema.MethodSearchObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Method:              nativeschema.MethodSearchObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	})
 	if err != nil {
 		return toStatusErr(err)
@@ -323,15 +328,16 @@ func (c *Service) Delete(ctx context.Context, request *objectV2.DeleteRequest) (
 	}
 
 	err = c.apeChecker.CheckAPE(ctx, Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Object:         md.Object,
-		Method:         nativeschema.MethodDeleteObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Object:              md.Object,
+		Method:              nativeschema.MethodDeleteObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	})
 	if err != nil {
 		return nil, toStatusErr(err)
@@ -356,15 +362,16 @@ func (c *Service) GetRange(request *objectV2.GetRangeRequest, stream objectSvc.G
 	}
 
 	err = c.apeChecker.CheckAPE(stream.Context(), Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Object:         md.Object,
-		Method:         nativeschema.MethodRangeObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Object:              md.Object,
+		Method:              nativeschema.MethodRangeObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	})
 	if err != nil {
 		return toStatusErr(err)
@@ -384,15 +391,16 @@ func (c *Service) GetRangeHash(ctx context.Context, request *objectV2.GetRangeHa
 	}
 
 	prm := Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Object:         md.Object,
-		Method:         nativeschema.MethodHashObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Object:              md.Object,
+		Method:              nativeschema.MethodHashObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	}
 
 	resp, err := c.next.GetRangeHash(ctx, request)
@@ -417,16 +425,17 @@ func (c *Service) PutSingle(ctx context.Context, request *objectV2.PutSingleRequ
 	}
 
 	prm := Prm{
-		Namespace:      reqInfo.Namespace,
-		Container:      md.Container,
-		Object:         md.Object,
-		Header:         request.GetBody().GetObject().GetHeader(),
-		Method:         nativeschema.MethodPutObject,
-		Role:           reqInfo.Role,
-		SenderKey:      reqInfo.SenderKey,
-		ContainerOwner: reqInfo.ContainerOwner,
-		BearerToken:    md.BearerToken,
-		XHeaders:       md.MetaHeader.GetXHeaders(),
+		Namespace:           reqInfo.Namespace,
+		Container:           md.Container,
+		Object:              md.Object,
+		Header:              request.GetBody().GetObject().GetHeader(),
+		Method:              nativeschema.MethodPutObject,
+		Role:                reqInfo.Role,
+		SenderKey:           reqInfo.SenderKey,
+		ContainerOwner:      reqInfo.ContainerOwner,
+		ContainerAttributes: reqInfo.ContainerAttributes,
+		BearerToken:         md.BearerToken,
+		XHeaders:            md.MetaHeader.GetXHeaders(),
 	}
 
 	if err = c.apeChecker.CheckAPE(ctx, prm); err != nil {

From 86aec1ad6dfd2404a385349d1937fa6af4488780 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 24 Apr 2025 19:08:30 +0300
Subject: [PATCH 591/591] [#1689] container: Make APE middleware form container system attributes

* Make `Put` handler extract container attributes from the request body and
  form APE-resource properties;
* Make `validateContainerBoundedOperation`, used by the `Get` and `Delete`
  handlers, extract attributes from the read container.
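
For reviewers, a minimal, self-contained sketch of the attribute-to-property
mapping this change introduces follows. It is illustrative only: the property
key format, the owner-ID key, and the literal __SYSTEM__* attribute names are
stand-ins for nativeschema.PropertyKeyFormatContainerAttribute,
PropertyKeyContainerOwnerID, and the API-level SysAttribute* constants, which
may differ from what is shown here.

    package main

    import "fmt"

    // Stand-in for nativeschema.PropertyKeyFormatContainerAttribute; the real
    // format string is defined by the policy engine and may differ.
    const propertyKeyFormatContainerAttribute = "$Container:%s"

    // containerProps mirrors the idea of getContainerProps: the owner ID plus
    // every container attribute become APE resource properties, so chain rules
    // can condition on them.
    func containerProps(ownerID string, attrs map[string]string) map[string]string {
        props := map[string]string{
            "$Container:ownerID": ownerID, // stand-in for PropertyKeyContainerOwnerID
        }
        for name, val := range attrs {
            props[fmt.Sprintf(propertyKeyFormatContainerAttribute, name)] = val
        }
        return props
    }

    func main() {
        // A container with hypothetical system attributes, similar to the new tests.
        props := containerProps("owner-id-stub", map[string]string{
            "__SYSTEM__ZONE": "eggplant",
            "__SYSTEM__NAME": "pictures",
        })
        for key, value := range props {
            fmt.Printf("%s = %q\n", key, value)
        }
    }

With properties formed this way, a deny rule whose condition key is the
formatted container attribute (as in testDenyGetContainerSysZoneAttr below)
matches exactly those containers whose attribute value equals the rule's value.
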
Change-Id: I005345575c3d25b505bae4108f60cd320a7489ba
Signed-off-by: Airat Arifullin
---
 pkg/services/container/ape.go      |  29 ++++-
 pkg/services/container/ape_test.go | 181 +++++++++++++++++++++++++++++
 2 files changed, 206 insertions(+), 4 deletions(-)

diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 01bd825d7..3b5dab9aa 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -280,11 +280,16 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
 		return nil, err
 	}
 
+	cnrProps, err := getContainerPropsFromV2(req.GetBody().GetContainer())
+	if err != nil {
+		return nil, fmt.Errorf("get container properties: %w", err)
+	}
+
 	request := aperequest.NewRequest(
 		nativeschema.MethodPutContainer,
 		aperequest.NewResource(
 			resourceName(namespace, ""),
-			make(map[string]string),
+			cnrProps,
 		),
 		reqProps,
 	)
@@ -395,7 +400,7 @@
 		op,
 		aperequest.NewResource(
 			resourceName(namespace, id.EncodeToString()),
-			ac.getContainerProps(cont),
+			getContainerProps(cont),
 		),
 		reqProps,
 	)
@@ -445,10 +450,26 @@ func resourceName(namespace string, container string) string {
 	return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
 }
 
-func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
-	return map[string]string{
+func getContainerProps(c *containercore.Container) map[string]string {
+	props := map[string]string{
 		nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
 	}
+	for attrName, attrVal := range c.Value.Attributes() {
+		name := fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, attrName)
+		props[name] = attrVal
+	}
+	return props
+}
+
+func getContainerPropsFromV2(cnrV2 *container.Container) (map[string]string, error) {
+	if cnrV2 == nil {
+		return nil, errors.New("container is not set")
+	}
+	c := cnrSDK.Container{}
+	if err := c.ReadFromV2(*cnrV2); err != nil {
+		return nil, err
+	}
+	return getContainerProps(&containercore.Container{Value: c}), nil
 }
 
 func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 77a981d1a..6438c34ca 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -54,6 +54,8 @@ func TestAPE(t *testing.T) {
 	t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
 	t.Run("deny list containers for owner with PK", testDenyListContainersForPK)
 	t.Run("deny list containers by namespace invalidation", testDenyListContainersValidationNamespaceError)
+	t.Run("deny get by container attribute rules", testDenyGetContainerSysZoneAttr)
+	t.Run("deny put by container attribute rules", testDenyPutContainerSysZoneAttr)
 }
 
 const (
@@ -564,6 +566,185 @@ func testDenyGetContainerByIP(t *testing.T) {
 	require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
 }
 
+func testDenyGetContainerSysZoneAttr(t *testing.T) {
+	t.Parallel()
+	srv := &srvStub{
+		calls: map[string]int{},
+	}
+	router := inmemory.NewInMemory()
+	contRdr := &containerStub{
+		c: map[cid.ID]*containercore.Container{},
+	}
+	ir := &irStub{
+		keys: [][]byte{},
+	}
+	nm := &netmapStub{}
+	pk, err := keys.NewPrivateKey()
+	require.NoError(t, err)
+
+	frostfsIDSubjectReader := &frostfsidStub{
+		subjects: map[util.Uint160]*client.Subject{
+			pk.PublicKey().GetScriptHash(): {
+				KV: map[string]string{
+					"tag-attr1": "value1",
+					"tag-attr2": "value2",
+				},
+			},
+		},
+		subjectsExt: map[util.Uint160]*client.SubjectExtended{
+			pk.PublicKey().GetScriptHash(): {
+				KV: map[string]string{
+					"tag-attr1": "value1",
+					"tag-attr2": "value2",
+				},
+				Groups: []*client.Group{
+					{
+						ID: 19888,
+					},
+				},
+			},
+		},
+	}
+
+	apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+	contID := cidtest.ID()
+	testContainer := containertest.Container()
+	pp := netmap.PlacementPolicy{}
+	require.NoError(t, pp.DecodeString("REP 1"))
+	testContainer.SetPlacementPolicy(pp)
+	testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+	contRdr.c[contID] = &containercore.Container{Value: testContainer}
+
+	nm.currentEpoch = 100
+	nm.netmaps = map[uint64]*netmap.NetMap{}
+	var testNetmap netmap.NetMap
+	testNetmap.SetEpoch(nm.currentEpoch)
+	testNetmap.SetNodes([]netmap.NodeInfo{{}})
+	nm.netmaps[nm.currentEpoch] = &testNetmap
+	nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+	_, _, err = router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
+		Rules: []chain.Rule{
+			{
+				Status: chain.AccessDenied,
+				Actions: chain.Actions{
+					Names: []string{
+						nativeschema.MethodGetContainer,
+					},
+				},
+				Resources: chain.Resources{
+					Names: []string{
+						fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
+					},
+				},
+				Condition: []chain.Condition{
+					{
+						Kind:  chain.KindResource,
+						Key:   fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+						Value: "eggplant",
+						Op:    chain.CondStringEquals,
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+
+	req := &container.GetRequest{}
+	req.SetBody(&container.GetRequestBody{})
+	var refContID refs.ContainerID
+	contID.WriteToV2(&refContID)
+	req.GetBody().SetContainerID(&refContID)
+
+	require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
+
+	resp, err := apeSrv.Get(ctxWithPeerInfo(), req)
+	require.Nil(t, resp)
+	var errAccessDenied *apistatus.ObjectAccessDenied
+	require.ErrorAs(t, err, &errAccessDenied)
+	require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
+func testDenyPutContainerSysZoneAttr(t *testing.T) {
+	t.Parallel()
+	srv := &srvStub{
+		calls: map[string]int{},
+	}
+	router := inmemory.NewInMemory()
+	contRdr := &containerStub{
+		c: map[cid.ID]*containercore.Container{},
+	}
+	ir := &irStub{
+		keys: [][]byte{},
+	}
+	nm := &netmapStub{}
+
+	contID := cidtest.ID()
+	testContainer := containertest.Container()
+	pp := netmap.PlacementPolicy{}
+	require.NoError(t, pp.DecodeString("REP 1"))
+	testContainer.SetPlacementPolicy(pp)
+	testContainer.SetAttribute(container.SysAttributeZone, "eggplant")
+	contRdr.c[contID] = &containercore.Container{Value: testContainer}
+	owner := testContainer.Owner()
+	ownerAddr := owner.ScriptHash()
+
+	frostfsIDSubjectReader := &frostfsidStub{
+		subjects: map[util.Uint160]*client.Subject{
+			ownerAddr: {},
+		},
+		subjectsExt: map[util.Uint160]*client.SubjectExtended{
+			ownerAddr: {},
+		},
+	}
+
+	apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
+
+	nm.currentEpoch = 100
+	nm.netmaps = map[uint64]*netmap.NetMap{}
+	var testNetmap netmap.NetMap
+	testNetmap.SetEpoch(nm.currentEpoch)
+	testNetmap.SetNodes([]netmap.NodeInfo{{}})
+	nm.netmaps[nm.currentEpoch] = &testNetmap
+	nm.netmaps[nm.currentEpoch-1] = &testNetmap
+
+	_, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.NamespaceTarget(""), &chain.Chain{
+		Rules: []chain.Rule{
+			{
+				Status: chain.AccessDenied,
+				Actions: chain.Actions{
+					Names: []string{
+						nativeschema.MethodPutContainer,
+					},
+				},
+				Resources: chain.Resources{
+					Names: []string{
+						nativeschema.ResourceFormatRootContainers,
+					},
+				},
+				Condition: []chain.Condition{
+					{
+						Kind:  chain.KindResource,
+						Key:   fmt.Sprintf(nativeschema.PropertyKeyFormatContainerAttribute, container.SysAttributeZone),
+						Value: "eggplant",
+						Op:    chain.CondStringEquals,
+					},
+				},
+			},
+		},
+	})
+	require.NoError(t, err)
+
+	req := initPutRequest(t, testContainer)
+
+	resp, err := apeSrv.Put(ctxWithPeerInfo(), req)
+	require.Nil(t, resp)
+	var errAccessDenied *apistatus.ObjectAccessDenied
+	require.ErrorAs(t, err, &errAccessDenied)
+	require.Contains(t, errAccessDenied.Reason(), chain.AccessDenied.String())
+}
+
 func testDenyGetContainerByGroupID(t *testing.T) {
 	t.Parallel()
 	srv := &srvStub{