Strict APE check for EC & fix signing of EC part put requests #1451

Merged
fyrchik merged 5 commits from dstepanov-yadro/frostfs-node:fix/ec_ape_strict into master 2024-11-06 08:18:11 +00:00
3 changed files with 36 additions and 11 deletions
Showing only changes of commit 0befda5f4f - Show all commits

View file

@ -37,10 +37,12 @@ type ECWriter struct {
ObjectMeta object.ContentMeta
ObjectMetaValid bool
remoteRequestSignKey *ecdsa.PrivateKey
}
func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
relayed, err := e.relayIfNotContainerNode(ctx, obj)
relayed, isContainerNode, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
}
@ -60,23 +62,35 @@ func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
e.ObjectMetaValid = true
}
if isContainerNode {
restoreTokens := e.CommonPrm.ForgetTokens()

Restore the tokens because, in the case of a complex object, the session token is used to store the linking object (as it was before).
defer restoreTokens()
// As request executed on container node, so sign request with container key.
e.remoteRequestSignKey, err = e.Config.KeyStorage.GetKey(nil)
if err != nil {
return err
}

Let's consider a situation: `ec-container: s01, s03, s04`. We're sending a request signed by `wallet.json`. We pass through the APE check for `s01`, _re-sign_ the request with the `s01` key, and send `writeECPart` to `s03, s04`. Imagine that in this time interval (30 sec before) a local override has been added to `s03` or `s04` to restrict putting objects to this container — the local override is totally ignored due to this actor role change. I don't consider the situation when an APE chain is present within the `policy` contract, because it would reject the request before this point, and that's OK.

I think it is OK, as frostfs-storage is not a strongly consistent system. Also, it depends on the local override: if the local override restricts putting objects by owner, it will still work.

> I think it is OK, as frostfs-storage is not a strongly consistent system.

It seems to be so.

> if the local override restricts putting objects by owner, it will still work

We won't get to the check at all: [look at these lines](https://git.frostfs.info/TrueCloudLab/frostfs-node/src/branch/master/pkg/services/object/ape/checker.go#L85-L86)

Anyway, currently I am not requesting to change this. We just need to keep this in mind.

Oh, right. But as we discussed with @fyrchik, we should try to check the user's request once (on the first container node, for now) to reduce the number of unnecessary operations.

OK. I'll keep this comment open so we don't forget about this point in the future if we refer back to this PR.
} else {
e.remoteRequestSignKey = e.Key
}
if obj.ECHeader() != nil {
return e.writeECPart(ctx, obj)
}
return e.writeRawObject(ctx, obj)
}
func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
if e.Relay == nil {
return false, nil
}
func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, bool, error) {
currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
if err != nil {
return false, err
return false, false, err
}
if currentNodeIsContainerNode {
// object can be splitted or saved local
return false, nil
return false, true, nil
}
if e.Relay == nil {
return false, currentNodeIsContainerNode, nil
}
objID := object.AddressOf(obj).Object()
var index uint32
@ -85,9 +99,9 @@ func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
index = obj.ECHeader().Index()
}
if err := e.relayToContainerNode(ctx, objID, index); err != nil {
return false, err
return false, false, err
}
return true, nil
return true, currentNodeIsContainerNode, nil
}
func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
@ -338,7 +352,7 @@ func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, n
client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
remoteTaget := remoteWriter{
privateKey: e.Key,
privateKey: e.remoteRequestSignKey,
clientConstructor: e.Config.ClientConstructor,
commonPrm: e.CommonPrm,
nodeInfo: clientNodeInfo,

View file

@ -14,6 +14,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -127,6 +128,8 @@ func TestECWriter(t *testing.T) {
ownerKey, err := keys.NewPrivateKey()
require.NoError(t, err)
nodeKey, err := keys.NewPrivateKey()
require.NoError(t, err)
pool, err := ants.NewPool(4, ants.WithNonblocking(true))
require.NoError(t, err)
@ -141,6 +144,7 @@ func TestECWriter(t *testing.T) {
RemotePool: pool,
Logger: log,
ClientConstructor: clientConstructor{vectors: ns},
KeyStorage: util.NewKeyStorage(&nodeKey.PrivateKey, nil, nil),
},
PlacementOpts: append(
[]placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},

View file

@ -100,12 +100,19 @@ func (p *CommonPrm) SetNetmapLookupDepth(v uint64) {
// ForgetTokens forgets all the tokens read from the request's
// meta information before.
func (p *CommonPrm) ForgetTokens() {
func (p *CommonPrm) ForgetTokens() func() {
if p != nil {
tk := p.token
br := p.bearer
p.token = nil
p.bearer = nil
return func() {
p.token = tk
p.bearer = br
}
}
return func() {}
}
func CommonPrmFromV2(req interface {
GetMetaHeader() *session.RequestMetaHeader