frostfs-node/pkg/core/object/sender_classifier.go
Aleksey Savchuk f0c43c8d80
[#1502] Use zap.Error for logging errors
Use `zap.Error` instead of `zap.String` for logging errors: change all expressions like
`zap.String("error", err.Error())` or `zap.String("err", err.Error())` to `zap.Error(err)`.
Leave similar expressions that use other field names unchanged, for example,
`zap.String("last_error", lastErr.Error())` or `zap.String("reason", ctx.Err().Error())`.

This change was made by applying the following patch:
```diff
@@
var err expression
@@
-zap.String("error", err.Error())
+zap.Error(err)

@@
var err expression
@@
-zap.String("err", err.Error())
+zap.Error(err)
```
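
For illustration, here is a minimal self-contained sketch of the resulting call-site change (the logger setup, message, and error value are hypothetical, not taken from the repository):

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("connection reset")

	// Before: the error was flattened into a plain string field.
	log.Info("request failed", zap.String("error", err.Error()))

	// After: zap.Error attaches the error under the standard "error" key
	// and lets structured encoders keep the error value itself.
	log.Info("request failed", zap.Error(err))
}
```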

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
2024-12-16 11:13:42 +03:00

package object

import (
	"bytes"
	"context"
	"crypto/sha256"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"go.uber.org/zap"
)
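
// InnerRing provides access to the public keys of the current Inner Ring nodes.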
type InnerRing interface {
	InnerRingKeys() ([][]byte, error)
}
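
// SenderClassifier resolves the role of a request sender relative to a container.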
type SenderClassifier struct {
	log       *logger.Logger
	innerRing InnerRing
	netmap    core.Source
}
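
// NewSenderClassifier constructs a SenderClassifier from its dependencies.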
func NewSenderClassifier(innerRing InnerRing, netmap core.Source, log *logger.Logger) SenderClassifier {
	return SenderClassifier{
		log:       log,
		innerRing: innerRing,
		netmap:    netmap,
	}
}
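
// ClassifyResult is the resolved role of the sender together with its public key.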
type ClassifyResult struct {
	Role acl.Role
	Key  []byte
}
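
// Classify determines the role of the request sender: the container owner is
// reported as RoleOwner, otherwise the sender is checked against the Inner Ring
// and the container nodes.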
func (c SenderClassifier) Classify(
	ctx context.Context,
	ownerID *user.ID,
	ownerKey *keys.PublicKey,
	idCnr cid.ID,
	cnr container.Container,
) (res *ClassifyResult, err error) {
	ownerKeyInBytes := ownerKey.Bytes()

	// TODO: #767 get owner from frostfs.id if present

	// if request owner is the same as container owner, return RoleOwner
	if ownerID.Equals(cnr.Owner()) {
		return &ClassifyResult{
			Role: acl.RoleOwner,
			Key:  ownerKeyInBytes,
		}, nil
	}

	return c.IsInnerRingOrContainerNode(ctx, ownerKeyInBytes, idCnr, cnr)
}
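
// IsInnerRingOrContainerNode reports whether the sender key belongs to the
// Inner Ring or to one of the container nodes; if neither check succeeds,
// RoleOthers is returned.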
func (c SenderClassifier) IsInnerRingOrContainerNode(ctx context.Context, ownerKeyInBytes []byte, idCnr cid.ID, cnr container.Container) (*ClassifyResult, error) {
	isInnerRingNode, err := c.isInnerRingKey(ownerKeyInBytes)
	if err != nil {
		// do not throw error, try best case matching
		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromInnerRing,
			zap.Error(err))
	} else if isInnerRingNode {
		return &ClassifyResult{
			Role: acl.RoleInnerRing,
			Key:  ownerKeyInBytes,
		}, nil
	}

	binCnr := make([]byte, sha256.Size)
	idCnr.Encode(binCnr)

	isContainerNode, err := c.isContainerKey(ownerKeyInBytes, binCnr, cnr)
	if err != nil {
		// error might happen if request has `RoleOther` key and placement
		// is not possible for previous epoch, so
		// do not throw error, try best case matching
		c.log.Debug(ctx, logs.V2CantCheckIfRequestFromContainerNode,
			zap.Error(err))
	} else if isContainerNode {
		return &ClassifyResult{
			Role: acl.RoleContainer,
			Key:  ownerKeyInBytes,
		}, nil
	}

	// if none of the above, return RoleOthers
	return &ClassifyResult{
		Role: acl.RoleOthers,
		Key:  ownerKeyInBytes,
	}, nil
}
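
// isInnerRingKey reports whether the owner key is present in the Inner Ring key list.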
func (c SenderClassifier) isInnerRingKey(owner []byte) (bool, error) {
	innerRingKeys, err := c.innerRing.InnerRingKeys()
	if err != nil {
		return false, err
	}

	// if the request owner key is in the Inner Ring list, report a match
	// (the caller maps it to RoleInnerRing)
	for i := range innerRingKeys {
		if bytes.Equal(innerRingKeys[i], owner) {
			return true, nil
		}
	}

	return false, nil
}
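
// isContainerKey reports whether the owner key belongs to a node of the
// container in the current netmap or, failing that, in the previous one.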
func (c SenderClassifier) isContainerKey(
	owner, idCnr []byte,
	cnr container.Container,
) (bool, error) {
	nm, err := core.GetLatestNetworkMap(c.netmap) // first check current netmap
	if err != nil {
		return false, err
	}

	in, err := LookupKeyInContainer(nm, owner, idCnr, cnr)
	if err != nil {
		return false, err
	} else if in {
		return true, nil
	}

	// then check the previous netmap: this can happen in between epoch changes,
	// when a node migrates data from the last epoch's container
	nm, err = core.GetPreviousNetworkMap(c.netmap)
	if err != nil {
		return false, err
	}

	return LookupKeyInContainer(nm, owner, idCnr, cnr)
}
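
// LookupKeyInContainer reports whether pkey belongs to any node selected for
// the container by its placement policy in the given netmap.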
func LookupKeyInContainer(
	nm *netmap.NetMap,
	pkey, idCnr []byte,
	cnr container.Container,
) (bool, error) {
	cnrVectors, err := nm.ContainerNodes(cnr.PlacementPolicy(), idCnr)
	if err != nil {
		return false, err
	}

	for i := range cnrVectors {
		for j := range cnrVectors[i] {
			if bytes.Equal(cnrVectors[i][j].PublicKey(), pkey) {
				return true, nil
			}
		}
	}

	return false, nil
}