Aleksey Savchuk
d28a5d2d7a
When getting a role in the APE checker for the container service, an error may be returned if the network maps of the two latest epochs don't have enough nodes to fulfil a container placement policy. It's a logical error, so we should ignore it.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
package container

import (
	"bytes"
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"net"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
	session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
	aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	sessionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
	commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
	nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"google.golang.org/grpc/peer"
)

var (
	errMissingContainerID           = errors.New("missing container ID")
	errSessionContainerMismatch     = errors.New("requested container is not related to the session")
	errMissingVerificationHeader    = errors.New("malformed request: empty verification header")
	errInvalidSessionTokenSignature = errors.New("malformed request: invalid session token signature")
	errInvalidSessionTokenOwner     = errors.New("malformed request: invalid session token owner")
	errEmptyBodySignature           = errors.New("malformed request: empty body signature")
	errMissingOwnerID               = errors.New("malformed request: missing owner ID")
	errOwnerIDIsNotSet              = errors.New("owner id is not set")
	errInvalidDomainZone            = errors.New("invalid domain zone: no namespace is expected")

	undefinedContainerID = cid.ID{}
)

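// ir provides the current list of Inner Ring public keys.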
type ir interface {
	InnerRingKeys() ([][]byte, error)
}

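// containers provides read access to the container storage.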
type containers interface {
	Get(cid.ID) (*containercore.Container, error)
}

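// apeChecker is a Server decorator that checks incoming container requests
// against APE (Access Policy Engine) chains before forwarding them to next.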
type apeChecker struct {
	router policyengine.ChainRouter
	reader containers
	ir     ir
	nm     netmap.Source

	frostFSIDClient frostfsidcore.SubjectProvider

	next Server
}

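// NewAPEServer wraps srv into an apeChecker that authorizes requests via the given chain router.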
func NewAPEServer(router policyengine.ChainRouter, reader containers, ir ir, nm netmap.Source, frostFSIDClient frostfsidcore.SubjectProvider, srv Server) Server {
	return &apeChecker{
		router:          router,
		reader:          reader,
		ir:              ir,
		next:            srv,
		nm:              nm,
		frostFSIDClient: frostFSIDClient,
	}
}

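// Delete authorizes the DeleteContainer operation and forwards the request on success.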
func (ac *apeChecker) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Delete")
	defer span.End()

	if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
		nativeschema.MethodDeleteContainer); err != nil {
		return nil, err
	}

	return ac.next.Delete(ctx, req)
}

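// Get authorizes the GetContainer operation and forwards the request on success.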
func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Get")
	defer span.End()

	if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
		nativeschema.MethodGetContainer); err != nil {
		return nil, err
	}

	return ac.next.Get(ctx, req)
}

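// List authorizes the ListContainers operation within the owner's namespace and forwards the request on success.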
func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
	defer span.End()

	role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
	if err != nil {
		return nil, err
	}

	reqProps := map[string]string{
		nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
		nativeschema.PropertyKeyActorRole:      role,
	}

	reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
	if err != nil {
		return nil, err
	}
	if p, ok := peer.FromContext(ctx); ok {
		if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
			reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
		}
	}

	namespace, err := ac.namespaceByOwner(req.GetBody().GetOwnerID())
	if err != nil {
		return nil, fmt.Errorf("could not get owner namespace: %w", err)
	}
	if err := ac.validateNamespaceByPublicKey(pk, namespace); err != nil {
		return nil, err
	}

	request := aperequest.NewRequest(
		nativeschema.MethodListContainers,
		aperequest.NewResource(
			resourceName(namespace, ""),
			make(map[string]string),
		),
		reqProps,
	)

	groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
	if err != nil {
		return nil, fmt.Errorf("failed to get group ids: %w", err)
	}

	// The policy contract keeps group-related chains keyed by namespace-group pairs.
	for i := range groups {
		groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
	}

	rt := policyengine.NewRequestTargetWithNamespace(namespace)
	rt.User = &policyengine.Target{
		Type: policyengine.User,
		Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
	}
	rt.Groups = make([]policyengine.Target, len(groups))
	for i := range groups {
		rt.Groups[i] = policyengine.GroupTarget(groups[i])
	}

	s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
	if err != nil {
		return nil, err
	}

	if found && s == apechain.Allow {
		return ac.next.List(ctx, req)
	}

	return nil, apeErr(nativeschema.MethodListContainers, s)
}

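// Put authorizes the PutContainer operation within the owner's namespace and forwards the request on success.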
func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.Put")
	defer span.End()

	role, pk, err := ac.getRoleWithoutContainerID(req.GetBody().GetContainer().GetOwnerID(), req.GetMetaHeader(), req.GetVerificationHeader())
	if err != nil {
		return nil, err
	}

	reqProps := map[string]string{
		nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
		nativeschema.PropertyKeyActorRole:      role,
	}

	reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
	if err != nil {
		return nil, err
	}
	if p, ok := peer.FromContext(ctx); ok {
		if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
			reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
		}
	}

	namespace, err := ac.namespaceByKnownOwner(req.GetBody().GetContainer().GetOwnerID())
	if err != nil {
		return nil, fmt.Errorf("get namespace error: %w", err)
	}
	if err = validateNamespace(req.GetBody().GetContainer(), namespace); err != nil {
		return nil, err
	}

	request := aperequest.NewRequest(
		nativeschema.MethodPutContainer,
		aperequest.NewResource(
			resourceName(namespace, ""),
			make(map[string]string),
		),
		reqProps,
	)

	groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
	if err != nil {
		return nil, fmt.Errorf("failed to get group ids: %w", err)
	}

	// The policy contract keeps group-related chains keyed by namespace-group pairs.
	for i := range groups {
		groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
	}

	rt := policyengine.NewRequestTargetWithNamespace(namespace)
	rt.User = &policyengine.Target{
		Type: policyengine.User,
		Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
	}
	rt.Groups = make([]policyengine.Target, len(groups))
	for i := range groups {
		rt.Groups[i] = policyengine.GroupTarget(groups[i])
	}

	s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
	if err != nil {
		return nil, err
	}

	if found && s == apechain.Allow {
		return ac.next.Put(ctx, req)
	}

	return nil, apeErr(nativeschema.MethodPutContainer, s)
}

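// getRoleWithoutContainerID determines the actor role (owner, Inner Ring or
// others) for operations that are not bound to an existing container.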
func (ac *apeChecker) getRoleWithoutContainerID(oID *refs.OwnerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader) (string, *keys.PublicKey, error) {
	if vh == nil {
		return "", nil, errMissingVerificationHeader
	}

	if oID == nil {
		return "", nil, errMissingOwnerID
	}
	var ownerID user.ID
	if err := ownerID.ReadFromV2(*oID); err != nil {
		return "", nil, err
	}

	actor, pk, err := ac.getActorAndPublicKey(mh, vh, undefinedContainerID)
	if err != nil {
		return "", nil, err
	}

	if actor.Equals(ownerID) {
		return nativeschema.PropertyValueContainerRoleOwner, pk, nil
	}

	pkBytes := pk.Bytes()
	isIR, err := ac.isInnerRingKey(pkBytes)
	if err != nil {
		return "", nil, err
	}
	if isIR {
		return nativeschema.PropertyValueContainerRoleIR, pk, nil
	}

	return nativeschema.PropertyValueContainerRoleOthers, pk, nil
}

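// validateContainerBoundedOperation checks a container-bound operation against
// the APE chains and returns an error unless the operation is explicitly allowed.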
func (ac *apeChecker) validateContainerBoundedOperation(ctx context.Context, containerID *refs.ContainerID, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, op string) error {
	if vh == nil {
		return errMissingVerificationHeader
	}

	id, err := getContainerID(containerID)
	if err != nil {
		return err
	}

	cont, err := ac.reader.Get(id)
	if err != nil {
		return err
	}

	reqProps, pk, err := ac.getRequestProps(ctx, mh, vh, cont, id)
	if err != nil {
		return err
	}

	namespace := ""
	cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cont.Value).Zone(), ".ns")
	if hasNamespace {
		namespace = cntNamespace
	}

	groups, err := aperequest.Groups(ac.frostFSIDClient, pk)
	if err != nil {
		return fmt.Errorf("failed to get group ids: %w", err)
	}

	// The policy contract keeps group-related chains keyed by namespace-group pairs.
	for i := range groups {
		groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
	}

	request := aperequest.NewRequest(
		op,
		aperequest.NewResource(
			resourceName(namespace, id.EncodeToString()),
			ac.getContainerProps(cont),
		),
		reqProps,
	)

	s, found, err := ac.router.IsAllowed(apechain.Ingress,
		policyengine.NewRequestTargetExtended(namespace, id.EncodeToString(), fmt.Sprintf("%s:%s", namespace, pk.Address()), groups),
		request)
	if err != nil {
		return err
	}

	if found && s == apechain.Allow {
		return nil
	}

	return apeErr(op, s)
}

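// apeErr converts an APE chain status into an access-denied API status error.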
func apeErr(operation string, status apechain.Status) error {
	errAccessDenied := &apistatus.ObjectAccessDenied{}
	errAccessDenied.WriteReason(fmt.Sprintf("access to container operation %s is denied by access policy engine: %s", operation, status.String()))
	return errAccessDenied
}

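// getContainerID parses a container ID from its API (V2) representation.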
func getContainerID(reqContID *refs.ContainerID) (cid.ID, error) {
	if reqContID == nil {
		return cid.ID{}, errMissingContainerID
	}
	var id cid.ID
	err := id.ReadFromV2(*reqContID)
	if err != nil {
		return cid.ID{}, fmt.Errorf("invalid container ID: %w", err)
	}
	return id, nil
}

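// resourceName builds a native-schema resource name, falling back to the
// root-container formats when the namespace or container ID is empty.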
func resourceName(namespace string, container string) string {
	if namespace == "" && container == "" {
		return nativeschema.ResourceFormatRootContainers
	}
	if namespace == "" && container != "" {
		return fmt.Sprintf(nativeschema.ResourceFormatRootContainer, container)
	}
	if namespace != "" && container == "" {
		return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainers, namespace)
	}
	return fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainer, namespace, container)
}

func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]string {
	return map[string]string{
		nativeschema.PropertyKeyContainerOwnerID: c.Value.Owner().EncodeToString(),
	}
}

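// getRequestProps collects actor-related request properties: public key, role,
// user claim tags and, when available, the source IP of the connection.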
func (ac *apeChecker) getRequestProps(ctx context.Context, mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
	cont *containercore.Container, cnrID cid.ID,
) (map[string]string, *keys.PublicKey, error) {
	actor, pk, err := ac.getActorAndPublicKey(mh, vh, cnrID)
	if err != nil {
		return nil, nil, err
	}
	role, err := ac.getRole(actor, pk, cont, cnrID)
	if err != nil {
		return nil, nil, err
	}
	reqProps := map[string]string{
		nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
		nativeschema.PropertyKeyActorRole:      role,
	}
	reqProps, err = ac.fillWithUserClaimTags(reqProps, pk)
	if err != nil {
		return nil, nil, err
	}
	if p, ok := peer.FromContext(ctx); ok {
		if tcpAddr, ok := p.Addr.(*net.TCPAddr); ok {
			reqProps[commonschema.PropertyKeyFrostFSSourceIP] = tcpAddr.IP.String()
		}
	}
	return reqProps, pk, nil
}

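// getRole resolves the actor role with respect to the container: owner,
// Inner Ring, container node, or others.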
func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {
	if cont.Value.Owner().Equals(*actor) {
		return nativeschema.PropertyValueContainerRoleOwner, nil
	}

	pkBytes := pk.Bytes()
	isIR, err := ac.isInnerRingKey(pkBytes)
	if err != nil {
		return "", err
	}
	if isIR {
		return nativeschema.PropertyValueContainerRoleIR, nil
	}

	isContainer, err := ac.isContainerKey(pkBytes, cnrID, cont)
	if err != nil {
		return "", err
	}
	if isContainer {
		return nativeschema.PropertyValueContainerRoleContainer, nil
	}

	return nativeschema.PropertyValueContainerRoleOthers, nil
}

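// getActorAndPublicKey extracts the request actor and its public key from the
// session token if one is attached, otherwise from the origin body signature.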
func (ac *apeChecker) getActorAndPublicKey(mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
	st, err := ac.getSessionToken(mh)
	if err != nil {
		return nil, nil, err
	}

	if st != nil {
		return ac.getActorAndPKFromSessionToken(st, cnrID)
	}
	return ac.getActorAndPKFromSignature(vh)
}

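// getActorAndPKFromSignature derives the actor ID from the key of the origin body signature.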
func (ac *apeChecker) getActorAndPKFromSignature(vh *session.RequestVerificationHeader) (*user.ID, *keys.PublicKey, error) {
	for vh.GetOrigin() != nil {
		vh = vh.GetOrigin()
	}
	sig := vh.GetBodySignature()
	if sig == nil {
		return nil, nil, errEmptyBodySignature
	}
	key, err := keys.NewPublicKeyFromBytes(sig.GetKey(), elliptic.P256())
	if err != nil {
		return nil, nil, fmt.Errorf("invalid signature key: %w", err)
	}

	var userID user.ID
	user.IDFromKey(&userID, (ecdsa.PublicKey)(*key))

	return &userID, key, nil
}

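// getSessionToken extracts the container session token from the origin meta
// header; it returns nil without an error if the request carries no token.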
func (ac *apeChecker) getSessionToken(mh *session.RequestMetaHeader) (*sessionSDK.Container, error) {
	for mh.GetOrigin() != nil {
		mh = mh.GetOrigin()
	}
	st := mh.GetSessionToken()
	if st == nil {
		return nil, nil
	}

	var tok sessionSDK.Container
	err := tok.ReadFromV2(*st)
	if err != nil {
		return nil, fmt.Errorf("invalid session token: %w", err)
	}

	return &tok, nil
}

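// getActorAndPKFromSessionToken verifies the session token and returns its
// issuer as the actor together with the signer's public key.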
func (ac *apeChecker) getActorAndPKFromSessionToken(st *sessionSDK.Container, cnrID cid.ID) (*user.ID, *keys.PublicKey, error) {
	if cnrID != undefinedContainerID && !st.AppliedTo(cnrID) {
		return nil, nil, errSessionContainerMismatch
	}
	if !st.VerifySignature() {
		return nil, nil, errInvalidSessionTokenSignature
	}
	var tok session.Token
	st.WriteToV2(&tok)

	signaturePublicKey, err := keys.NewPublicKeyFromBytes(tok.GetSignature().GetKey(), elliptic.P256())
	if err != nil {
		return nil, nil, fmt.Errorf("invalid key in session token signature: %w", err)
	}

	tokenIssuer := st.Issuer()
	if !isOwnerFromKey(tokenIssuer, signaturePublicKey) {
		return nil, nil, errInvalidSessionTokenOwner
	}

	return &tokenIssuer, signaturePublicKey, nil
}

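// isOwnerFromKey reports whether the user ID is derived from the given public key.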
func isOwnerFromKey(id user.ID, key *keys.PublicKey) bool {
	if key == nil {
		return false
	}

	var id2 user.ID
	user.IDFromKey(&id2, (ecdsa.PublicKey)(*key))

	return id2.Equals(id)
}

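// isInnerRingKey reports whether pk belongs to an Inner Ring node.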
func (ac *apeChecker) isInnerRingKey(pk []byte) (bool, error) {
	innerRingKeys, err := ac.ir.InnerRingKeys()
	if err != nil {
		return false, err
	}

	for i := range innerRingKeys {
		if bytes.Equal(innerRingKeys[i], pk) {
			return true, nil
		}
	}

	return false, nil
}

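// isContainerKey reports whether pk belongs to a node that hosts the container
// according to the current or the previous network map.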
func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercore.Container) (bool, error) {
	binCnrID := make([]byte, sha256.Size)
	cnrID.Encode(binCnrID)

	nm, err := netmap.GetLatestNetworkMap(ac.nm)
	if err != nil {
		return false, err
	}

	if isContainerNode(nm, pk, binCnrID, cont) {
		return true, nil
	}

	// Also check the previous netmap: in between epoch changes a node may
	// still be migrating data for a container placed by the last epoch's map.
	nm, err = netmap.GetPreviousNetworkMap(ac.nm)
	if err != nil {
		return false, err
	}

	return isContainerNode(nm, pk, binCnrID, cont), nil
}

func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
	// ContainerNodes can return an error only if the network map doesn't have
	// enough nodes to fulfil the container placement policy. That is a logical
	// error which doesn't affect actor role determination, so it is ignored.
	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)

	for i := range cnrVectors {
		for j := range cnrVectors[i] {
			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
				return true
			}
		}
	}

	return false
}

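// namespaceByOwner resolves the owner's namespace via the frostfs-id contract.
// An owner without a registered subject belongs to the root (empty) namespace.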
func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
	var ownerSDK user.ID
	if owner == nil {
		return "", errOwnerIDIsNotSet
	}
	if err := ownerSDK.ReadFromV2(*owner); err != nil {
		return "", err
	}
	addr, err := ownerSDK.ScriptHash()
	if err != nil {
		return "", err
	}

	namespace := ""
	subject, err := ac.frostFSIDClient.GetSubject(addr)
	if err == nil {
		namespace = subject.Namespace
	} else if !strings.Contains(err.Error(), frostfsidcore.SubjectNotFoundErrorMessage) {
		return "", fmt.Errorf("get subject error: %w", err)
	}
	return namespace, nil
}

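// namespaceByKnownOwner resolves the owner's namespace and fails if the owner
// has no subject registered in the frostfs-id contract.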
func (ac *apeChecker) namespaceByKnownOwner(owner *refs.OwnerID) (string, error) {
	var ownerSDK user.ID
	if owner == nil {
		return "", errOwnerIDIsNotSet
	}
	if err := ownerSDK.ReadFromV2(*owner); err != nil {
		return "", err
	}
	addr, err := ownerSDK.ScriptHash()
	if err != nil {
		return "", err
	}
	subject, err := ac.frostFSIDClient.GetSubject(addr)
	if err != nil {
		return "", fmt.Errorf("get subject error: %w", err)
	}
	return subject.Namespace, nil
}

// validateNamespace validates the namespace set in a container.
// If the frostfs-id contract stores namespace N1 for the owner ID while the
// container within a request carries namespace N2 (via the Zone() property),
// then N2 is invalid and the request is denied.
func validateNamespace(cnrV2 *container.Container, ownerIDNamespace string) error {
	if cnrV2 == nil {
		return nil
	}
	var cnr cnrSDK.Container
	if err := cnr.ReadFromV2(*cnrV2); err != nil {
		return err
	}
	cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(cnr).Zone(), ".ns")
	if hasNamespace {
		if cntNamespace != ownerIDNamespace {
			if ownerIDNamespace == "" {
				return errInvalidDomainZone
			}
			return fmt.Errorf("invalid domain zone: expected namespace %s, but got %s", ownerIDNamespace, cntNamespace)
		}
	} else if ownerIDNamespace != "" {
		return fmt.Errorf("invalid domain zone: expected namespace %s, but got invalid or empty", ownerIDNamespace)
	}
	return nil
}

// validateNamespaceByPublicKey checks that the namespace of the request actor
// equals the owner's namespace. The actor's namespace is derived from its public key.
func (ac *apeChecker) validateNamespaceByPublicKey(pk *keys.PublicKey, ownerIDNamespace string) error {
	var actor user.ID
	user.IDFromKey(&actor, (ecdsa.PublicKey)(*pk))
	actorOwnerID := new(refs.OwnerID)
	actor.WriteToV2(actorOwnerID)
	actorNamespace, err := ac.namespaceByOwner(actorOwnerID)
	if err != nil {
		return fmt.Errorf("could not get actor namespace: %w", err)
	}
	if actorNamespace != ownerIDNamespace {
		return fmt.Errorf("actor namespace %s differs from owner namespace %s", actorNamespace, ownerIDNamespace)
	}
	return nil
}

// fillWithUserClaimTags fills APE request properties with user claim tags,
// fetching them from the frostfsid contract by the actor's public key.
func (ac *apeChecker) fillWithUserClaimTags(reqProps map[string]string, pk *keys.PublicKey) (map[string]string, error) {
	if reqProps == nil {
		reqProps = make(map[string]string)
	}
	props, err := aperequest.FormFrostfsIDRequestProperties(ac.frostFSIDClient, pk)
	if err != nil {
		return reqProps, err
	}
	for propertyName, propertyValue := range props {
		reqProps[propertyName] = propertyValue
	}
	return reqProps, nil
}