[#175] Use gate owner as object owner #181
22 changed files with 122 additions and 84 deletions
@@ -35,6 +35,7 @@ This document outlines major changes between releases.
 - Use default registerer for app metrics (#155)
 - Use chi router instead of archived gorlilla/mux (#149)
 - Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
+- Use gate key to form object owner (#175)

 ### Removed
 - Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
@@ -19,6 +19,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -105,7 +106,7 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
     require.NoError(t, err)

     l := zap.NewExample()
-    tp := layer.NewTestFrostFS()
+    tp := layer.NewTestFrostFS(key)

     testResolver := &resolver.Resolver{Name: "test_resolver"}
     testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {

@@ -183,6 +184,7 @@ func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
     _, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
         Creator:  hc.owner,
         Name:     bktName,
+        BasicACL: acl.PublicRWExtended,
     })
     require.NoError(hc.t, err)

@@ -12,6 +12,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
     apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
     "github.com/stretchr/testify/require"

@@ -85,17 +86,30 @@ func headObject(t *testing.T, tc *handlerContext, bktName, objName string, heade
 }

 func TestInvalidAccessThroughCache(t *testing.T) {
-    tc := prepareHandlerContext(t)
+    hc := prepareHandlerContext(t)

     bktName, objName := "bucket-for-cache", "obj-for-cache"
-    createBucketAndObject(tc, bktName, objName)
+    bktInfo, _ := createBucketAndObject(hc, bktName, objName)
+    setContainerEACL(hc, bktInfo.CID)

-    headObject(t, tc, bktName, objName, nil, http.StatusOK)
+    headObject(t, hc, bktName, objName, nil, http.StatusOK)

-    w, r := prepareTestRequest(tc, bktName, objName, nil)
-    tc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), middleware.BoxData, newTestAccessBox(t, nil))))
+    w, r := prepareTestRequest(hc, bktName, objName, nil)
+    hc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), middleware.BoxData, newTestAccessBox(t, nil))))
     assertStatus(t, w, http.StatusForbidden)
 }

+func setContainerEACL(hc *handlerContext, cnrID cid.ID) {
+    table := eacl.NewTable()
+    table.SetCID(cnrID)
+    for _, op := range fullOps {
+        table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
+    }
+
+    err := hc.MockedPool().SetContainerEACL(hc.Context(), *table, nil)
+    require.NoError(hc.t, err)
+}
+
 func TestHeadObject(t *testing.T) {
     hc := prepareHandlerContextWithMinCache(t)
     bktName, objName := "bucket", "obj"
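The helpers `fullOps` and `getOthersRecord` used by `setContainerEACL` live elsewhere in the test package and are not part of this diff. A minimal sketch of an equivalent deny-all-for-others table, assuming the SDK's `eacl.CreateRecord` and `eacl.AddFormedTarget` helpers (the function name `denyOthersTable` is illustrative):

```go
package example

import (
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
)

// denyOthersTable builds an eACL table for the given container that denies
// every listed operation to the OTHERS role, which is what makes the
// anonymous HeadObject in TestInvalidAccessThroughCache return 403.
func denyOthersTable(cnrID cid.ID) *eacl.Table {
    table := eacl.NewTable()
    table.SetCID(cnrID)

    ops := []eacl.Operation{
        eacl.OperationGet, eacl.OperationHead, eacl.OperationPut,
        eacl.OperationDelete, eacl.OperationSearch,
        eacl.OperationRange, eacl.OperationRangeHash,
    }
    for _, op := range ops {
        record := eacl.CreateRecord(eacl.ActionDeny, op)
        eacl.AddFormedTarget(record, eacl.RoleOthers)
        table.AddRecord(record)
    }

    return table
}
```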
@@ -11,7 +11,7 @@ import (

 func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
     var err error
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)

     tags := n.cache.GetTagging(owner, objectTaggingCacheKey(objVersion))
     lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion))
@@ -76,12 +76,7 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
 }

 func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
-    var (
-        err error
-        own = n.Owner(ctx)
-        res []cid.ID
-    )
-    res, err = n.frostFS.UserContainers(ctx, own)
+    res, err := n.frostFS.UserContainers(ctx, n.BearerOwner(ctx))
     if err != nil {
         n.reqLogger(ctx).Error("could not list user containers", zap.Error(err))
         return nil, err

@@ -102,14 +97,13 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 }

 func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
-    ownerID := n.Owner(ctx)
     if p.LocationConstraint == "" {
         p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
     }
     bktInfo := &data.BucketInfo{
         Name: p.Name,
         Zone: v2container.SysAttributeZoneDefault,
-        Owner: ownerID,
+        Owner: n.BearerOwner(ctx),
         Created: TimeNow(ctx),
         LocationConstraint: p.LocationConstraint,
         ObjectLockEnabled: p.ObjectLockEnabled,
@@ -38,7 +38,6 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

     prm := PrmObjectCreate{
         Container: p.BktInfo.CID,
-        Creator: p.BktInfo.Owner,
         Payload: &buf,
         Filepath: p.BktInfo.CORSObjectName(),
         CreationTime: TimeNow(ctx),

@@ -64,7 +63,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
         }
     }

-    n.cache.PutCORS(n.Owner(ctx), p.BktInfo, cors)
+    n.cache.PutCORS(n.BearerOwner(ctx), p.BktInfo, cors)

     return nil
 }
@@ -91,9 +91,6 @@ type PrmObjectCreate struct {
     // Container to store the object.
     Container cid.ID

-    // FrostFS identifier of the object creator.
-    Creator user.ID
-
     // Key-value object attributes.
     Attributes [][2]string

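With `Creator` removed, callers of the layer's FrostFS interface only describe the object; the implementation stamps the owner itself. A sketch of the post-change call shape, assuming `layer.FrostFS.CreateObject(ctx, PrmObjectCreate) (oid.ID, error)` and the field names shown above (the object key and payload are made up for illustration):

```go
package example

import (
    "bytes"
    "context"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// putObjectSketch stores a small payload; note there is no Creator field any
// more — the FrostFS implementation fills the owner from the gate key.
func putObjectSketch(ctx context.Context, frostFS layer.FrostFS, cnrID cid.ID) error {
    prm := layer.PrmObjectCreate{
        Container:    cnrID,
        Filepath:     "examples/hello.txt", // hypothetical object key
        Payload:      bytes.NewReader([]byte("hello")),
        CreationTime: time.Now(),
    }

    _, err := frostFS.CreateObject(ctx, prm)
    return err
}
```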
@@ -23,6 +23,7 @@ import (
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

 type TestFrostFS struct {

@@ -34,15 +35,17 @@ type TestFrostFS struct {
     containers map[string]*container.Container
     eaclTables map[string]*eacl.Table
     currentEpoch uint64
+    key *keys.PrivateKey
 }

-func NewTestFrostFS() *TestFrostFS {
+func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
     return &TestFrostFS{
         objects: make(map[string]*object.Object),
         objectErrors: make(map[string]error),
         objectPutErrors: make(map[string]error),
         containers: make(map[string]*container.Container),
         eaclTables: make(map[string]*eacl.Table),
+        key: key,
     }
 }

@@ -178,8 +181,8 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
     }

     if obj, ok := t.objects[sAddr]; ok {
-        owner := getOwner(ctx)
-        if !obj.OwnerID().Equals(owner) && !t.isPublicRead(prm.Container) {
+        owner := getBearerOwner(ctx)
+        if !t.checkAccess(prm.Container, owner, eacl.OperationGet) {
             return nil, ErrAccessDenied
         }

@@ -227,13 +230,16 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
         attrs = append(attrs, *a)
     }

+    var owner user.ID
+    user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
+
     obj := object.New()
     obj.SetContainerID(prm.Container)
     obj.SetID(id)
     obj.SetPayloadSize(prm.PayloadSize)
     obj.SetAttributes(attrs...)
     obj.SetCreationEpoch(t.currentEpoch)
-    obj.SetOwnerID(&prm.Creator)
+    obj.SetOwnerID(&owner)
     t.currentEpoch++

     if len(prm.Locks) > 0 {

@@ -271,9 +277,9 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
         return err
     }

-    if obj, ok := t.objects[addr.EncodeToString()]; ok {
-        owner := getOwner(ctx)
-        if !obj.OwnerID().Equals(owner) {
+    if _, ok := t.objects[addr.EncodeToString()]; ok {
+        owner := getBearerOwner(ctx)
+        if !t.checkAccess(prm.Container, owner, eacl.OperationDelete) {
             return ErrAccessDenied
         }

@@ -325,26 +331,42 @@ func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Tabl
     return table, nil
 }

-func (t *TestFrostFS) isPublicRead(cnrID cid.ID) bool {
-    table, ok := t.eaclTables[cnrID.EncodeToString()]
+func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation) bool {
+    cnr, ok := t.containers[cnrID.EncodeToString()]
     if !ok {
         return false
     }

+    if !cnr.BasicACL().Extendable() {
+        return cnr.Owner().Equals(owner)
+    }
+
+    table, ok := t.eaclTables[cnrID.EncodeToString()]
+    if !ok {
+        return true
+    }
+
     for _, rec := range table.Records() {
-        if rec.Operation() == eacl.OperationGet && len(rec.Filters()) == 0 {
+        if rec.Operation() == op && len(rec.Filters()) == 0 {
             for _, trgt := range rec.Targets() {
                 if trgt.Role() == eacl.RoleOthers {
                     return rec.Action() == eacl.ActionAllow
                 }
+                var targetOwner user.ID
+                for _, pk := range eacl.TargetECDSAKeys(&trgt) {
+                    user.IDFromKey(&targetOwner, *pk)
+                    if targetOwner.Equals(owner) {
+                        return rec.Action() == eacl.ActionAllow
+                    }
+                }
             }
         }
     }

-    return false
-}
-
-func getOwner(ctx context.Context) user.ID {
+    return true
+}
+
+func getBearerOwner(ctx context.Context) user.ID {
     if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
         return bearer.ResolveIssuer(*bd.Gate.BearerToken)
     }
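The `checkAccess` helper above first inspects the container's basic ACL: for non-extendable ACLs only the container owner passes, and the eACL table is consulted only for extendable ones. That is why the handler tests now create buckets with `acl.PublicRWExtended`. A tiny sketch of the distinction, assuming the SDK's `container/acl` constants:

```go
package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
)

func main() {
    // Non-extendable basic ACL: checkAccess compares the request owner with the
    // container owner and never reads the eACL table.
    fmt.Println("Private extendable:", acl.Private.Extendable())
    // Extendable basic ACL: eACL records decide, which is what the updated
    // handler tests rely on (they create buckets with PublicRWExtended).
    fmt.Println("PublicRWExtended extendable:", acl.PublicRWExtended.Extendable())
}
```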
@@ -48,6 +48,7 @@ type (
     layer struct {
         frostFS FrostFS
+        gateOwner user.ID
         log *zap.Logger
         anonKey AnonymousKey
         resolver BucketResolver

@@ -57,6 +58,7 @@
     }

     Config struct {
+        GateOwner user.ID
         ChainAddress string
         Caches *CachesConfig
         AnonKey AnonymousKey

@@ -295,6 +297,7 @@ func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
     return &layer{
         frostFS: frostFS,
         log: log,
+        gateOwner: config.GateOwner,
         anonKey: config.AnonKey,
         resolver: config.Resolver,
         cache: NewCache(config.Caches),

@@ -338,8 +341,8 @@ func TimeNow(ctx context.Context) time.Time {
     return time.Now()
 }

-// Owner returns owner id from BearerToken (context) or from client owner.
-func (n *layer) Owner(ctx context.Context) user.ID {
+// BearerOwner returns owner id from BearerToken (context) or from client owner.
+func (n *layer) BearerOwner(ctx context.Context) user.ID {
     if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
         return bearer.ResolveIssuer(*bd.Gate.BearerToken)
     }

@@ -658,7 +661,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
         },
         DeleteMarker: &data.DeleteMarkerInfo{
             Created: TimeNow(ctx),
-            Owner: n.Owner(ctx),
+            Owner: n.gateOwner,
         },
         IsUnversioned: settings.VersioningSuspended(),
     }
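After the rename, `BearerOwner` no longer decides who owns stored objects (that is now `gateOwner`); it only identifies the caller for cache scoping. A sketch of the resolution logic mirroring the hunk above; the anonymous fallback parameter is an assumption, since the tail of the real function is not shown in this diff:

```go
package example

import (
    "context"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

// bearerOwner mirrors layer.BearerOwner: when the request context carries an
// access box with a bearer token, the token issuer identifies the caller;
// otherwise fall back to the supplied anonymous identity (an assumption here).
func bearerOwner(ctx context.Context, anonymous user.ID) user.ID {
    if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
        return bearer.ResolveIssuer(*bd.Gate.BearerToken)
    }

    return anonymous
}
```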
@@ -149,7 +149,7 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
     info := &data.MultipartInfo{
         Key: p.Info.Key,
         UploadID: p.Info.UploadID,
-        Owner: n.Owner(ctx),
+        Owner: n.gateOwner,
         Created: TimeNow(ctx),
         Meta: make(map[string]string, metaSize),
         CopiesNumbers: p.CopiesNumbers,

@@ -209,7 +209,6 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
     bktInfo := p.Info.Bkt
     prm := PrmObjectCreate{
         Container: bktInfo.CID,
-        Creator: bktInfo.Owner,
         Attributes: make([][2]string, 2),
         Payload: p.Reader,
         CreationTime: TimeNow(ctx),
@@ -27,7 +27,6 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu

     prm := PrmObjectCreate{
         Container: p.BktInfo.CID,
-        Creator: p.BktInfo.Owner,
         Payload: bytes.NewReader(confXML),
         Filepath: p.BktInfo.NotificationConfigurationObjectName(),
         CreationTime: TimeNow(ctx),

@@ -53,13 +52,13 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
         }
     }

-    n.cache.PutNotificationConfiguration(n.Owner(ctx), p.BktInfo, p.Configuration)
+    n.cache.PutNotificationConfiguration(n.BearerOwner(ctx), p.BktInfo, p.Configuration)

     return nil
 }

 func (n *layer) GetBucketNotificationConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.NotificationConfiguration, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if conf := n.cache.GetNotificationConfiguration(owner, bktInfo); conf != nil {
         return conf, nil
     }
@@ -238,8 +238,6 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {

 // PutObject stores object into FrostFS, took payload from io.Reader.
 func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
-    owner := n.Owner(ctx)
-
     bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
     if err != nil {
         return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)

@@ -275,7 +273,6 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend

     prm := PrmObjectCreate{
         Container: p.BktInfo.CID,
-        Creator: owner,
         PayloadSize: p.Size,
         Filepath: p.Object,
         Payload: r,

@@ -334,7 +331,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         ID: id,
         CID: p.BktInfo.CID,

-        Owner: owner,
+        Owner: n.gateOwner,
         Bucket: p.BktInfo.Name,
         Name: p.Object,
         Size: size,

@@ -349,13 +346,13 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         NodeVersion: newVersion,
     }

-    n.cache.PutObjectWithName(owner, extendedObjInfo)
+    n.cache.PutObjectWithName(n.BearerOwner(ctx), extendedObjInfo)

     return extendedObjInfo, nil
 }

 func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
         return extObjInfo, nil
     }

@@ -419,7 +416,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
         }
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
         return extObjInfo, nil
     }

@@ -546,7 +543,7 @@ func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams)
         return nil, nil, nil
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
     nodeVersions := n.cache.GetList(owner, cacheKey)

@@ -674,7 +671,7 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
 func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
     var err error

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
     nodeVersions := n.cache.GetList(owner, cacheKey)

@@ -794,7 +791,7 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo
         return oiDir
     }

-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
         return extInfo.ObjectInfo
     }
@@ -95,14 +95,14 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
         return fmt.Errorf("couldn't put lock into tree: %w", err)
     }

-    n.cache.PutLockInfo(n.Owner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
+    n.cache.PutLockInfo(n.BearerOwner(ctx), lockObjectKey(p.ObjVersion), lockInfo)

     return nil
 }

 func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
     // check cache if node version is stored inside extendedObjectVersion
-    nodeVersion = n.getNodeVersionFromCache(n.Owner(ctx), objVersion)
+    nodeVersion = n.getNodeVersionFromCache(n.BearerOwner(ctx), objVersion)
     if nodeVersion == nil {
         // else get node version from tree service
         return n.getNodeVersion(ctx, objVersion)

@@ -114,7 +114,6 @@ func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
 func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
     prm := PrmObjectCreate{
         Container: bktInfo.CID,
-        Creator: bktInfo.Owner,
         Locks: []oid.ID{objID},
         CreationTime: TimeNow(ctx),
         CopiesNumber: copiesNumber,

@@ -131,7 +130,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
 }

 func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*data.LockInfo, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion)); lockInfo != nil {
         return lockInfo, nil
     }

@@ -155,7 +154,7 @@ func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*da
 }

 func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if cors := n.cache.GetCORS(owner, bkt); cors != nil {
         return cors, nil
     }

@@ -192,7 +191,7 @@ func lockObjectKey(objVersion *ObjectVersion) string {
 }

 func (n *layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)
     if settings := n.cache.GetSettings(owner, bktInfo); settings != nil {
         return settings, nil
     }

@@ -215,7 +214,7 @@ func (n *layer) PutBucketSettings(ctx context.Context, p *PutSettingsParams) err
         return fmt.Errorf("failed to get settings node: %w", err)
     }

-    n.cache.PutSettings(n.Owner(ctx), p.BktInfo, p.Settings)
+    n.cache.PutSettings(n.BearerOwner(ctx), p.BktInfo, p.Settings)

     return nil
 }
@@ -30,7 +30,7 @@ type PutObjectTaggingParams struct {

 func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams) (string, map[string]string, error) {
     var err error
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)

     if len(p.ObjectVersion.VersionID) != 0 && p.ObjectVersion.VersionID != data.UnversionedObjectVersionID {
         if tags := n.cache.GetTagging(owner, objectTaggingCacheKey(p.ObjectVersion)); tags != nil {

@@ -82,7 +82,7 @@ func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams)
         return nil, err
     }

-    n.cache.PutTagging(n.Owner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)
+    n.cache.PutTagging(n.BearerOwner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)

     return nodeVersion, nil
 }

@@ -109,7 +109,7 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat
 }

 func (n *layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
-    owner := n.Owner(ctx)
+    owner := n.BearerOwner(ctx)

     if tags := n.cache.GetTagging(owner, bucketTaggingCacheKey(bktInfo.CID)); tags != nil {
         return tags, nil

@@ -130,7 +130,7 @@ func (n *layer) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo,
         return err
     }

-    n.cache.PutTagging(n.Owner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
+    n.cache.PutTagging(n.BearerOwner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)

     return nil
 }
@@ -150,7 +150,7 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
             GateKey: key.PublicKey(),
         },
     })
-    tp := NewTestFrostFS()
+    tp := NewTestFrostFS(key)

     bktName := "testbucket1"
     bktID, err := tp.CreateContainer(ctx, PrmContainerCreate{
@@ -139,7 +139,7 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
     }

     poolCfg := PoolConfig{
-        Key: &key.PrivateKey,
+        Key: key,
         Address: viper.GetString(peerFlag),
         DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
         HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
@@ -68,7 +68,7 @@ func runObtainSecretCmd(cmd *cobra.Command, _ []string) error {
     }

     poolCfg := PoolConfig{
-        Key: &key.PrivateKey,
+        Key: key,
        Address: viper.GetString(peerFlag),
         DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
         HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
@@ -81,7 +81,7 @@ func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
     }

     poolCfg := PoolConfig{
-        Key: &key.PrivateKey,
+        Key: key,
         Address: viper.GetString(peerFlag),
         DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
         HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
@@ -2,7 +2,6 @@ package modules

 import (
     "context"
-    "crypto/ecdsa"
     "encoding/json"
     "fmt"
     "os"

@@ -12,13 +11,14 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
+    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
     "github.com/spf13/viper"
     "go.uber.org/zap"
     "go.uber.org/zap/zapcore"
 )

 type PoolConfig struct {
-    Key *ecdsa.PrivateKey
+    Key *keys.PrivateKey
     Address string
     DialTimeout time.Duration
     HealthcheckTimeout time.Duration

@@ -30,7 +30,7 @@ func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authma
     log.Debug("prepare connection pool")

     var prm pool.InitParameters
-    prm.SetKey(cfg.Key)
+    prm.SetKey(&cfg.Key.PrivateKey)
     prm.SetNodeDialTimeout(cfg.DialTimeout)
     prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
     prm.SetNodeStreamTimeout(cfg.StreamTimeout)

@@ -47,7 +47,7 @@ func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authma
         return nil, fmt.Errorf("dial pool: %w", err)
     }

-    return frostfs.NewAuthmateFrostFS(p), nil
+    return frostfs.NewAuthmateFrostFS(p, cfg.Key), nil
 }

 func parsePolicies(val string) (authmate.ContainerPolicies, error) {
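`PoolConfig.Key` now carries the neo-go wrapper rather than a bare `*ecdsa.PrivateKey`, so one value can both initialize the pool and later derive the gate owner. A short sketch of that dual use (key generation stands in for loading the configured wallet key):

```go
package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
    key, err := keys.NewPrivateKey() // stand-in for the gate's configured key
    if err != nil {
        panic(err)
    }

    // The pool still consumes the embedded ecdsa.PrivateKey...
    var prm pool.InitParameters
    prm.SetKey(&key.PrivateKey)

    // ...while the gate owner is derived from the public half of the same key.
    var owner user.ID
    user.IDFromKey(&owner, key.PrivateKey.PublicKey)

    fmt.Println("gate owner:", owner.EncodeToString())
}
```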
@@ -33,6 +33,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/go-chi/chi/v5"
     "github.com/go-chi/chi/v5/middleware"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"

@@ -96,7 +97,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
     objPool, treePool, key := getPools(ctx, log.logger, v)

     // prepare auth center
-    ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
+    ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool, key), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))

     app := &App{
         ctr: ctr,

@@ -133,17 +134,21 @@ func (a *App) initLayer(ctx context.Context) {
         a.log.Fatal("couldn't generate random key", zap.Error(err))
     }

+    var gateOwner user.ID
+    user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)
+
     layerCfg := &layer.Config{
         Caches: getCacheOptions(a.cfg, a.log),
         AnonKey: layer.AnonymousKey{
             Key: randomKey,
         },
+        GateOwner: gateOwner,
         Resolver: a.bucketResolver,
         TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
     }

     // prepare object layer
-    a.obj = layer.NewLayer(a.log, frostfs.NewFrostFS(a.pool), layerCfg)
+    a.obj = layer.NewLayer(a.log, frostfs.NewFrostFS(a.pool, a.key), layerCfg)

     if a.cfg.GetBool(cfgEnableNATS) {
         nopts := getNotificationsOptions(a.cfg, a.log)
@@ -17,6 +17,7 @@ import (
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
+    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

 const (

@@ -29,8 +30,8 @@ type AuthmateFrostFS struct {
 }

 // NewAuthmateFrostFS creates new AuthmateFrostFS using provided pool.Pool.
-func NewAuthmateFrostFS(p *pool.Pool) *AuthmateFrostFS {
-    return &AuthmateFrostFS{frostFS: NewFrostFS(p)}
+func NewAuthmateFrostFS(p *pool.Pool, key *keys.PrivateKey) *AuthmateFrostFS {
+    return &AuthmateFrostFS{frostFS: NewFrostFS(p, key)}
 }

 // ContainerExists implements authmate.FrostFS interface method.

@@ -116,7 +117,6 @@ func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObject
     }

     return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
-        Creator: prm.Creator,
         Container: prm.Container,
         Filepath: prm.Filepath,
         Attributes: attributes,
@@ -12,6 +12,7 @@ import (
     objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
     errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"

@@ -21,6 +22,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

 // FrostFS represents virtual connection to the FrostFS network.

@@ -29,6 +31,7 @@ import (
 type FrostFS struct {
     pool *pool.Pool
     await pool.WaitParams
+    owner user.ID
 }

 const (

@@ -37,14 +40,18 @@ const (
 )

 // NewFrostFS creates new FrostFS using provided pool.Pool.
-func NewFrostFS(p *pool.Pool) *FrostFS {
+func NewFrostFS(p *pool.Pool, key *keys.PrivateKey) *FrostFS {
     var await pool.WaitParams
     await.SetPollInterval(defaultPollInterval)
     await.SetTimeout(defaultPollTimeout)

+    var owner user.ID
+    user.IDFromKey(&owner, key.PrivateKey.PublicKey)
+
     return &FrostFS{
         pool: p,
         await: await,
+        owner: owner,
     }
 }

@@ -136,12 +143,12 @@ func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCre
         return cid.ID{}, handleObjectError("sync container with the network state", err)
     }

-    var prmPut pool.PrmContainerPut
-    prmPut.SetContainer(cnr)
-    prmPut.SetWaitParams(x.await)
-
-    if prm.SessionToken != nil {
-        prmPut.WithinSession(*prm.SessionToken)
+    prmPut := pool.PrmContainerPut{
+        ClientParams: client.PrmContainerPut{
+            Container: &cnr,
+            Session: prm.SessionToken,
+        },
+        WaitParams: &x.await,
     }

     // send request to save the container
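The container-put parameters are now assembled as a single literal instead of a chain of setters. A sketch of a complete call, assuming `Pool.PutContainer(ctx, PrmContainerPut) (cid.ID, error)`, which is not shown in this diff:

```go
package example

import (
    "context"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
)

// putContainer wraps the literal-style parameters used above. Session may be
// nil when the container is created outside of a session.
func putContainer(ctx context.Context, p *pool.Pool, cnr container.Container, tok *session.Container, wait pool.WaitParams) (cid.ID, error) {
    prm := pool.PrmContainerPut{
        ClientParams: client.PrmContainerPut{
            Container: &cnr,
            Session:   tok,
        },
        WaitParams: &wait,
    }

    return p.PutContainer(ctx, prm)
}
```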
@@ -237,7 +244,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (

     obj := object.New()
     obj.SetContainerID(prm.Container)
-    obj.SetOwnerID(&prm.Creator)
+    obj.SetOwnerID(&x.owner)
     obj.SetAttributes(attrs...)
     obj.SetPayloadSize(prm.PayloadSize)
