Compare commits

...

20 commits

Author SHA1 Message Date
3bcc31696b [#1261] shard: Fix delete objects from FSTree
Replace nil storageID with an empty one, like shard.Get does.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-07-23 17:47:32 +03:00
845d8be672 [#9999] blobovnicza: Drop leaf limit
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-07-23 12:27:19 +03:00
498f9955ea [#1089] control: Add USER and GROUP targets for local override storage
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
f8973f9b05 [#1089] control: Format proto files with clang-format
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
50ec4febcc [#1089] ape: Provide request actor as an additional target
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-12 17:35:50 +03:00
59d7a6940d [#1090] tree: Make workaround for APE checks
* Make `verifyClient` method perform an APE check if a container
  was created with a zero-filled basic ACL.
* Object verbs are used in APE until tree verbs are introduced.

Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:28 +03:00
2ecd427df4 [#1090] ape: Move ape request and resource implementations to common package
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:23 +03:00
573ca6d0d5 [#1090] go.mod: Update policy-engine version
Signed-off-by: Airat Arifullin <a.arifullin@yadro.com>
2024-04-12 12:02:18 +03:00
1eb47ab2ce [#1080] metabase: Add StorageID metric
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-10 10:00:58 +03:00
954881f1ef [#1080] metabase: Open bucket for container counter once
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-10 10:00:58 +03:00
7809928b64 [#1080] ape: Use value for APE request
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-09 18:55:27 +03:00
4b902be81e [#1080] ape: Do not read object headers before Head/Get
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2024-04-09 18:55:10 +03:00
161d33c2b7 [#1062] object: Fix buffer allocation for PayloadRange
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-09 13:10:41 +03:00
1a7c3db67f [#1077] objectsvc: Fix possible panic in GetRange()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-05 16:13:09 +03:00
c1d90f018b [#1074] pilorama: Allow empty filenames in SortedByFilename()
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-04 10:27:45 +00:00
064e18b277 [#1074] pilorama: Remove debug print in tests
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2024-04-04 10:27:45 +00:00
21caa904f4 [#1072] Fix issue from govulncheck
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:37:53 +03:00
f74d058c2e [#1072] node, ir, morph: Set scope None when in upgrade mode
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:45 +03:00
c8ce6e9fe4 [#1072] node, ir: Add new config option kludge_compatibility_mode
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:37 +03:00
748da78dc7 [#1072] Fix gofumpt issues
Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
2024-04-04 11:24:27 +03:00
54 changed files with 1218 additions and 794 deletions

View file

@ -34,6 +34,7 @@ func reloadConfig() error {
if err != nil {
return err
}
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
err = logPrm.SetLevelString(cfg.GetString("logger.level"))
if err != nil {
return err

View file

@ -43,6 +43,8 @@ func defaultConfiguration(cfg *viper.Viper) {
setControlDefaults(cfg)
cfg.SetDefault("governance.disable", false)
cfg.SetDefault("node.kludge_compatibility_mode", false)
}
func setControlDefaults(cfg *viper.Viper) {

View file

@ -6,6 +6,7 @@ import (
"fmt"
"os"
"sync"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@ -37,6 +38,7 @@ var (
cfg *viper.Viper
configFile *string
configDir *string
cmode = &atomic.Bool{}
)
func exitErr(err error) {
@ -62,6 +64,8 @@ func main() {
cfg, err = newConfig()
exitErr(err)
cmode.Store(cfg.GetBool("node.kludge_compatibility_mode"))
metrics := irMetrics.NewInnerRingMetrics()
err = logPrm.SetLevelString(
@ -84,7 +88,7 @@ func main() {
metricsCmp = newMetricsComponent()
metricsCmp.init()
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics)
innerRing, err = innerring.New(ctx, log, cfg, intErr, metrics, cmode)
exitErr(err)
pprofCmp.start()

View file

@ -109,6 +109,9 @@ type applicationConfiguration struct {
lowMem bool
rebuildWorkers uint32
}
// cmode is set when the node must run in compatibility mode with other versions
cmode *atomic.Bool
}
type shardCfg struct {
@ -182,11 +185,11 @@ type subStorageCfg struct {
// blobovnicza-specific
size uint64
width uint64
leafWidth uint64
openedCacheSize int
initWorkerCount int
initInAdvance bool
rebuildDropTimeout time.Duration
openedCacheTTL time.Duration
openedCacheExpInterval time.Duration
}
// readConfig fills applicationConfiguration with raw configuration values
@ -204,10 +207,13 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
}
// clear if it is rereading
cmode := a.cmode
*a = applicationConfiguration{}
a.cmode = cmode
}
a._read = true
a.cmode.Store(nodeconfig.CompatibilityMode(c))
// Logger
@ -302,10 +308,8 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
sCfg.size = sub.Size()
sCfg.depth = sub.ShallowDepth()
sCfg.width = sub.ShallowWidth()
sCfg.leafWidth = sub.LeafWidth()
sCfg.openedCacheSize = sub.OpenedCacheSize()
sCfg.initWorkerCount = sub.InitWorkerCount()
sCfg.initInAdvance = sub.InitInAdvance()
sCfg.rebuildDropTimeout = sub.RebuildDropTimeout()
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
@ -648,7 +652,11 @@ type cfgControlService struct {
var persistateSideChainLastBlockKey = []byte("side_chain_last_processed_block")
func initCfg(appCfg *config.Config) *cfg {
c := &cfg{}
c := &cfg{
applicationConfiguration: applicationConfiguration{
cmode: &atomic.Bool{},
},
}
err := c.readConfig(appCfg)
if err != nil {
@ -887,10 +895,8 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
blobovniczatree.WithBlobovniczaSize(sRead.size),
blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
blobovniczatree.WithInitInAdvance(sRead.initInAdvance),
blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
blobovniczatree.WithLogger(c.log),
blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),

View file

@ -98,7 +98,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
require.EqualValues(t, 10, blz.LeafWidth())
require.EqualValues(t, 10, blz.InitWorkerCount())
require.EqualValues(t, true, blz.InitInAdvance())
require.EqualValues(t, 30*time.Second, blz.RebuildDropTimeout())
@ -150,7 +149,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 1, blz.ShallowDepth())
require.EqualValues(t, 4, blz.ShallowWidth())
require.EqualValues(t, 50, blz.OpenedCacheSize())
require.EqualValues(t, 10, blz.LeafWidth())
require.EqualValues(t, blobovniczaconfig.InitWorkerCountDefault, blz.InitWorkerCount())
require.EqualValues(t, blobovniczaconfig.RebuildDropTimeoutDefault, blz.RebuildDropTimeout())

View file

@ -111,16 +111,6 @@ func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
// LeafWidth returns the value of "leaf_width" config parameter.
//
// Returns 0 if the value is not a positive number.
func (x *Config) LeafWidth() uint64 {
return config.UintSafe(
(*config.Config)(x),
"leaf_width",
)
}
// InitWorkerCount returns the value of "init_worker_count" config parameter.
//
// Returns InitWorkerCountDefault if the value is not a positive number.

View file

@ -292,3 +292,8 @@ func (l PersistentPolicyRulesConfig) Perm() fs.FileMode {
func (l PersistentPolicyRulesConfig) NoSync() bool {
return config.BoolSafe((*config.Config)(l.cfg), "no_sync")
}
// CompatibilityMode returns true if the node must run in compatibility mode with previous versions.
func CompatibilityMode(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "kludge_compatibility_mode")
}
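The flag is wired through a shared *atomic.Bool: it is read once at startup and re-stored on SIGHUP reload (see the reloadConfig and frostfs-ir main diffs above), so consumers observe updates without being rebuilt. A minimal standalone sketch of that wiring; readBool is a hypothetical stand-in for the real config.BoolSafe call:

package main

import (
	"fmt"
	"sync/atomic"
)

// readBool is a hypothetical stand-in for config.BoolSafe(c.Sub("node"), ...).
func readBool(cfg map[string]bool, key string) bool { return cfg[key] }

func main() {
	cfg := map[string]bool{"node.kludge_compatibility_mode": true}

	cmode := &atomic.Bool{} // shared by pointer with the morph client and other consumers
	cmode.Store(readBool(cfg, "node.kludge_compatibility_mode"))
	fmt.Println(cmode.Load()) // true

	// On config reload the same pointer is updated in place,
	// mirroring reloadConfig() in frostfs-ir.
	cfg["node.kludge_compatibility_mode"] = false
	cmode.Store(readBool(cfg, "node.kludge_compatibility_mode"))
	fmt.Println(cmode.Load()) // false
}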

View file

@ -48,6 +48,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
}),
client.WithSwitchInterval(morphconfig.SwitchInterval(c.appCfg)),
client.WithMorphCacheMetrics(c.metricsCollector.MorphCacheMetrics()),
client.WithCompatibilityMode(c.cmode),
)
if err != nil {
c.log.Info(logs.FrostFSNodeFailedToCreateNeoRPCClient,

View file

@ -63,7 +63,9 @@ func initTreeService(c *cfg) {
tree.WithReplicationChannelCapacity(treeConfig.ReplicationChannelCapacity()),
tree.WithReplicationWorkerCount(treeConfig.ReplicationWorkerCount()),
tree.WithAuthorizedKeys(treeConfig.AuthorizedKeys()),
tree.WithMetrics(c.metricsCollector.TreeService()))
tree.WithMetrics(c.metricsCollector.TreeService()),
tree.WithAPERouter(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
tree.RegisterTreeServiceServer(s, c.treeService)

View file

@ -127,7 +127,6 @@ FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_LEAF_WIDTH=10
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_INIT_WORKER_COUNT=10
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_INIT_IN_ADVANCE=TRUE
FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_REBUILD_DROP_TIMEOUT=30s
@ -177,7 +176,6 @@ FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_LEAF_WIDTH=10
### FSTree config
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob

View file

@ -175,7 +175,6 @@
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
"leaf_width": 10,
"init_worker_count": 10,
"init_in_advance": true,
"rebuild_drop_timeout": "30s"
@ -227,8 +226,7 @@
"size": 4194304,
"depth": 1,
"width": 4,
"opened_cache_capacity": 50,
"leaf_width": 10
"opened_cache_capacity": 50
},
{
"type": "fstree",

View file

@ -151,7 +151,6 @@ storage:
depth: 1 # max depth of object tree storage in key-value DB
width: 4 # max width of object tree storage in key-value DB
opened_cache_capacity: 50 # maximum number of opened database files
leaf_width: 10 # max count of key-value DB on leafs of object tree storage
- perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
depth: 5 # max depth of object tree storage in FS

go.mod
View file

@ -9,12 +9,11 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240301150205-6fe4e2541d0b
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240307151106-2ec958cbfdfd
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240412130734-0e69e485115a
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
@ -40,8 +39,8 @@ require (
go.uber.org/zap v1.26.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
golang.org/x/sync v0.6.0
golang.org/x/sys v0.16.0
golang.org/x/term v0.16.0
golang.org/x/sys v0.18.0
golang.org/x/term v0.18.0
google.golang.org/grpc v1.61.0
google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v3 v3.0.1
@ -65,6 +64,7 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
@ -121,8 +121,8 @@ require (
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.18.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect

BIN
go.sum

Binary file not shown.

View file

@ -0,0 +1,44 @@
package converter
import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
func SchemaRoleFromACLRole(role acl.Role) (string, error) {
switch role {
case acl.RoleOwner:
return nativeschema.PropertyValueContainerRoleOwner, nil
case acl.RoleContainer:
return nativeschema.PropertyValueContainerRoleContainer, nil
case acl.RoleInnerRing:
return nativeschema.PropertyValueContainerRoleIR, nil
case acl.RoleOthers:
return nativeschema.PropertyValueContainerRoleOthers, nil
default:
return "", fmt.Errorf("failed to convert %s", role.String())
}
}
func SchemaMethodFromACLOperation(op acl.Op) (string, error) {
switch op {
case acl.OpObjectGet:
return nativeschema.MethodGetObject, nil
case acl.OpObjectHead:
return nativeschema.MethodHeadObject, nil
case acl.OpObjectPut:
return nativeschema.MethodPutObject, nil
case acl.OpObjectDelete:
return nativeschema.MethodDeleteObject, nil
case acl.OpObjectSearch:
return nativeschema.MethodSearchObject, nil
case acl.OpObjectRange:
return nativeschema.MethodRangeObject, nil
case acl.OpObjectHash:
return nativeschema.MethodHashObject, nil
default:
return "", fmt.Errorf("operation cannot be converted: %d", op)
}
}
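A hedged usage sketch for the new converter package: map an SDK ACL operation and role onto native-schema strings before building an APE request. The import path is an assumption, inferred from the sibling pkg/ape/request package:

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter" // assumed path
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
)

func main() {
	method, err := converter.SchemaMethodFromACLOperation(acl.OpObjectGet)
	if err != nil {
		panic(err)
	}
	role, err := converter.SchemaRoleFromACLRole(acl.RoleOwner)
	if err != nil {
		panic(err)
	}
	fmt.Println(method, role) // native-schema constants for GetObject and the Owner role
}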

View file

@ -0,0 +1,55 @@
package ape
import (
aperesource "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
)
type Request struct {
operation string
resource Resource
properties map[string]string
}
func NewRequest(operation string, resource Resource, properties map[string]string) Request {
return Request{
operation: operation,
resource: resource,
properties: properties,
}
}
var _ aperesource.Request = Request{}
func (r Request) Operation() string {
return r.operation
}
func (r Request) Property(key string) string {
return r.properties[key]
}
func (r Request) Resource() aperesource.Resource {
return r.resource
}
type Resource struct {
name string
properties map[string]string
}
var _ aperesource.Resource = Resource{}
func NewResource(name string, properties map[string]string) Resource {
return Resource{
name: name,
properties: properties,
}
}
func (r Resource) Name() string {
return r.name
}
func (r Resource) Property(key string) string {
return r.properties[key]
}
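Request and Resource are plain value types satisfying the policy-engine resource.Request and resource.Resource interfaces, so a request can be assembled inline and handed to the chain router; a small sketch with made-up operation and property names:

package main

import (
	"fmt"

	aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
)

func main() {
	res := aperequest.NewResource(
		"native:object//cnrID/objID",       // illustrative resource name
		map[string]string{"attr": "value"}, // illustrative resource properties
	)
	req := aperequest.NewRequest("GetObject", res, map[string]string{
		"role": "Owner", // illustrative request property
	})
	fmt.Println(req.Operation(), req.Resource().Name(), req.Property("role"))
}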

View file

@ -462,6 +462,7 @@ func (s *Server) initMorph(ctx context.Context, cfg *viper.Viper, errChan chan<-
name: morphPrefix,
from: fromSideChainBlock,
morphCacheMetric: s.irMetrics.MorphCacheMetrics(),
cmode: s.cmode,
}
// create morph client

View file

@ -103,6 +103,8 @@ type (
// should report start errors
// to the application.
runners []func(chan<- error) error
cmode *atomic.Bool
}
chainParams struct {
@ -113,6 +115,7 @@ type (
sgn *transaction.Signer
from uint32 // block height
morphCacheMetric metrics.MorphCacheMetrics
cmode *atomic.Bool
}
)
@ -330,12 +333,13 @@ func (s *Server) registerStarter(f func() error) {
// New creates an instance of the inner ring server structure.
func New(ctx context.Context, log *logger.Logger, cfg *viper.Viper, errChan chan<- error,
metrics *metrics.InnerRingServiceMetrics,
metrics *metrics.InnerRingServiceMetrics, cmode *atomic.Bool,
) (*Server, error) {
var err error
server := &Server{
log: log,
irMetrics: metrics,
cmode: cmode,
}
server.sdNotify, err = server.initSdNotify(cfg)
@ -485,6 +489,7 @@ func createClient(ctx context.Context, p *chainParams, errChan chan<- error) (*c
}),
client.WithSwitchInterval(p.cfg.GetDuration(p.name+".switch_interval")),
client.WithMorphCacheMetrics(p.morphCacheMetric),
client.WithCompatibilityMode(p.cmode),
)
}

View file

@ -37,17 +37,17 @@ type activeDBManager struct {
closed bool
dbManager *dbManager
leafWidth uint64
rootPath string
}
func newActiveDBManager(dbManager *dbManager, leafWidth uint64) *activeDBManager {
func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager {
return &activeDBManager{
levelToActiveDBGuard: &sync.RWMutex{},
levelToActiveDB: make(map[string]*sharedDB),
levelLock: utilSync.NewKeyLocker[string](),
dbManager: dbManager,
leafWidth: leafWidth,
rootPath: rootPath,
}
}
@ -144,30 +144,25 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
}
func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
var idx uint64
var iterCount uint64
var nextActiveDBIdx uint64
hasActive, currentIdx := m.hasActiveDB(lvlPath)
if hasActive {
idx = (currentIdx + 1) % m.leafWidth
}
var next *sharedDB
for iterCount < m.leafWidth {
path := filepath.Join(lvlPath, u64ToHexStringExt(idx))
shDB := m.dbManager.GetByPath(path)
db, err := shDB.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
nextActiveDBIdx = currentIdx + 1
} else {
hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(m.rootPath, lvlPath))
if err != nil {
return nil, err
}
if db.IsFull() {
shDB.Close()
} else {
next = shDB
break
if hasDBs {
nextActiveDBIdx = maxIdx
}
idx = (idx + 1) % m.leafWidth
iterCount++
}
path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
next := m.dbManager.GetByPath(path)
_, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
if err != nil {
return nil, err
}
previous, updated := m.replace(lvlPath, next)

View file

@ -2,6 +2,7 @@ package blobovniczatree
import (
"errors"
"os"
"strconv"
"strings"
"sync"
@ -80,12 +81,8 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
opts[i](&blz.cfg)
}
if blz.blzLeafWidth == 0 {
blz.blzLeafWidth = blz.blzShallowWidth
}
blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.blzLeafWidth)
blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.rootPath)
blz.dbCache = newDBCache(blz.openedCacheSize, blz.commondbManager)
blz.deleteProtectedObjects = newAddressMap()
blz.dbFilesGuard = &sync.RWMutex{}
@ -122,6 +119,32 @@ func u64FromHexString(str string) uint64 {
return v
}
func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
entries, err := os.ReadDir(directory)
if os.IsNotExist(err) { // non initialized tree
return false, 0, nil
}
if err != nil {
return false, 0, err
}
if len(entries) == 0 {
return false, 0, nil
}
var hasDBs bool
var maxIdx uint64
for _, e := range entries {
if e.IsDir() {
continue
}
hasDBs = true
current := u64FromHexString(e.Name())
if current > maxIdx {
maxIdx = current
}
}
return hasDBs, maxIdx, nil
}
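With the leaf_width limit dropped, the tree no longer cycles the active DB index modulo a configured width: the next index is currentIdx+1 when an active DB exists, otherwise the largest index already on disk, as computed by getBlobovniczaMaxIndex above. A standalone sketch of that scan, with file naming simplified to bare hex (the real code uses u64ToHexStringExt/u64FromHexString):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// maxHexIndex mirrors getBlobovniczaMaxIndex: report whether the directory
// holds any DB files and the largest hex-encoded index among them.
func maxHexIndex(dir string) (bool, uint64, error) {
	entries, err := os.ReadDir(dir)
	if os.IsNotExist(err) {
		return false, 0, nil // tree not initialized yet
	}
	if err != nil {
		return false, 0, err
	}
	var hasDBs bool
	var maxIdx uint64
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		v, err := strconv.ParseUint(e.Name(), 16, 64)
		if err != nil {
			continue // simplified: skip names that are not bare hex
		}
		hasDBs = true
		if v > maxIdx {
			maxIdx = v
		}
	}
	return hasDBs, maxIdx, nil
}

func main() {
	hasDBs, maxIdx, err := maxHexIndex("/srv/frostfs/blobovnicza/00") // hypothetical path
	fmt.Println(hasDBs, maxIdx, err)
}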
// Type is blobovniczatree storage type used in logs and configuration.
const Type = "blobovnicza"

View file

@ -130,7 +130,14 @@ func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, cur
isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
levelWidth := b.blzShallowWidth
if isLeafLevel {
levelWidth = b.blzLeafWidth
hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{b.rootPath}, curPath...)...))
if err != nil {
return false, err
}
levelWidth = 0
if hasDBs {
levelWidth = maxIdx + 1
}
}
indices := indexSlice(levelWidth)

View file

@ -18,7 +18,6 @@ type cfg struct {
openedCacheSize int
blzShallowDepth uint64
blzShallowWidth uint64
blzLeafWidth uint64
compression *compression.Config
blzOpts []blobovnicza.Option
reportError func(string, error) // reportError is the function called when encountering disk errors.
@ -75,12 +74,6 @@ func WithBlobovniczaShallowWidth(width uint64) Option {
}
}
func WithBlobovniczaLeafWidth(w uint64) Option {
return func(c *cfg) {
c.blzLeafWidth = w
}
}
func WithBlobovniczaShallowDepth(depth uint64) Option {
return func(c *cfg) {
c.blzShallowDepth = depth

View file

@ -210,7 +210,7 @@ func (e *StorageEngine) TreeGetChildren(ctx context.Context, cid cidSDK.ID, tree
}
// TreeSortedByFilename implements the pilorama.Forest interface.
func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last string, count int) ([]pilorama.NodeInfo, string, error) {
func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last *string, count int) ([]pilorama.NodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("container_id", cid.EncodeToString()),
@ -222,7 +222,7 @@ func (e *StorageEngine) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID,
var err error
var nodes []pilorama.NodeInfo
var cursor string
var cursor *string
for _, sh := range e.sortShards(cid) {
nodes, cursor, err = sh.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
if err != nil {

View file

@ -232,14 +232,19 @@ func (db *DB) ContainerCount(ctx context.Context, id cid.ID) (ObjectCounters, er
}
func (db *DB) incCounters(tx *bbolt.Tx, cnrID cid.ID, isUserObject bool) error {
if err := db.updateShardObjectCounter(tx, phy, 1, true); err != nil {
b := tx.Bucket(shardInfoBucket)
if b == nil {
return db.incContainerObjectCounter(tx, cnrID, isUserObject)
}
if err := db.updateShardObjectCounterBucket(b, phy, 1, true); err != nil {
return fmt.Errorf("could not increase phy object counter: %w", err)
}
if err := db.updateShardObjectCounter(tx, logical, 1, true); err != nil {
if err := db.updateShardObjectCounterBucket(b, logical, 1, true); err != nil {
return fmt.Errorf("could not increase logical object counter: %w", err)
}
if isUserObject {
if err := db.updateShardObjectCounter(tx, user, 1, true); err != nil {
if err := db.updateShardObjectCounterBucket(b, user, 1, true); err != nil {
return fmt.Errorf("could not increase user object counter: %w", err)
}
}
@ -252,6 +257,10 @@ func (db *DB) updateShardObjectCounter(tx *bbolt.Tx, typ objectType, delta uint6
return nil
}
return db.updateShardObjectCounterBucket(b, typ, delta, inc)
}
func (*DB) updateShardObjectCounterBucket(b *bbolt.Bucket, typ objectType, delta uint64, inc bool) error {
var counter uint64
var counterKey []byte
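The counter refactor resolves shardInfoBucket once per transaction and threads the resulting *bbolt.Bucket through the phy/logical/user updates, instead of re-opening it for each counter. A generic bbolt sketch of the pattern; bucket and key names here are illustrative only:

package main

import (
	"encoding/binary"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("/tmp/counters.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		// Resolve the bucket once, then update several counters against it.
		b, err := tx.CreateBucketIfNotExists([]byte("shard_info"))
		if err != nil {
			return err
		}
		for _, key := range []string{"phy_counter", "logic_counter", "user_counter"} {
			var v uint64
			if raw := b.Get([]byte(key)); len(raw) == 8 {
				v = binary.LittleEndian.Uint64(raw)
			}
			buf := make([]byte, 8)
			binary.LittleEndian.PutUint64(buf, v+1)
			if err := b.Put([]byte(key), buf); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}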

View file

@ -36,6 +36,14 @@ func (r StorageIDRes) StorageID() []byte {
// StorageID returns the storage descriptor for objects from the blobstor.
// It is stored together with the object and makes get/delete operations faster.
func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes, err error) {
var (
startedAt = time.Now()
success = false
)
defer func() {
db.metrics.AddMethodDuration("StorageID", time.Since(startedAt), success)
}()
_, span := tracing.StartSpanFromContext(ctx, "metabase.StorageID",
trace.WithAttributes(
attribute.String("address", prm.addr.EncodeToString()),
@ -54,7 +62,7 @@ func (db *DB) StorageID(ctx context.Context, prm StorageIDPrm) (res StorageIDRes
return err
})
success = err == nil
return res, metaerr.Wrap(err)
}

View file

@ -1003,7 +1003,7 @@ func (t *boltForest) hasFewChildren(b *bbolt.Bucket, nodeID Node, threshold int)
}
// TreeSortedByFilename implements the Forest interface.
func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last string, count int) ([]NodeInfo, string, error) {
func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last *string, count int) ([]NodeInfo, *string, error) {
var (
startedAt = time.Now()
success = false
@ -1025,7 +1025,7 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
defer t.modeMtx.RUnlock()
if t.mode.NoMetabase() {
return nil, "", ErrDegradedMode
return nil, last, ErrDegradedMode
}
h := newHeap(last, count)
@ -1069,20 +1069,25 @@ func (t *boltForest) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, tr
}
if fewChildren {
result = sortAndCut(result, []byte(last))
result = sortAndCut(result, last)
}
if len(result) != 0 {
last = string(result[len(result)-1].Meta.GetAttr(AttributeFilename))
s := string(result[len(result)-1].Meta.GetAttr(AttributeFilename))
last = &s
}
return result, last, metaerr.Wrap(err)
}
func sortAndCut(result []NodeInfo, last []byte) []NodeInfo {
func sortAndCut(result []NodeInfo, last *string) []NodeInfo {
var lastBytes []byte
if last != nil {
lastBytes = []byte(*last)
}
sort.Slice(result, func(i, j int) bool {
return bytes.Compare(result[i].Meta.GetAttr(AttributeFilename), result[j].Meta.GetAttr(AttributeFilename)) == -1
})
for i := range result {
if bytes.Compare(last, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
if lastBytes == nil || bytes.Compare(lastBytes, result[i].Meta.GetAttr(AttributeFilename)) == -1 {
return result[i:]
}
}
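The cursor type changes from string to *string so that "no cursor yet" (nil) is distinguishable from "the last returned filename was empty", which matters now that empty filenames are legal. A standalone sketch of the comparison rule used by both sortAndCut and the heap's push:

package main

import "fmt"

// include reports whether filename comes strictly after the cursor.
// A nil cursor starts iteration from the very beginning, so even the
// empty filename is admitted; an empty-string cursor skips only it.
func include(cursor *string, filename string) bool {
	return cursor == nil || filename > *cursor
}

func main() {
	empty := ""
	fmt.Println(include(nil, ""))     // true: nil cursor admits ""
	fmt.Println(include(&empty, ""))  // false: "" was already returned
	fmt.Println(include(&empty, "a")) // true
}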

View file

@ -156,11 +156,11 @@ func (f *memoryForest) TreeGetMeta(_ context.Context, cid cid.ID, treeID string,
}
// TreeSortedByFilename implements the Forest interface.
func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeID Node, start string, count int) ([]NodeInfo, string, error) {
func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeID string, nodeID Node, start *string, count int) ([]NodeInfo, *string, error) {
fullID := cid.String() + "/" + treeID
s, ok := f.treeMap[fullID]
if !ok {
return nil, "", ErrTreeNotFound
return nil, start, ErrTreeNotFound
}
if count == 0 {
return nil, start, nil
@ -169,7 +169,14 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
children := s.tree.getChildren(nodeID)
res := make([]NodeInfo, 0, len(children))
for _, childID := range children {
if len(s.infoMap[childID].Meta.GetAttr(AttributeFilename)) == 0 {
var found bool
for _, kv := range s.infoMap[childID].Meta.Items {
if kv.Key == AttributeFilename {
found = true
break
}
}
if !found {
continue
}
res = append(res, NodeInfo{
@ -179,22 +186,24 @@ func (f *memoryForest) TreeSortedByFilename(_ context.Context, cid cid.ID, treeI
})
}
if len(res) == 0 {
return res, "", nil
return res, start, nil
}
sort.Slice(res, func(i, j int) bool {
return bytes.Compare(res[i].Meta.GetAttr(AttributeFilename), res[j].Meta.GetAttr(AttributeFilename)) == -1
})
for i := range res {
if string(res[i].Meta.GetAttr(AttributeFilename)) > start {
if start == nil || string(res[i].Meta.GetAttr(AttributeFilename)) > *start {
finish := i + count
if len(res) < finish {
finish = len(res)
}
return res[i:finish], string(res[finish-1].Meta.GetAttr(AttributeFilename)), nil
last := string(res[finish-1].Meta.GetAttr(AttributeFilename))
return res[i:finish], &last, nil
}
}
return nil, string(res[len(res)-1].Meta.GetAttr(AttributeFilename)), nil
last := string(res[len(res)-1].Meta.GetAttr(AttributeFilename))
return nil, &last, nil
}
// TreeGetChildren implements the Forest interface.

View file

@ -16,7 +16,6 @@ import (
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/davecgh/go-spew/spew"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
@ -216,7 +215,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
b.Run(providers[i].name+",root", func(b *testing.B) {
for i := 0; i < b.N; i++ {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, RootID, "", 100)
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, RootID, nil, 100)
if err != nil || len(res) != 100 {
b.Fatalf("err %v, count %d", err, len(res))
}
@ -224,7 +223,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
})
b.Run(providers[i].name+",leaf", func(b *testing.B) {
for i := 0; i < b.N; i++ {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, 1, "", 100)
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, 1, nil, 100)
if err != nil || len(res) != 0 {
b.FailNow()
}
@ -247,14 +246,14 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
cid := cidtest.ID()
d := CIDDescriptor{cid, 0, 1}
treeID := "version"
treeAdd := func(t *testing.T, ts int) {
treeAdd := func(t *testing.T, ts int, filename string) {
_, err := s.TreeMove(context.Background(), d, treeID, &Move{
Child: RootID + uint64(ts),
Parent: RootID,
Meta: Meta{
Time: Timestamp(ts),
Items: []KeyValue{
{Key: AttributeFilename, Value: []byte(strconv.Itoa(ts))},
{Key: AttributeFilename, Value: []byte(filename)},
},
},
})
@ -262,20 +261,20 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
}
const count = 9
for i := 0; i < count; i++ {
treeAdd(t, i+1)
treeAdd(t, 1, "")
for i := 1; i < count; i++ {
treeAdd(t, i+1, strconv.Itoa(i+1))
}
var result []NodeInfo
treeAppend := func(t *testing.T, last string, count int) string {
treeAppend := func(t *testing.T, last *string, count int) *string {
res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, RootID, last, count)
require.NoError(t, err)
result = append(result, res...)
spew.Dump(last, res)
return cursor
}
last := treeAppend(t, "", 2)
last := treeAppend(t, nil, 2)
last = treeAppend(t, last, 3)
last = treeAppend(t, last, 0)
last = treeAppend(t, last, 1)
@ -284,9 +283,13 @@ func testForestTreeSortedIteration(t *testing.T, s ForestStorage) {
require.Len(t, result, count)
for i := range result {
require.Equal(t, RootID+uint64(i+1), result[i].ID)
if i == 0 {
require.Equal(t, "", string(result[i].Meta.GetAttr(AttributeFilename)))
} else {
require.Equal(t, strconv.Itoa(RootID+i+1), string(result[i].Meta.GetAttr(AttributeFilename)))
}
}
}
func TestForest_TreeSortedFilename(t *testing.T) {
for i := range providers {
@ -343,7 +346,7 @@ func testForestTreeSortedByFilename(t *testing.T, s ForestStorage) {
}
getChildren := func(t *testing.T, id Node) []NodeInfo {
res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, "", len(items))
res, _, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, id, nil, len(items))
require.NoError(t, err)
return res
}

View file

@ -17,6 +17,7 @@ func (h filenameHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *filenameHeap) Push(x any) {
*h = append(*h, x.(heapInfo))
}
func (h *filenameHeap) Pop() any {
old := *h
n := len(old)
@ -27,13 +28,13 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of smallest elements started at some point.
type fixedHeap struct {
start string
start *string
max string
count int
h *filenameHeap
}
func newHeap(start string, count int) *fixedHeap {
func newHeap(start *string, count int) *fixedHeap {
h := new(filenameHeap)
heap.Init(h)
@ -46,7 +47,7 @@ func newHeap(start string, count int) *fixedHeap {
}
func (h *fixedHeap) push(id Node, filename string) bool {
if filename == "" || filename <= h.start {
if h.start != nil && filename <= *h.start {
return false
}
heap.Push(h.h, heapInfo{id: id, filename: filename})

View file

@ -35,7 +35,7 @@ type Forest interface {
TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node) ([]NodeInfo, error)
// TreeSortedByFilename returns children of the node with the specified ID. The nodes are sorted by the filename attribute.
// Should return ErrTreeNotFound if the tree is not found, and empty result if the node is not in the tree.
TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last string, count int) ([]NodeInfo, string, error)
TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID Node, last *string, count int) ([]NodeInfo, *string, error)
// TreeGetOpLog returns first log operation stored at or above the height.
// In case no such operation is found, empty Move and nil error should be returned.
TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (Move, error)

View file

@ -117,6 +117,12 @@ func (s *Shard) deleteFromBlobstor(ctx context.Context, addr oid.Address) error
return err
}
storageID := res.StorageID()
if storageID == nil {
// if storageID is nil, it means either:
// 1. there is no such object, or
// 2. the object is stored in the writecache, which should not happen, as `validateWritecacheDoesntContainObject` is called before `deleteFromBlobstor`
return nil
}
var delPrm common.DeletePrm
delPrm.Address = addr

View file

@ -184,7 +184,7 @@ func (s *Shard) TreeGetChildren(ctx context.Context, cid cidSDK.ID, treeID strin
}
// TreeSortedByFilename implements the pilorama.Forest interface.
func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last string, count int) ([]pilorama.NodeInfo, string, error) {
func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID string, nodeID pilorama.Node, last *string, count int) ([]pilorama.NodeInfo, *string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "Shard.TreeSortedByFilename",
trace.WithAttributes(
attribute.String("shard_id", s.ID().String()),
@ -196,14 +196,14 @@ func (s *Shard) TreeSortedByFilename(ctx context.Context, cid cidSDK.ID, treeID
defer span.End()
if s.pilorama == nil {
return nil, "", ErrPiloramaDisabled
return nil, last, ErrPiloramaDisabled
}
s.m.RLock()
defer s.m.RUnlock()
if s.info.Mode.NoMetabase() {
return nil, "", ErrDegradedMode
return nil, last, ErrDegradedMode
}
return s.pilorama.TreeSortedByFilename(ctx, cid, treeID, nodeID, last, count)
}

View file

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@ -48,6 +49,8 @@ type cfg struct {
switchInterval time.Duration
morphCacheMetrics metrics.MorphCacheMetrics
cmode *atomic.Bool
}
const (
@ -311,3 +314,11 @@ func WithMorphCacheMetrics(morphCacheMetrics metrics.MorphCacheMetrics) Option {
c.morphCacheMetrics = morphCacheMetrics
}
}
// WithCompatibilityMode indicates that the Client is working in compatibility mode:
// it must keep backward compatibility with services of the previous version.
func WithCompatibilityMode(cmode *atomic.Bool) Option {
return func(c *cfg) {
c.cmode = cmode
}
}

View file

@ -566,14 +566,19 @@ func (c *Client) notaryCosigners(invokedByAlpha bool, ir []*keys.PublicKey, comm
}
s := make([]actor.SignerAccount, 2, 3)
// Proxy contract that will pay for the execution.
s[0] = actor.SignerAccount{
Signer: transaction.Signer{
Account: c.notary.proxy,
// Do not change this:
// We must be able to call NNS contract indirectly from the Container contract.
// Thus, CalledByEntry is not sufficient.
// In future we may restrict this to all the usecases we have.
Scopes: transaction.Global,
scopes := transaction.Global
if c.cfg.cmode != nil && c.cfg.cmode.Load() {
// Set it to None to keep the ability to send notary requests during upgrade
scopes = transaction.None
}
s[0] = actor.SignerAccount{
Signer: transaction.Signer{
Account: c.notary.proxy,
Scopes: scopes,
},
Account: notary.FakeContractAccount(c.notary.proxy),
}

View file

@ -15,6 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
session "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@ -26,7 +27,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
@ -148,18 +148,21 @@ func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*co
return nil, err
}
request := &apeRequest{
resource: &apeResource{
name: resourceName(namespace, ""),
props: make(map[string]string),
},
op: nativeschema.MethodListContainers,
props: reqProps,
}
request := aperequest.NewRequest(
nativeschema.MethodListContainers,
aperequest.NewResource(
resourceName(namespace, ""),
make(map[string]string),
),
reqProps,
)
s, found, err := ac.router.IsAllowed(apechain.Ingress,
policyengine.NewRequestTargetWithNamespace(namespace),
request)
rt := policyengine.NewRequestTargetWithNamespace(namespace)
rt.User = &policyengine.Target{
Type: policyengine.User,
Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
}
s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
if err != nil {
return nil, err
}
@ -193,18 +196,21 @@ func (ac *apeChecker) Put(ctx context.Context, req *container.PutRequest) (*cont
return nil, err
}
request := &apeRequest{
resource: &apeResource{
name: resourceName(namespace, ""),
props: make(map[string]string),
},
op: nativeschema.MethodPutContainer,
props: reqProps,
}
request := aperequest.NewRequest(
nativeschema.MethodPutContainer,
aperequest.NewResource(
resourceName(namespace, ""),
make(map[string]string),
),
reqProps,
)
s, found, err := ac.router.IsAllowed(apechain.Ingress,
policyengine.NewRequestTargetWithNamespace(namespace),
request)
rt := policyengine.NewRequestTargetWithNamespace(namespace)
rt.User = &policyengine.Target{
Type: policyengine.User,
Name: fmt.Sprintf("%s:%s", namespace, pk.Address()),
}
s, found, err := ac.router.IsAllowed(apechain.Ingress, rt, request)
if err != nil {
return nil, err
}
@ -277,7 +283,7 @@ func (ac *apeChecker) validateContainerBoundedOperation(containerID *refs.Contai
return err
}
reqProps, err := ac.getRequestProps(mh, vh, cont, id)
reqProps, pk, err := ac.getRequestProps(mh, vh, cont, id)
if err != nil {
return err
}
@ -288,17 +294,17 @@ func (ac *apeChecker) validateContainerBoundedOperation(containerID *refs.Contai
namespace = cntNamespace
}
request := &apeRequest{
resource: &apeResource{
name: resourceName(namespace, id.EncodeToString()),
props: ac.getContainerProps(cont),
},
op: op,
props: reqProps,
}
request := aperequest.NewRequest(
op,
aperequest.NewResource(
resourceName(namespace, id.EncodeToString()),
ac.getContainerProps(cont),
),
reqProps,
)
s, found, err := ac.router.IsAllowed(apechain.Ingress,
policyengine.NewRequestTarget(namespace, id.EncodeToString()),
policyengine.NewRequestTargetExtended(namespace, id.EncodeToString(), fmt.Sprintf("%s:%s", namespace, pk.Address()), nil),
request)
if err != nil {
return err
@ -329,40 +335,6 @@ func getContainerID(reqContID *refs.ContainerID) (cid.ID, error) {
return id, nil
}
type apeRequest struct {
resource *apeResource
op string
props map[string]string
}
// Operation implements resource.Request.
func (r *apeRequest) Operation() string {
return r.op
}
// Property implements resource.Request.
func (r *apeRequest) Property(key string) string {
return r.props[key]
}
// Resource implements resource.Request.
func (r *apeRequest) Resource() resource.Resource {
return r.resource
}
type apeResource struct {
name string
props map[string]string
}
func (r *apeResource) Name() string {
return r.name
}
func (r *apeResource) Property(key string) string {
return r.props[key]
}
func resourceName(namespace string, container string) string {
if namespace == "" && container == "" {
return nativeschema.ResourceFormatRootContainers
@ -384,19 +356,19 @@ func (ac *apeChecker) getContainerProps(c *containercore.Container) map[string]s
func (ac *apeChecker) getRequestProps(mh *session.RequestMetaHeader, vh *session.RequestVerificationHeader,
cont *containercore.Container, cnrID cid.ID,
) (map[string]string, error) {
) (map[string]string, *keys.PublicKey, error) {
actor, pk, err := ac.getActorAndPublicKey(mh, vh, cnrID)
if err != nil {
return nil, err
return nil, nil, err
}
role, err := ac.getRole(actor, pk, cont, cnrID)
if err != nil {
return nil, err
return nil, nil, err
}
return map[string]string{
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(pk.Bytes()),
nativeschema.PropertyKeyActorRole: role,
}, nil
}, pk, nil
}
func (ac *apeChecker) getRole(actor *user.ID, pk *keys.PublicKey, cont *containercore.Container, cnrID cid.ID) (string, error) {

View file

@ -19,6 +19,10 @@ func apeTarget(chainTarget *control.ChainTarget) (engine.Target, error) {
return engine.ContainerTarget(chainTarget.GetName()), nil
case control.ChainTarget_NAMESPACE:
return engine.NamespaceTarget(chainTarget.GetName()), nil
case control.ChainTarget_USER:
return engine.UserTarget(chainTarget.GetName()), nil
case control.ChainTarget_GROUP:
return engine.GroupTarget(chainTarget.GetName()), nil
default:
}
return engine.Target{}, status.Error(codes.InvalidArgument,
@ -42,6 +46,16 @@ func controlTarget(chainTarget *engine.Target) (control.ChainTarget, error) {
Name: nm,
Type: control.ChainTarget_NAMESPACE,
}, nil
case engine.User:
return control.ChainTarget{
Name: chainTarget.Name,
Type: control.ChainTarget_USER,
}, nil
case engine.Group:
return control.ChainTarget{
Name: chainTarget.Name,
Type: control.ChainTarget_GROUP,
}, nil
default:
}
return control.ChainTarget{}, status.Error(codes.InvalidArgument,
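With the USER and GROUP cases added on both sides, the new target types round-trip between the control API and policy-engine representations; a hedged sketch using the constructors referenced above (the name values are made up, following the "namespace:address" format from the APE checker):

package main

import (
	"fmt"

	policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
)

func main() {
	// Targets built with the constructors the conversion above maps to.
	user := policyengine.UserTarget("ns1:<actor-address>") // hypothetical "namespace:address"
	group := policyengine.GroupTarget("ns1:admins")        // hypothetical group name
	fmt.Println(user.Type, user.Name)
	fmt.Println(group.Type, group.Name)
}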

Binary file not shown.

View file

@ -6,7 +6,8 @@ import "pkg/services/control/types.proto";
option go_package = "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control";
// `ControlService` provides an interface for internal work with the storage node.
// `ControlService` provides an interface for internal work with the storage
// node.
service ControlService {
// Performs health check of the storage node.
rpc HealthCheck(HealthCheckRequest) returns (HealthCheckResponse);
@ -27,20 +28,26 @@ service ControlService {
rpc SynchronizeTree(SynchronizeTreeRequest) returns (SynchronizeTreeResponse);
// EvacuateShard moves all data from one shard to the others.
// Deprecated: Use StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
// Deprecated: Use
// StartShardEvacuation/GetShardEvacuationStatus/StopShardEvacuation
rpc EvacuateShard(EvacuateShardRequest) returns (EvacuateShardResponse);
// StartShardEvacuation starts moving all data from one shard to the others.
rpc StartShardEvacuation (StartShardEvacuationRequest) returns (StartShardEvacuationResponse);
rpc StartShardEvacuation(StartShardEvacuationRequest)
returns (StartShardEvacuationResponse);
// GetShardEvacuationStatus returns evacuation status.
rpc GetShardEvacuationStatus (GetShardEvacuationStatusRequest) returns (GetShardEvacuationStatusResponse);
rpc GetShardEvacuationStatus(GetShardEvacuationStatusRequest)
returns (GetShardEvacuationStatusResponse);
// ResetShardEvacuationStatus resets evacuation status if there is no running evacuation process.
rpc ResetShardEvacuationStatus (ResetShardEvacuationStatusRequest) returns (ResetShardEvacuationStatusResponse);
// ResetShardEvacuationStatus resets evacuation status if there is no running
// evacuation process.
rpc ResetShardEvacuationStatus(ResetShardEvacuationStatusRequest)
returns (ResetShardEvacuationStatusResponse);
// StopShardEvacuation stops moving all data from one shard to the others.
rpc StopShardEvacuation (StopShardEvacuationRequest) returns (StopShardEvacuationResponse);
rpc StopShardEvacuation(StopShardEvacuationRequest)
returns (StopShardEvacuationResponse);
// FlushCache moves all data from one shard to the others.
rpc FlushCache(FlushCacheRequest) returns (FlushCacheResponse);
@ -49,22 +56,32 @@ service ControlService {
rpc Doctor(DoctorRequest) returns (DoctorResponse);
// Add local access policy engine overrides to a node.
rpc AddChainLocalOverride (AddChainLocalOverrideRequest) returns (AddChainLocalOverrideResponse);
rpc AddChainLocalOverride(AddChainLocalOverrideRequest)
returns (AddChainLocalOverrideResponse);
// Get local access policy engine overrides stored in the node by chain id.
rpc GetChainLocalOverride (GetChainLocalOverrideRequest) returns (GetChainLocalOverrideResponse);
rpc GetChainLocalOverride(GetChainLocalOverrideRequest)
returns (GetChainLocalOverrideResponse);
// List local access policy engine overrides stored in the node by container id.
rpc ListChainLocalOverrides (ListChainLocalOverridesRequest) returns (ListChainLocalOverridesResponse);
// List local access policy engine overrides stored in the node by container
// id.
rpc ListChainLocalOverrides(ListChainLocalOverridesRequest)
returns (ListChainLocalOverridesResponse);
// Remove local access policy engine overrides stored in the node by chain id.
rpc RemoveChainLocalOverride (RemoveChainLocalOverrideRequest) returns (RemoveChainLocalOverrideResponse);
// Remove local access policy engine overrides stored in the node by chain
// id.
rpc RemoveChainLocalOverride(RemoveChainLocalOverrideRequest)
returns (RemoveChainLocalOverrideResponse);
// Remove local access policy engine overrides stored in the node by chain id.
rpc RemoveChainLocalOverridesByTarget (RemoveChainLocalOverridesByTargetRequest) returns (RemoveChainLocalOverridesByTargetResponse);
// Remove local access policy engine overrides stored in the node by chain
// id.
rpc RemoveChainLocalOverridesByTarget(
RemoveChainLocalOverridesByTargetRequest)
returns (RemoveChainLocalOverridesByTargetResponse);
// List targets of the local APE overrides stored in the node.
rpc ListTargetsLocalOverrides (ListTargetsLocalOverridesRequest) returns (ListTargetsLocalOverridesResponse);
rpc ListTargetsLocalOverrides(ListTargetsLocalOverridesRequest)
returns (ListTargetsLocalOverridesResponse);
// Flush objects from write-cache and move it to degraded read only mode.
rpc SealWriteCache(SealWriteCacheRequest) returns (SealWriteCacheResponse);
@ -76,8 +93,7 @@ service ControlService {
// Health check request.
message HealthCheckRequest {
// Health check request body.
message Body {
}
message Body {}
// Body of health check request message.
Body body = 1;
@ -131,8 +147,7 @@ message SetNetmapStatusRequest {
// Set netmap status response.
message SetNetmapStatusResponse {
// Set netmap status response body
message Body {
}
message Body {}
// Body of set netmap status response message.
Body body = 1;
@ -160,8 +175,7 @@ message DropObjectsRequest {
// Response to request to drop the objects.
message DropObjectsResponse {
// Response body structure.
message Body {
}
message Body {}
// Body of the response message.
Body body = 1;
@ -173,8 +187,7 @@ message DropObjectsResponse {
// Request to list all shards of the node.
message ListShardsRequest {
// Request body structure.
message Body {
}
message Body {}
// Body of the request message.
Body body = 1;
@ -222,8 +235,7 @@ message SetShardModeRequest {
// SetShardMode response.
message SetShardModeResponse {
// Response body structure.
message Body {
}
message Body {}
// Body of set shard mode response message.
Body body = 1;
@ -252,8 +264,7 @@ message SynchronizeTreeRequest {
// SynchronizeTree response.
message SynchronizeTreeResponse {
// Response body structure.
message Body {
}
message Body {}
// Body of restore shard response message.
Body body = 1;
@ -262,7 +273,6 @@ message SynchronizeTreeResponse {
Signature signature = 2;
}
// EvacuateShard request.
message EvacuateShardRequest {
// Request body structure.
@ -281,9 +291,7 @@ message EvacuateShardRequest {
// EvacuateShard response.
message EvacuateShardResponse {
// Response body structure.
message Body {
uint32 count = 1;
}
message Body { uint32 count = 1; }
Body body = 1;
Signature signature = 2;
@ -295,7 +303,8 @@ message FlushCacheRequest {
message Body {
// ID of the shard.
repeated bytes shard_ID = 1;
// If true, then writecache will be left in read-only mode after flush completed.
// If true, then writecache will be left in read-only mode after flush
// completed.
bool seal = 2;
}
@ -306,14 +315,12 @@ message FlushCacheRequest {
// FlushCache response.
message FlushCacheResponse {
// Response body structure.
message Body {
}
message Body {}
Body body = 1;
Signature signature = 2;
}
// Doctor request.
message DoctorRequest {
// Request body structure.
@ -331,8 +338,7 @@ message DoctorRequest {
// Doctor response.
message DoctorResponse {
// Response body structure.
message Body {
}
message Body {}
Body body = 1;
Signature signature = 2;
@ -390,16 +396,13 @@ message GetShardEvacuationStatusResponse {
}
// Unix timestamp value.
message UnixTimestamp {
int64 value = 1;
}
message UnixTimestamp { int64 value = 1; }
// Duration in seconds.
message Duration {
int64 seconds = 1;
}
message Duration { int64 seconds = 1; }
// Total objects to evacuate count. The value is approximate, so evacuated + failed + skipped == total is not guaranteed after completion.
// Total objects to evacuate count. The value is approximate, so evacuated +
// failed + skipped == total is not guaranteed after completion.
uint64 total_objects = 1;
// Evacuated objects count.
uint64 evacuated_objects = 2;
@ -587,8 +590,7 @@ message RemoveChainLocalOverrideRequest {
}
message RemoveChainLocalOverrideResponse {
message Body {
}
message Body {}
Body body = 1;
@ -607,8 +609,7 @@ message RemoveChainLocalOverridesByTargetRequest {
}
message RemoveChainLocalOverridesByTargetResponse {
message Body {
}
message Body {}
Body body = 1;
@ -645,17 +646,14 @@ message SealWriteCacheResponse {
}
message DetachShardsRequest {
message Body {
repeated bytes shard_ID = 1;
}
message Body { repeated bytes shard_ID = 1; }
Body body = 1;
Signature signature = 2;
}
message DetachShardsResponse {
message Body {
}
message Body {}
Body body = 1;

Binary file not shown.

Binary file not shown.

View file

@ -170,7 +170,6 @@ enum ShardMode {
DEGRADED_READ_ONLY = 4;
}
// ChainTarget is an object to which local overrides
// are applied.
message ChainTarget {
@ -180,6 +179,10 @@ message ChainTarget {
NAMESPACE = 1;
CONTAINER = 2;
USER = 3;
GROUP = 4;
}
TargetType type = 1;

View file

@ -12,6 +12,7 @@ import (
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type checkerImpl struct {
@ -54,6 +55,9 @@ type Prm struct {
// If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
SoftAPECheck bool
// If true, object headers will not be retrieved from the storage engine.
WithoutHeaderRequest bool
}
var errMissingOID = errors.New("object ID is not set")
@ -81,8 +85,13 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return fmt.Errorf("failed to create ape request: %w", err)
}
status, ruleFound, err := c.chainRouter.IsAllowed(apechain.Ingress,
policyengine.NewRequestTarget(prm.Namespace, prm.Container.EncodeToString()), r)
pub, err := keys.NewPublicKeyFromString(prm.SenderKey)
if err != nil {
return err
}
rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, pub.Address()), nil)
status, ruleFound, err := c.chainRouter.IsAllowed(apechain.Ingress, rt, r)
if err != nil {
return err
}

View file

@ -16,6 +16,7 @@ import (
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@ -147,7 +148,9 @@ var (
role = "Container"
senderKey = hex.EncodeToString([]byte{1, 0, 0, 1})
senderPrivateKey, _ = keys.NewPrivateKey()
senderKey = hex.EncodeToString(senderPrivateKey.PublicKey().Bytes())
)
func TestAPECheck(t *testing.T) {

View file

@ -6,49 +6,16 @@ import (
"strconv"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
aperesource "git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
)
type request struct {
operation string
resource *resource
properties map[string]string
}
var _ aperesource.Request = (*request)(nil)
type resource struct {
name string
properties map[string]string
}
var _ aperesource.Resource = (*resource)(nil)
func (r *resource) Name() string {
return r.name
}
func (r *resource) Property(key string) string {
return r.properties[key]
}
func (r *request) Operation() string {
return r.operation
}
func (r *request) Property(key string) string {
return r.properties[key]
}
func (r *request) Resource() aperesource.Resource {
return r.resource
}
var defaultRequest = aperequest.Request{}
func nativeSchemaRole(role acl.Role) string {
switch role {
@ -123,7 +90,7 @@ func objectProperties(cnr cid.ID, oid *oid.ID, cnrOwner user.ID, header *objectV
// newAPERequest creates an APE request to be passed to a chain router. It collects resource properties from
// header provided by headerProvider. If it cannot be found in headerProvider, then properties are
// initialized from header given in prm (if it is set). Otherwise, just CID and OID are set to properties.
func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (*request, error) {
func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (aperequest.Request, error) {
switch prm.Method {
case nativeschema.MethodGetObject,
nativeschema.MethodHeadObject,
@ -131,32 +98,32 @@ func (c *checkerImpl) newAPERequest(ctx context.Context, prm Prm) (*request, err
nativeschema.MethodHashObject,
nativeschema.MethodDeleteObject:
if prm.Object == nil {
return nil, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
return defaultRequest, fmt.Errorf("method %s: %w", prm.Method, errMissingOID)
}
case nativeschema.MethodSearchObject, nativeschema.MethodPutObject:
default:
return nil, fmt.Errorf("unknown method: %s", prm.Method)
return defaultRequest, fmt.Errorf("unknown method: %s", prm.Method)
}
var header *objectV2.Header
if prm.Header != nil {
header = prm.Header
} else if prm.Object != nil {
} else if prm.Object != nil && !prm.WithoutHeaderRequest {
headerObjSDK, err := c.headerProvider.GetHeader(ctx, prm.Container, *prm.Object)
if err == nil {
header = headerObjSDK.ToV2().GetHeader()
}
}
return &request{
operation: prm.Method,
resource: &resource{
name: resourceName(prm.Container, prm.Object, prm.Namespace),
properties: objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
},
properties: map[string]string{
return aperequest.NewRequest(
prm.Method,
aperequest.NewResource(
resourceName(prm.Container, prm.Object, prm.Namespace),
objectProperties(prm.Container, prm.Object, prm.ContainerOwner, header),
),
map[string]string{
nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
nativeschema.PropertyKeyActorRole: prm.Role,
},
}, nil
), nil
}

View file

@ -6,6 +6,7 @@ import (
"testing"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -256,24 +257,23 @@ func TestNewAPERequest(t *testing.T) {
return
}
expectedRequest := request{
operation: method,
resource: &resource{
name: resourceName(cnr, obj, prm.Namespace),
properties: objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
expectedRequest := aperequest.NewRequest(
method,
aperequest.NewResource(
resourceName(cnr, obj, prm.Namespace),
objectProperties(cnr, obj, testCnrOwner, func() *objectV2.Header {
if headerObjSDK != nil {
return headerObjSDK.ToV2().GetHeader()
}
return prm.Header
}()),
},
properties: map[string]string{
}())),
map[string]string{
nativeschema.PropertyKeyActorPublicKey: prm.SenderKey,
nativeschema.PropertyKeyActorRole: prm.Role,
},
}
)
require.Equal(t, expectedRequest, *r)
require.Equal(t, expectedRequest, r)
})
}
})

View file

@ -133,6 +133,7 @@ func (c *Service) Get(request *objectV2.GetRequest, stream objectSvc.GetObjectSt
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
SoftAPECheck: reqCtx.SoftAPECheck,
WithoutHeaderRequest: true,
})
if err != nil {
return toStatusErr(err)
@ -219,6 +220,7 @@ func (c *Service) Head(ctx context.Context, request *objectV2.HeadRequest) (*obj
SenderKey: hex.EncodeToString(reqCtx.SenderKey),
ContainerOwner: reqCtx.ContainerOwner,
SoftAPECheck: reqCtx.SoftAPECheck,
WithoutHeaderRequest: true,
})
if err != nil {
return nil, toStatusErr(err)

View file

@ -114,7 +114,7 @@ func (a *assembler) initializeFromSourceObjectID(ctx context.Context, id oid.ID)
}
to := uint64(0)
if seekOff+seekLen > a.currentOffset+from {
if seekOff+seekLen >= a.currentOffset+from {
to = seekOff + seekLen - a.currentOffset
}
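
A worked boundary case suggests what the relaxed comparison fixes (numbers are illustrative; the enclosing loop is not shown in this hunk): for a requested range [2, 6), seekOff=2 and seekLen=4; suppose traversal reaches a child at currentOffset=4 with from=2, so the range end 6 equals currentOffset+from. Under the old strict `>`, to stayed 0 while from was 2, and a subsequent payload[from:to] slice would panic on inverted bounds; under `>=`, to = 6 - 4 = 2 and payload[2:2] is a valid empty slice. The zero-length ranges (from == to) exercised by the new TestGetRange below cover exactly this edge.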

View file

@ -1,6 +1,7 @@
package getsvc
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/rand"
@ -25,6 +26,9 @@ import (
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
)
@ -61,6 +65,9 @@ type testEpochReceiver uint64
func (e testEpochReceiver) Epoch() (uint64, error) {
return uint64(e), nil
}
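// CurrentEpoch satisfies the NetworkState dependency of the payload-size
// limiter (see objectChain below), which asks for the current epoch.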
func (e testEpochReceiver) CurrentEpoch() uint64 {
return uint64(e)
}
func newTestStorage() *testStorage {
return &testStorage{
@ -555,21 +562,6 @@ func TestGetRemoteSmall(t *testing.T) {
return p
}
newRngPrm := func(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
p := RangePrm{}
p.SetChunkWriter(w)
p.WithRawFlag(raw)
p.common = new(util.CommonPrm).WithLocalOnly(false)
r := objectSDK.NewRange()
r.SetOffset(off)
r.SetLength(ln)
p.SetRange(r)
return p
}
newHeadPrm := func(raw bool, w ObjectWriter) HeadPrm {
p := HeadPrm{}
p.SetHeaderWriter(w)
@ -1628,6 +1620,204 @@ func TestGetRemoteSmall(t *testing.T) {
})
}
type testTarget struct {
objects []*objectSDK.Object
}
func (tt *testTarget) WriteObject(_ context.Context, obj *objectSDK.Object) error {
tt.objects = append(tt.objects, obj)
return nil
}
func objectChain(t *testing.T, cnr cid.ID, singleSize, totalSize uint64) (oid.ID, []*objectSDK.Object, *objectSDK.Object, []byte) {
pk, err := keys.NewPrivateKey()
require.NoError(t, err)
tt := new(testTarget)
p := transformer.NewPayloadSizeLimiter(transformer.Params{
Key: &pk.PrivateKey,
NextTargetInit: func() transformer.ObjectWriter { return tt },
NetworkState: testEpochReceiver(1),
MaxSize: singleSize,
})
payload := make([]byte, totalSize)
_, err = rand.Read(payload)
require.NoError(t, err)
ver := version.Current()
hdr := objectSDK.New()
hdr.SetContainerID(cnr)
hdr.SetType(objectSDK.TypeRegular)
hdr.SetVersion(&ver)
ctx := context.Background()
require.NoError(t, p.WriteHeader(ctx, hdr))
_, err = p.Write(ctx, payload)
require.NoError(t, err)
res, err := p.Close(ctx)
require.NoError(t, err)
if totalSize <= singleSize {
// Small object, no linking.
require.Len(t, tt.objects, 1)
return res.SelfID, tt.objects, nil, payload
}
return *res.ParentID, tt.objects[:len(tt.objects)-1], tt.objects[len(tt.objects)-1], bytes.Clone(payload)
}
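
For orientation, a short usage sketch of this helper as the "big" case below employs it: splitting a 9-byte payload with a 3-byte part limit yields the data parts plus a linking object, addressed through the parent ID.

	id, parts, link, payload := objectChain(t, idCnr, 3, 9)
	// id:      ID of the virtual parent object that range requests target
	// parts:   the data parts produced by the size limiter (three here)
	// link:    the linking object (nil only when totalSize <= singleSize)
	// payload: the original 9 random bytes, for byte-exact comparison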
func newRngPrm(raw bool, w ChunkWriter, off, ln uint64) RangePrm {
p := RangePrm{}
p.SetChunkWriter(w)
p.WithRawFlag(raw)
p.common = new(util.CommonPrm)
r := objectSDK.NewRange()
r.SetOffset(off)
r.SetLength(ln)
p.SetRange(r)
return p
}
func TestGetRange(t *testing.T) {
var cnr container.Container
cnr.SetPlacementPolicy(netmaptest.PlacementPolicy())
var idCnr cid.ID
container.CalculateID(&idCnr, cnr)
ns, as := testNodeMatrix(t, []int{2})
testGetRange := func(t *testing.T, svc *Service, addr oid.Address, from, to uint64, payload []byte) {
w := NewSimpleObjectWriter()
rngPrm := newRngPrm(false, w, from, to-from)
rngPrm.WithAddress(addr)
err := svc.GetRange(context.Background(), rngPrm)
require.NoError(t, err)
if from == to {
require.Nil(t, w.Object().Payload())
} else {
require.Equal(t, payload[from:to], w.Object().Payload())
}
}
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
const curEpoch = 13
return &Service{
log: test.NewLogger(t),
localStorage: newTestStorage(),
traverserGenerator: &testTraverserGenerator{
c: cnr,
b: map[uint64]placement.Builder{
curEpoch: b,
},
},
epochSource: testEpochReceiver(curEpoch),
remoteStorageConstructor: c,
keyStore: &testKeyStorage{},
}
}
t.Run("small", func(t *testing.T) {
const totalSize = 5
_, objs, _, payload := objectChain(t, idCnr, totalSize, totalSize)
require.Len(t, objs, 1)
require.Len(t, payload, totalSize)
obj := objs[0]
addr := object.AddressOf(obj)
builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{addr.EncodeToString(): ns}}
c1 := newTestClient()
c1.addResult(addr, obj, nil)
svc := newSvc(builder, &testClientCache{
clients: map[string]*testClient{
as[0][0]: c1,
as[0][1]: c1,
},
})
for from := 0; from < totalSize-1; from++ {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
})
}
}
})
t.Run("big", func(t *testing.T) {
const totalSize = 9
id, objs, link, payload := objectChain(t, idCnr, 3, totalSize) // 3 parts
require.Equal(t, totalSize, len(payload))
builder := &testPlacementBuilder{vectors: map[string][][]netmap.NodeInfo{}}
builder.vectors[idCnr.EncodeToString()+"/"+id.EncodeToString()] = ns
builder.vectors[object.AddressOf(link).EncodeToString()] = ns
for i := range objs {
builder.vectors[object.AddressOf(objs[i]).EncodeToString()] = ns
}
var addr oid.Address
addr.SetContainer(idCnr)
addr.SetObject(id)
const (
linkingLast = "splitinfo=last"
linkingChildren = "splitinfo=children"
linkingBoth = "splitinfo=both"
)
lastID, _ := objs[len(objs)-1].ID()
linkID, _ := link.ID()
for _, kind := range []string{linkingLast, linkingChildren, linkingBoth} {
t.Run(kind, func(t *testing.T) {
c1 := newTestClient()
for i := range objs {
c1.addResult(object.AddressOf(objs[i]), objs[i], nil)
}
c1.addResult(object.AddressOf(link), link, nil)
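// Exercise every way a SplitInfo can advertise the chain: last part only,
// link object only, or both; range reads must behave identically for each.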
si := objectSDK.NewSplitInfo()
switch kind {
case linkingLast:
si.SetLastPart(lastID)
case linkingChildren:
si.SetLink(linkID)
case linkingBoth:
si.SetLastPart(lastID)
si.SetLink(linkID)
}
c1.addResult(addr, nil, objectSDK.NewSplitInfoError(si))
svc := newSvc(builder, &testClientCache{
clients: map[string]*testClient{
as[0][0]: c1,
as[0][1]: c1,
},
})
for from := 0; from < totalSize-1; from++ {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
})
}
}
})
}
})
}
func TestGetFromPastEpoch(t *testing.T) {
ctx := context.Background()

View file

@ -348,7 +348,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
ln = maxInitialBufferSize
}
w := bytes.NewBuffer(make([]byte, ln))
w := bytes.NewBuffer(make([]byte, 0, ln))
_, err = io.CopyN(w, rdr, int64(prm.ln))
if err != nil {
return nil, fmt.Errorf("read payload: %w", err)
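
The one-character change above fixes a subtle allocation bug: make([]byte, ln) yields a slice of length ln, so the buffer starts out holding ln zero bytes and io.CopyN appends the real payload after them, leaving a zero-byte prefix of length ln in the result; make([]byte, 0, ln) reserves the same capacity but starts empty. A self-contained demonstration using only the standard library:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"strings"
	)

	func main() {
		bad := bytes.NewBuffer(make([]byte, 5))     // length 5: five zero bytes already inside
		good := bytes.NewBuffer(make([]byte, 0, 5)) // length 0, capacity 5

		io.CopyN(bad, strings.NewReader("hello"), 5)
		io.CopyN(good, strings.NewReader("hello"), 5)

		fmt.Println(len(bad.Bytes()), len(good.Bytes())) // prints: 10 5
	}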

pkg/services/tree/ape.go (new file, 70 lines)
View file

@ -0,0 +1,70 @@
package tree
import (
"encoding/hex"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
func (s *Service) checkAPE(container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey) error {
namespace := ""
cntNamespace, hasNamespace := strings.CutSuffix(cnrSDK.ReadDomain(container.Value).Zone(), ".ns")
if hasNamespace {
namespace = cntNamespace
}
schemaMethod, err := converter.SchemaMethodFromACLOperation(operation)
if err != nil {
return apeErr(err)
}
schemaRole, err := converter.SchemaRoleFromACLRole(role)
if err != nil {
return apeErr(err)
}
reqProps := map[string]string{
nativeschema.PropertyKeyActorPublicKey: hex.EncodeToString(publicKey.Bytes()),
nativeschema.PropertyKeyActorRole: schemaRole,
}
var resourceName string
if namespace == "root" || namespace == "" {
resourceName = fmt.Sprintf(nativeschema.ResourceFormatRootContainerObjects, cid.EncodeToString())
} else {
resourceName = fmt.Sprintf(nativeschema.ResourceFormatNamespaceContainerObjects, namespace, cid.EncodeToString())
}
request := aperequest.NewRequest(
schemaMethod,
aperequest.NewResource(resourceName, make(map[string]string)),
reqProps,
)
rt := engine.NewRequestTargetExtended(namespace, cid.EncodeToString(), fmt.Sprintf("%s:%s", namespace, publicKey.Address()), nil)
status, found, err := s.router.IsAllowed(apechain.Ingress, rt, request)
if err != nil {
return apeErr(err)
}
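// Deny by default: access is granted only when some rule matched (found)
// and its effect is an explicit Allow; any other status falls through to
// the formatted denial error below.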
if found && status == apechain.Allow {
return nil
}
err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", schemaMethod, status.String())
return apeErr(err)
}
func apeErr(err error) error {
errAccessDenied := &apistatus.ObjectAccessDenied{}
errAccessDenied.WriteReason(err.Error())
return errAccessDenied
}

View file

@ -130,7 +130,7 @@ func TestGetSubTreeOrderAsc(t *testing.T) {
t.Run("boltdb forest", func(t *testing.T) {
p := pilorama.NewBoltForest(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")))
require.NoError(t, p.Open(context.Background(), 0644))
require.NoError(t, p.Open(context.Background(), 0o644))
require.NoError(t, p.Init())
testGetSubTreeOrderAsc(t, p)
})

View file

@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
@ -38,6 +39,8 @@ type cfg struct {
containerCacheSize int
authorizedKeys [][]byte
router policyengine.ChainRouter
metrics MetricsRegister
}
@ -139,3 +142,9 @@ func WithAuthorizedKeys(keys keys.PublicKeys) Option {
}
}
}
func WithAPERouter(router policyengine.ChainRouter) Option {
return func(c *cfg) {
c.router = router
}
}
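
A hedged wiring sketch (the constructor name New and the sibling options are assumptions for illustration; only WithAPERouter itself is introduced by this diff):

	// Any policyengine.ChainRouter implementation can be plugged in here,
	// e.g. the engine instance the node already uses for object APE checks.
	svc := tree.New(
		tree.WithAPERouter(router),
		// ... existing options: logger, storage, key material, etc.
	)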

View file

@ -446,7 +446,7 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
type stackItem struct {
values []pilorama.NodeInfo
parent pilorama.Node
last string
last *string
}
// Traverse the tree in a DFS manner. Because we need to support arbitrary depth,
@ -502,7 +502,7 @@ func getSortedSubTree(ctx context.Context, srv TreeService_GetSubTreeServer, cid
}
if b.GetDepth() == 0 || uint32(len(stack)) < b.GetDepth() {
children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.ID, "", batchSize)
children, last, err := forest.TreeSortedByFilename(ctx, cid, b.GetTreeId(), node.ID, nil, batchSize)
if err != nil {
return err
}
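
The pointer-typed cursor is the point of the pilorama change referenced earlier: with a plain string, an empty cursor is indistinguishable from a legitimately empty filename, so pagination could never resume past entries named "". A sketch of the intended semantics (an assumption based on the SortedByFilename fix):

	var last *string // nil: first page, no position yet
	empty := ""
	last = &empty    // resume after an entry whose filename is the empty string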

View file

@ -71,7 +71,7 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op
return err
}
role, err := roleFromReq(cnr, req, bt)
role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
if err != nil {
return fmt.Errorf("can't get request role: %w", err)
}
@ -79,8 +79,11 @@ func (s *Service) verifyClient(req message, cid cidSDK.ID, rawBearer []byte, op
basicACL := cnr.Value.BasicACL()
// Basic ACL mask can be unset if container operations are performed
// with strict APE checks only.
//
// FIXME(@aarifullin): tree service temporarily performs APE checks on
// object verbs, because tree verbs have not been introduced yet.
if basicACL == 0x0 {
return nil
return s.checkAPE(cnr, cid, op, role, pubKey)
}
if !basicACL.IsOpAllowed(op, role) {
@ -222,7 +225,7 @@ func SignMessage(m message, key *ecdsa.PrivateKey) error {
return nil
}
func roleFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, error) {
func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role, *keys.PublicKey, error) {
role := acl.RoleOthers
owner := cnr.Value.Owner()
@ -233,7 +236,7 @@ func roleFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role,
pub, err := keys.NewPublicKeyFromBytes(rawKey, elliptic.P256())
if err != nil {
return role, fmt.Errorf("invalid public key: %w", err)
return role, nil, fmt.Errorf("invalid public key: %w", err)
}
var reqSigner user.ID
@ -243,7 +246,7 @@ func roleFromReq(cnr *core.Container, req message, bt *bearer.Token) (acl.Role,
role = acl.RoleOwner
}
return role, nil
return role, pub, nil
}
func eACLOp(op acl.Op) eacl.Operation {